X86ISelLowering.cpp revision 360784
1//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that X86 uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "X86ISelLowering.h"
15#include "Utils/X86ShuffleDecode.h"
16#include "X86CallingConv.h"
17#include "X86FrameLowering.h"
18#include "X86InstrBuilder.h"
19#include "X86IntrinsicsInfo.h"
20#include "X86MachineFunctionInfo.h"
21#include "X86TargetMachine.h"
22#include "X86TargetObjectFile.h"
23#include "llvm/ADT/SmallBitVector.h"
24#include "llvm/ADT/SmallSet.h"
25#include "llvm/ADT/Statistic.h"
26#include "llvm/ADT/StringExtras.h"
27#include "llvm/ADT/StringSwitch.h"
28#include "llvm/Analysis/BlockFrequencyInfo.h"
29#include "llvm/Analysis/EHPersonalities.h"
30#include "llvm/Analysis/ProfileSummaryInfo.h"
31#include "llvm/CodeGen/IntrinsicLowering.h"
32#include "llvm/CodeGen/MachineFrameInfo.h"
33#include "llvm/CodeGen/MachineFunction.h"
34#include "llvm/CodeGen/MachineInstrBuilder.h"
35#include "llvm/CodeGen/MachineJumpTableInfo.h"
36#include "llvm/CodeGen/MachineModuleInfo.h"
37#include "llvm/CodeGen/MachineRegisterInfo.h"
38#include "llvm/CodeGen/TargetLowering.h"
39#include "llvm/CodeGen/WinEHFuncInfo.h"
40#include "llvm/IR/CallSite.h"
41#include "llvm/IR/CallingConv.h"
42#include "llvm/IR/Constants.h"
43#include "llvm/IR/DerivedTypes.h"
44#include "llvm/IR/DiagnosticInfo.h"
45#include "llvm/IR/Function.h"
46#include "llvm/IR/GlobalAlias.h"
47#include "llvm/IR/GlobalVariable.h"
48#include "llvm/IR/Instructions.h"
49#include "llvm/IR/Intrinsics.h"
50#include "llvm/MC/MCAsmInfo.h"
51#include "llvm/MC/MCContext.h"
52#include "llvm/MC/MCExpr.h"
53#include "llvm/MC/MCSymbol.h"
54#include "llvm/Support/CommandLine.h"
55#include "llvm/Support/Debug.h"
56#include "llvm/Support/ErrorHandling.h"
57#include "llvm/Support/KnownBits.h"
58#include "llvm/Support/MathExtras.h"
59#include "llvm/Target/TargetOptions.h"
60#include <algorithm>
61#include <bitset>
62#include <cctype>
63#include <numeric>
64using namespace llvm;
65
66#define DEBUG_TYPE "x86-isel"
67
68STATISTIC(NumTailCalls, "Number of tail calls");
69
70static cl::opt<int> ExperimentalPrefLoopAlignment(
71    "x86-experimental-pref-loop-alignment", cl::init(4),
72    cl::desc(
73        "Sets the preferable loop alignment for experiments (as log2 bytes)"
74        "(the last x86-experimental-pref-loop-alignment bits"
75        " of the loop header PC will be 0)."),
76    cl::Hidden);
77
78// Added in 10.0.
79static cl::opt<bool> EnableOldKNLABI(
80    "x86-enable-old-knl-abi", cl::init(false),
81    cl::desc("Enables passing v32i16 and v64i8 in 2 YMM registers instead of "
82             "one ZMM register on AVX512F, but not AVX512BW targets."),
83    cl::Hidden);
84
85static cl::opt<bool> MulConstantOptimization(
86    "mul-constant-optimization", cl::init(true),
87    cl::desc("Replace 'mul x, Const' with more effective instructions like "
88             "SHIFT, LEA, etc."),
89    cl::Hidden);
90
91static cl::opt<bool> ExperimentalUnorderedISEL(
92    "x86-experimental-unordered-atomic-isel", cl::init(false),
93    cl::desc("Use LoadSDNode and StoreSDNode instead of "
94             "AtomicSDNode for unordered atomic loads and "
95             "stores respectively."),
96    cl::Hidden);
97
98/// Call this when the user attempts to do something unsupported, like
99/// returning a double without SSE2 enabled on x86_64. This is not fatal, unlike
100/// report_fatal_error, so calling code should attempt to recover without
101/// crashing.
102static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl,
103                             const char *Msg) {
104  MachineFunction &MF = DAG.getMachineFunction();
105  DAG.getContext()->diagnose(
106      DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
107}
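// Typical use (sketch only): lowering code elsewhere in this file reports an
// unsupported construct roughly as
//   errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
// and then continues with a best-effort lowering instead of aborting.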
108
109X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
110                                     const X86Subtarget &STI)
111    : TargetLowering(TM), Subtarget(STI) {
112  bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
113  X86ScalarSSEf64 = Subtarget.hasSSE2();
114  X86ScalarSSEf32 = Subtarget.hasSSE1();
115  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
116
117  // Set up the TargetLowering object.
118
119  // X86 is weird. It always uses i8 for shift amounts and setcc results.
120  setBooleanContents(ZeroOrOneBooleanContent);
121  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
122  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
123
124  // For 64-bit, since we have so many registers, use the ILP scheduler.
125  // For 32-bit, use the register pressure specific scheduling.
126  // For Atom, always use ILP scheduling.
127  if (Subtarget.isAtom())
128    setSchedulingPreference(Sched::ILP);
129  else if (Subtarget.is64Bit())
130    setSchedulingPreference(Sched::ILP);
131  else
132    setSchedulingPreference(Sched::RegPressure);
133  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
134  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
135
136  // Bypass expensive divides and use cheaper ones.
137  if (TM.getOptLevel() >= CodeGenOpt::Default) {
138    if (Subtarget.hasSlowDivide32())
139      addBypassSlowDiv(32, 8);
140    if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
141      addBypassSlowDiv(64, 32);
142  }
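  // Sketch of the transformation addBypassSlowDiv enables (at the IR level):
  //   if (((x | y) >> 32) == 0)          // both operands fit in 32 bits
  //     q = (uint32_t)x / (uint32_t)y;   // cheap 32-bit DIV
  //   else
  //     q = x / y;                       // full-width DIV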
143
144  if (Subtarget.isTargetWindowsMSVC() ||
145      Subtarget.isTargetWindowsItanium()) {
146    // Set up Windows compiler runtime calls.
147    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
148    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
149    setLibcallName(RTLIB::SREM_I64, "_allrem");
150    setLibcallName(RTLIB::UREM_I64, "_aullrem");
151    setLibcallName(RTLIB::MUL_I64, "_allmul");
152    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
153    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
154    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
155    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
156    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
157  }
158
159  if (Subtarget.getTargetTriple().isOSMSVCRT()) {
160    // MSVCRT doesn't have powi; fall back to pow
161    setLibcallName(RTLIB::POWI_F32, nullptr);
162    setLibcallName(RTLIB::POWI_F64, nullptr);
163  }
164
165  // If we don't have cmpxchg8b (meaning this is a 386/486), limit the atomic size
166  // to 32 bits so AtomicExpandPass will expand it and we don't need cmpxchg8b.
167  // FIXME: Should we be limiting the atomic size on other configs? Default is
168  // 1024.
169  if (!Subtarget.hasCmpxchg8b())
170    setMaxAtomicSizeInBitsSupported(32);
171
172  // Set up the register classes.
173  addRegisterClass(MVT::i8, &X86::GR8RegClass);
174  addRegisterClass(MVT::i16, &X86::GR16RegClass);
175  addRegisterClass(MVT::i32, &X86::GR32RegClass);
176  if (Subtarget.is64Bit())
177    addRegisterClass(MVT::i64, &X86::GR64RegClass);
178
179  for (MVT VT : MVT::integer_valuetypes())
180    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
181
182  // We don't accept any truncstore of integer registers.
183  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
184  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
185  setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
186  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
187  setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
188  setTruncStoreAction(MVT::i16, MVT::i8,  Expand);
189
190  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
191
192  // SETOEQ and SETUNE require checking two conditions.
193  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
194  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
195  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
196  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
197  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
198  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
199
200  // Integer absolute.
201  if (Subtarget.hasCMov()) {
202    setOperationAction(ISD::ABS            , MVT::i16  , Custom);
203    setOperationAction(ISD::ABS            , MVT::i32  , Custom);
204  }
205  setOperationAction(ISD::ABS              , MVT::i64  , Custom);
206
207  // Funnel shifts.
208  for (auto ShiftOp : {ISD::FSHL, ISD::FSHR}) {
209    setOperationAction(ShiftOp             , MVT::i16  , Custom);
210    setOperationAction(ShiftOp             , MVT::i32  , Custom);
211    if (Subtarget.is64Bit())
212      setOperationAction(ShiftOp           , MVT::i64  , Custom);
213  }
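  // Sketch: the custom FSHL/FSHR lowering typically maps onto the x86
  // double-shift instructions, e.g. (fshl i32 %a, %b, 5) selecting roughly
  //   shld $5, %ebx, %eax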
214
215  if (!Subtarget.useSoftFloat()) {
216    // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
217    // operation.
218    setOperationAction(ISD::UINT_TO_FP,        MVT::i8, Promote);
219    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i8, Promote);
220    setOperationAction(ISD::UINT_TO_FP,        MVT::i16, Promote);
221    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i16, Promote);
222    // We have an algorithm for SSE2, and we turn this into a 64-bit
223    // FILD or VCVTUSI2SS/SD for other targets.
224    setOperationAction(ISD::UINT_TO_FP,        MVT::i32, Custom);
225    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
226    // We have an algorithm for SSE2->double, and we turn this into a
227    // 64-bit FILD followed by conditional FADD for other targets.
228    setOperationAction(ISD::UINT_TO_FP,        MVT::i64, Custom);
229    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
230
231    // Promote i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
232    // this operation.
233    setOperationAction(ISD::SINT_TO_FP,        MVT::i8, Promote);
234    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i8, Promote);
235    // SSE has no i16 to fp conversion, only i32. We promote in the handler
236    // to allow f80 to use i16 and f64 to use i16 with SSE1 only.
237    setOperationAction(ISD::SINT_TO_FP,        MVT::i16, Custom);
238    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i16, Custom);
239    // f32 and f64 cases are Legal with SSE1/SSE2, f80 case is not
240    setOperationAction(ISD::SINT_TO_FP,        MVT::i32, Custom);
241    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
242    // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
243    // are Legal, f80 is custom lowered.
244    setOperationAction(ISD::SINT_TO_FP,        MVT::i64, Custom);
245    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
246
247    // Promote i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
248    // this operation.
249    setOperationAction(ISD::FP_TO_SINT,        MVT::i8,  Promote);
250    // FIXME: This doesn't generate invalid exception when it should. PR44019.
251    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i8,  Promote);
252    setOperationAction(ISD::FP_TO_SINT,        MVT::i16, Custom);
253    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i16, Custom);
254    setOperationAction(ISD::FP_TO_SINT,        MVT::i32, Custom);
255    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
256    // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
257    // are Legal, f80 is custom lowered.
258    setOperationAction(ISD::FP_TO_SINT,        MVT::i64, Custom);
259    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
260
261    // Handle FP_TO_UINT by promoting the destination to a larger signed
262    // conversion.
263    setOperationAction(ISD::FP_TO_UINT,        MVT::i8,  Promote);
264    // FIXME: This doesn't generate invalid exception when it should. PR44019.
265    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i8,  Promote);
266    setOperationAction(ISD::FP_TO_UINT,        MVT::i16, Promote);
267    // FIXME: This doesn't generate invalid exception when it should. PR44019.
268    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i16, Promote);
269    setOperationAction(ISD::FP_TO_UINT,        MVT::i32, Custom);
270    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
271    setOperationAction(ISD::FP_TO_UINT,        MVT::i64, Custom);
272    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
273  }
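  // Sketch of the promote-to-wider-signed strategy used above for FP_TO_UINT:
  // an fptoui to i8 is legalized roughly as
  //   %wide = fptosi float %x to i32   ; signed convert to a wider type
  //   %res  = trunc i32 %wide to i8    ; then truncate to the original width
  // which is safe because every in-range u8 value fits in i32.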
274
275  // Handle address space casts between mixed sized pointers.
276  setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
277  setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
278
279  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
280  if (!X86ScalarSSEf64) {
281    setOperationAction(ISD::BITCAST        , MVT::f32  , Expand);
282    setOperationAction(ISD::BITCAST        , MVT::i32  , Expand);
283    if (Subtarget.is64Bit()) {
284      setOperationAction(ISD::BITCAST      , MVT::f64  , Expand);
285      // Without SSE, i64->f64 goes through memory.
286      setOperationAction(ISD::BITCAST      , MVT::i64  , Expand);
287    }
288  } else if (!Subtarget.is64Bit())
289    setOperationAction(ISD::BITCAST      , MVT::i64  , Custom);
290
291  // Scalar integer divide and remainder are lowered to use operations that
292  // produce two results, to match the available instructions. This exposes
293  // the two-result form to trivial CSE, which is able to combine x/y and x%y
294  // into a single instruction.
295  //
296  // Scalar integer multiply-high is also lowered to use two-result
297  // operations, to match the available instructions. However, plain multiply
298  // (low) operations are left as Legal, as there are single-result
299  // instructions for this in x86. Using the two-result multiply instructions
300  // when both high and low results are needed must be arranged by dagcombine.
301  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
302    setOperationAction(ISD::MULHS, VT, Expand);
303    setOperationAction(ISD::MULHU, VT, Expand);
304    setOperationAction(ISD::SDIV, VT, Expand);
305    setOperationAction(ISD::UDIV, VT, Expand);
306    setOperationAction(ISD::SREM, VT, Expand);
307    setOperationAction(ISD::UREM, VT, Expand);
308  }
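  // Sketch of the CSE effect described above: for
  //   %q = sdiv i32 %x, %y
  //   %r = srem i32 %x, %y
  // both nodes legalize to a single ISD::SDIVREM, so one IDIV is emitted with
  // the quotient in EAX and the remainder in EDX.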
309
310  setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
311  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
312  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
313                   MVT::i8,  MVT::i16, MVT::i32, MVT::i64 }) {
314    setOperationAction(ISD::BR_CC,     VT, Expand);
315    setOperationAction(ISD::SELECT_CC, VT, Expand);
316  }
317  if (Subtarget.is64Bit())
318    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
319  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Legal);
320  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Legal);
321  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
322
323  setOperationAction(ISD::FREM             , MVT::f32  , Expand);
324  setOperationAction(ISD::FREM             , MVT::f64  , Expand);
325  setOperationAction(ISD::FREM             , MVT::f80  , Expand);
326  setOperationAction(ISD::FREM             , MVT::f128 , Expand);
327  setOperationAction(ISD::FLT_ROUNDS_      , MVT::i32  , Custom);
328
329  // Promote the i8 variants and force them up to i32, which has a shorter
330  // encoding.
331  setOperationPromotedToType(ISD::CTTZ           , MVT::i8   , MVT::i32);
332  setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8   , MVT::i32);
333  if (!Subtarget.hasBMI()) {
334    setOperationAction(ISD::CTTZ           , MVT::i16  , Custom);
335    setOperationAction(ISD::CTTZ           , MVT::i32  , Custom);
336    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16  , Legal);
337    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32  , Legal);
338    if (Subtarget.is64Bit()) {
339      setOperationAction(ISD::CTTZ         , MVT::i64  , Custom);
340      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
341    }
342  }
343
344  if (Subtarget.hasLZCNT()) {
345    // When promoting the i8 variants, force them to i32 for a shorter
346    // encoding.
347    setOperationPromotedToType(ISD::CTLZ           , MVT::i8   , MVT::i32);
348    setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8   , MVT::i32);
349  } else {
350    setOperationAction(ISD::CTLZ           , MVT::i8   , Custom);
351    setOperationAction(ISD::CTLZ           , MVT::i16  , Custom);
352    setOperationAction(ISD::CTLZ           , MVT::i32  , Custom);
353    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8   , Custom);
354    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16  , Custom);
355    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32  , Custom);
356    if (Subtarget.is64Bit()) {
357      setOperationAction(ISD::CTLZ         , MVT::i64  , Custom);
358      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
359    }
360  }
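  // Sketch: without BMI/LZCNT the custom CTTZ/CTLZ lowering is built from
  // BSF/BSR, e.g. ctlz i32 becomes roughly (bsr + xor 31), with a CMOV (or
  // branch) supplying the defined result for a zero input.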
361
362  // Special handling for half-precision floating point conversions.
363  // If we don't have F16C support, then lower half float conversions
364  // into library calls.
365  if (Subtarget.useSoftFloat() || !Subtarget.hasF16C()) {
366    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
367    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
368  }
369
370  // There's never any support for operations beyond MVT::f32.
371  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
372  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
373  setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand);
374  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
375  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);
376  setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);
377
378  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
379  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
380  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
381  setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f16, Expand);
382  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
383  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
384  setTruncStoreAction(MVT::f80, MVT::f16, Expand);
385  setTruncStoreAction(MVT::f128, MVT::f16, Expand);
386
387  if (Subtarget.hasPOPCNT()) {
388    setOperationPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);
389  } else {
390    setOperationAction(ISD::CTPOP          , MVT::i8   , Expand);
391    setOperationAction(ISD::CTPOP          , MVT::i16  , Expand);
392    setOperationAction(ISD::CTPOP          , MVT::i32  , Expand);
393    if (Subtarget.is64Bit())
394      setOperationAction(ISD::CTPOP        , MVT::i64  , Expand);
395    else
396      setOperationAction(ISD::CTPOP        , MVT::i64  , Custom);
397  }
398
399  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);
400
401  if (!Subtarget.hasMOVBE())
402    setOperationAction(ISD::BSWAP          , MVT::i16  , Expand);
403
404  // X86 wants to expand cmov itself.
405  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
406    setOperationAction(ISD::SELECT, VT, Custom);
407    setOperationAction(ISD::SETCC, VT, Custom);
408    setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
409    setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
410  }
411  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
412    if (VT == MVT::i64 && !Subtarget.is64Bit())
413      continue;
414    setOperationAction(ISD::SELECT, VT, Custom);
415    setOperationAction(ISD::SETCC,  VT, Custom);
416  }
417
418  // Custom action for SELECT MMX and expand action for SELECT_CC MMX
419  setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
420  setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);
421
422  setOperationAction(ISD::EH_RETURN       , MVT::Other, Custom);
423  // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since
424  // LLVM/Clang supports zero-cost DWARF and SEH exception handling.
425  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
426  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
427  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
428  if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
429    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
430
431  // Darwin ABI issue.
432  for (auto VT : { MVT::i32, MVT::i64 }) {
433    if (VT == MVT::i64 && !Subtarget.is64Bit())
434      continue;
435    setOperationAction(ISD::ConstantPool    , VT, Custom);
436    setOperationAction(ISD::JumpTable       , VT, Custom);
437    setOperationAction(ISD::GlobalAddress   , VT, Custom);
438    setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
439    setOperationAction(ISD::ExternalSymbol  , VT, Custom);
440    setOperationAction(ISD::BlockAddress    , VT, Custom);
441  }
442
443  // 64-bit shl, sra, srl (iff 32-bit x86)
444  for (auto VT : { MVT::i32, MVT::i64 }) {
445    if (VT == MVT::i64 && !Subtarget.is64Bit())
446      continue;
447    setOperationAction(ISD::SHL_PARTS, VT, Custom);
448    setOperationAction(ISD::SRA_PARTS, VT, Custom);
449    setOperationAction(ISD::SRL_PARTS, VT, Custom);
450  }
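  // Sketch: on 32-bit targets a 64-bit shift splits into *_PARTS nodes that
  // lower to an SHLD/SHRD pair plus a test of bit 5 of the shift amount to
  // pick between the small-shift and >=32-bit results.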
451
452  if (Subtarget.hasSSEPrefetch() || Subtarget.has3DNow())
453    setOperationAction(ISD::PREFETCH      , MVT::Other, Legal);
454
455  setOperationAction(ISD::ATOMIC_FENCE  , MVT::Other, Custom);
456
457  // Expand certain atomics
458  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
459    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
460    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
461    setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
462    setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
463    setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
464    setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
465    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
466  }
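  // Sketch: the Custom entries above let isel pick the cheapest LOCK form,
  // e.g. an atomicrmw add whose result is unused can become LOCK ADD instead
  // of LOCK XADD.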
467
468  if (!Subtarget.is64Bit())
469    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
470
471  if (Subtarget.hasCmpxchg16b()) {
472    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
473  }
474
475  // FIXME - use subtarget debug flags
476  if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
477      !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
478      TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
479    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
480  }
481
482  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
483  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);
484
485  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
486  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
487
488  setOperationAction(ISD::TRAP, MVT::Other, Legal);
489  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
490
491  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
492  setOperationAction(ISD::VASTART           , MVT::Other, Custom);
493  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
494  bool Is64Bit = Subtarget.is64Bit();
495  setOperationAction(ISD::VAARG,  MVT::Other, Is64Bit ? Custom : Expand);
496  setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);
497
498  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
499  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
500
501  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
502
503  // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
504  setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
505  setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);
506
507  if (!Subtarget.useSoftFloat() && X86ScalarSSEf64) {
508    // f32 and f64 use SSE.
509    // Set up the FP register classes.
510    addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
511                                                     : &X86::FR32RegClass);
512    addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
513                                                     : &X86::FR64RegClass);
514
515    // Disable f32->f64 extload as we can only generate this in one instruction
516    // under optsize. So it's easier to pattern match (fpext (load)) for that
517    // case instead of needing to emit 2 instructions for extload in the
518    // non-optsize case.
519    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
520
521    for (auto VT : { MVT::f32, MVT::f64 }) {
522      // Use ANDPD to simulate FABS.
523      setOperationAction(ISD::FABS, VT, Custom);
524
525      // Use XORP to simulate FNEG.
526      setOperationAction(ISD::FNEG, VT, Custom);
527
528      // Use ANDPD and ORPD to simulate FCOPYSIGN.
529      setOperationAction(ISD::FCOPYSIGN, VT, Custom);
530
531      // These might be better off as horizontal vector ops.
532      setOperationAction(ISD::FADD, VT, Custom);
533      setOperationAction(ISD::FSUB, VT, Custom);
534
535      // We don't support sin/cos/fmod
536      setOperationAction(ISD::FSIN   , VT, Expand);
537      setOperationAction(ISD::FCOS   , VT, Expand);
538      setOperationAction(ISD::FSINCOS, VT, Expand);
539    }
540
541    // Lower this to MOVMSK plus an AND.
542    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
543    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
544
545  } else if (!Subtarget.useSoftFloat() && X86ScalarSSEf32 && (UseX87 || Is64Bit)) {
546    // Use SSE for f32, x87 for f64.
547    // Set up the FP register classes.
548    addRegisterClass(MVT::f32, &X86::FR32RegClass);
549    if (UseX87)
550      addRegisterClass(MVT::f64, &X86::RFP64RegClass);
551
552    // Use ANDPS to simulate FABS.
553    setOperationAction(ISD::FABS , MVT::f32, Custom);
554
555    // Use XORP to simulate FNEG.
556    setOperationAction(ISD::FNEG , MVT::f32, Custom);
557
558    if (UseX87)
559      setOperationAction(ISD::UNDEF, MVT::f64, Expand);
560
561    // Use ANDPS and ORPS to simulate FCOPYSIGN.
562    if (UseX87)
563      setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
564    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
565
566    // We don't support sin/cos/fmod
567    setOperationAction(ISD::FSIN   , MVT::f32, Expand);
568    setOperationAction(ISD::FCOS   , MVT::f32, Expand);
569    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
570
571    if (UseX87) {
572      // Always expand sin/cos functions even though x87 has an instruction.
573      setOperationAction(ISD::FSIN, MVT::f64, Expand);
574      setOperationAction(ISD::FCOS, MVT::f64, Expand);
575      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
576    }
577  } else if (UseX87) {
578    // f32 and f64 in x87.
579    // Set up the FP register classes.
580    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
581    addRegisterClass(MVT::f32, &X86::RFP32RegClass);
582
583    for (auto VT : { MVT::f32, MVT::f64 }) {
584      setOperationAction(ISD::UNDEF,     VT, Expand);
585      setOperationAction(ISD::FCOPYSIGN, VT, Expand);
586
587      // Always expand sin/cos functions even though x87 has an instruction.
588      setOperationAction(ISD::FSIN   , VT, Expand);
589      setOperationAction(ISD::FCOS   , VT, Expand);
590      setOperationAction(ISD::FSINCOS, VT, Expand);
591    }
592  }
593
594  // Expand FP32 immediates into loads from the stack, save special cases.
595  if (isTypeLegal(MVT::f32)) {
596    if (UseX87 && (getRegClassFor(MVT::f32) == &X86::RFP32RegClass)) {
597      addLegalFPImmediate(APFloat(+0.0f)); // FLD0
598      addLegalFPImmediate(APFloat(+1.0f)); // FLD1
599      addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
600      addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
601    } else // SSE immediates.
602      addLegalFPImmediate(APFloat(+0.0f)); // xorps
603  }
604  // Expand FP64 immediates into loads from the stack, save special cases.
605  if (isTypeLegal(MVT::f64)) {
606    if (UseX87 && getRegClassFor(MVT::f64) == &X86::RFP64RegClass) {
607      addLegalFPImmediate(APFloat(+0.0)); // FLD0
608      addLegalFPImmediate(APFloat(+1.0)); // FLD1
609      addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
610      addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
611    } else // SSE immediates.
612      addLegalFPImmediate(APFloat(+0.0)); // xorpd
613  }
614  // Handle constrained floating-point operations of scalar.
615  setOperationAction(ISD::STRICT_FADD,      MVT::f32, Legal);
616  setOperationAction(ISD::STRICT_FADD,      MVT::f64, Legal);
617  setOperationAction(ISD::STRICT_FSUB,      MVT::f32, Legal);
618  setOperationAction(ISD::STRICT_FSUB,      MVT::f64, Legal);
619  setOperationAction(ISD::STRICT_FMUL,      MVT::f32, Legal);
620  setOperationAction(ISD::STRICT_FMUL,      MVT::f64, Legal);
621  setOperationAction(ISD::STRICT_FDIV,      MVT::f32, Legal);
622  setOperationAction(ISD::STRICT_FDIV,      MVT::f64, Legal);
623  setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
624  setOperationAction(ISD::STRICT_FP_ROUND,  MVT::f32, Legal);
625  setOperationAction(ISD::STRICT_FP_ROUND,  MVT::f64, Legal);
626  setOperationAction(ISD::STRICT_FSQRT,     MVT::f32, Legal);
627  setOperationAction(ISD::STRICT_FSQRT,     MVT::f64, Legal);
628
629  // We don't support FMA.
630  setOperationAction(ISD::FMA, MVT::f64, Expand);
631  setOperationAction(ISD::FMA, MVT::f32, Expand);
632
633  // f80 always uses X87.
634  if (UseX87) {
635    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
636    setOperationAction(ISD::UNDEF,     MVT::f80, Expand);
637    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
638    {
639      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
640      addLegalFPImmediate(TmpFlt);  // FLD0
641      TmpFlt.changeSign();
642      addLegalFPImmediate(TmpFlt);  // FLD0/FCHS
643
644      bool ignored;
645      APFloat TmpFlt2(+1.0);
646      TmpFlt2.convert(APFloat::x87DoubleExtended(), APFloat::rmNearestTiesToEven,
647                      &ignored);
648      addLegalFPImmediate(TmpFlt2);  // FLD1
649      TmpFlt2.changeSign();
650      addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
651    }
652
653    // Always expand sin/cos functions even though x87 has an instruction.
654    setOperationAction(ISD::FSIN   , MVT::f80, Expand);
655    setOperationAction(ISD::FCOS   , MVT::f80, Expand);
656    setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
657
658    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
659    setOperationAction(ISD::FCEIL,  MVT::f80, Expand);
660    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
661    setOperationAction(ISD::FRINT,  MVT::f80, Expand);
662    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
663    setOperationAction(ISD::FMA, MVT::f80, Expand);
664    setOperationAction(ISD::LROUND, MVT::f80, Expand);
665    setOperationAction(ISD::LLROUND, MVT::f80, Expand);
666    setOperationAction(ISD::LRINT, MVT::f80, Expand);
667    setOperationAction(ISD::LLRINT, MVT::f80, Expand);
668
669    // Handle constrained floating-point operations of scalar.
670    setOperationAction(ISD::STRICT_FADD     , MVT::f80, Legal);
671    setOperationAction(ISD::STRICT_FSUB     , MVT::f80, Legal);
672    setOperationAction(ISD::STRICT_FMUL     , MVT::f80, Legal);
673    setOperationAction(ISD::STRICT_FDIV     , MVT::f80, Legal);
674    setOperationAction(ISD::STRICT_FSQRT    , MVT::f80, Legal);
675    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Legal);
676    // FIXME: When the target is 64-bit, STRICT_FP_ROUND will be overwritten
677    // as Custom.
678    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Legal);
679  }
680
681  // f128 uses xmm registers, but most operations require libcalls.
682  if (!Subtarget.useSoftFloat() && Subtarget.is64Bit() && Subtarget.hasSSE1()) {
683    addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
684                                                   : &X86::VR128RegClass);
685
686    addLegalFPImmediate(APFloat::getZero(APFloat::IEEEquad())); // xorps
687
688    setOperationAction(ISD::FADD,        MVT::f128, LibCall);
689    setOperationAction(ISD::STRICT_FADD, MVT::f128, LibCall);
690    setOperationAction(ISD::FSUB,        MVT::f128, LibCall);
691    setOperationAction(ISD::STRICT_FSUB, MVT::f128, LibCall);
692    setOperationAction(ISD::FDIV,        MVT::f128, LibCall);
693    setOperationAction(ISD::STRICT_FDIV, MVT::f128, LibCall);
694    setOperationAction(ISD::FMUL,        MVT::f128, LibCall);
695    setOperationAction(ISD::STRICT_FMUL, MVT::f128, LibCall);
696    setOperationAction(ISD::FMA,         MVT::f128, LibCall);
697    setOperationAction(ISD::STRICT_FMA,  MVT::f128, LibCall);
698
699    setOperationAction(ISD::FABS, MVT::f128, Custom);
700    setOperationAction(ISD::FNEG, MVT::f128, Custom);
701    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);
702
703    setOperationAction(ISD::FSIN,         MVT::f128, LibCall);
704    setOperationAction(ISD::STRICT_FSIN,  MVT::f128, LibCall);
705    setOperationAction(ISD::FCOS,         MVT::f128, LibCall);
706    setOperationAction(ISD::STRICT_FCOS,  MVT::f128, LibCall);
707    setOperationAction(ISD::FSINCOS,      MVT::f128, LibCall);
708    // No STRICT_FSINCOS
709    setOperationAction(ISD::FSQRT,        MVT::f128, LibCall);
710    setOperationAction(ISD::STRICT_FSQRT, MVT::f128, LibCall);
711
712    setOperationAction(ISD::FP_EXTEND,        MVT::f128, Custom);
713    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Custom);
714    // We need to custom handle any FP_ROUND with an f128 input, but
715    // LegalizeDAG uses the result type to know when to run a custom handler.
716    // So we have to list all legal floating point result types here.
717    if (isTypeLegal(MVT::f32)) {
718      setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
719      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
720    }
721    if (isTypeLegal(MVT::f64)) {
722      setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
723      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);
724    }
725    if (isTypeLegal(MVT::f80)) {
726      setOperationAction(ISD::FP_ROUND, MVT::f80, Custom);
727      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Custom);
728    }
729
730    setOperationAction(ISD::SETCC, MVT::f128, Custom);
731
732    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
733    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
734    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f80, Expand);
735    setTruncStoreAction(MVT::f128, MVT::f32, Expand);
736    setTruncStoreAction(MVT::f128, MVT::f64, Expand);
737    setTruncStoreAction(MVT::f128, MVT::f80, Expand);
738  }
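  // Sketch: LibCall for the f128 arithmetic above means e.g. (fadd fp128 ...)
  // becomes a call into the soft-float runtime (__addtf3 on most targets),
  // while FABS/FNEG/FCOPYSIGN stay Custom since they are just sign-bit masks
  // on the XMM value holding the f128.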
739
740  // Always use a library call for pow.
741  setOperationAction(ISD::FPOW             , MVT::f32  , Expand);
742  setOperationAction(ISD::FPOW             , MVT::f64  , Expand);
743  setOperationAction(ISD::FPOW             , MVT::f80  , Expand);
744  setOperationAction(ISD::FPOW             , MVT::f128 , Expand);
745
746  setOperationAction(ISD::FLOG, MVT::f80, Expand);
747  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
748  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
749  setOperationAction(ISD::FEXP, MVT::f80, Expand);
750  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
751  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
752  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
753
754  // Some FP actions are always expanded for vector types.
755  for (auto VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32,
756                   MVT::v2f64, MVT::v4f64, MVT::v8f64 }) {
757    setOperationAction(ISD::FSIN,      VT, Expand);
758    setOperationAction(ISD::FSINCOS,   VT, Expand);
759    setOperationAction(ISD::FCOS,      VT, Expand);
760    setOperationAction(ISD::FREM,      VT, Expand);
761    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
762    setOperationAction(ISD::FPOW,      VT, Expand);
763    setOperationAction(ISD::FLOG,      VT, Expand);
764    setOperationAction(ISD::FLOG2,     VT, Expand);
765    setOperationAction(ISD::FLOG10,    VT, Expand);
766    setOperationAction(ISD::FEXP,      VT, Expand);
767    setOperationAction(ISD::FEXP2,     VT, Expand);
768  }
769
770  // First set operation action for all vector types to either promote
771  // (for widening) or expand (for scalarization). Then we will selectively
772  // turn on ones that can be effectively codegen'd.
773  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
774    setOperationAction(ISD::SDIV, VT, Expand);
775    setOperationAction(ISD::UDIV, VT, Expand);
776    setOperationAction(ISD::SREM, VT, Expand);
777    setOperationAction(ISD::UREM, VT, Expand);
778    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
779    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
780    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
781    setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
782    setOperationAction(ISD::FMA,  VT, Expand);
783    setOperationAction(ISD::FFLOOR, VT, Expand);
784    setOperationAction(ISD::FCEIL, VT, Expand);
785    setOperationAction(ISD::FTRUNC, VT, Expand);
786    setOperationAction(ISD::FRINT, VT, Expand);
787    setOperationAction(ISD::FNEARBYINT, VT, Expand);
788    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
789    setOperationAction(ISD::MULHS, VT, Expand);
790    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
791    setOperationAction(ISD::MULHU, VT, Expand);
792    setOperationAction(ISD::SDIVREM, VT, Expand);
793    setOperationAction(ISD::UDIVREM, VT, Expand);
794    setOperationAction(ISD::CTPOP, VT, Expand);
795    setOperationAction(ISD::CTTZ, VT, Expand);
796    setOperationAction(ISD::CTLZ, VT, Expand);
797    setOperationAction(ISD::ROTL, VT, Expand);
798    setOperationAction(ISD::ROTR, VT, Expand);
799    setOperationAction(ISD::BSWAP, VT, Expand);
800    setOperationAction(ISD::SETCC, VT, Expand);
801    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
802    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
803    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
804    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
805    setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand);
806    setOperationAction(ISD::TRUNCATE, VT, Expand);
807    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
808    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
809    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
810    setOperationAction(ISD::SELECT_CC, VT, Expand);
811    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
812      setTruncStoreAction(InnerVT, VT, Expand);
813
814      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
815      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);
816
817      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
818      // types; we have to deal with them whether we ask for Expansion or not.
819      // Setting Expand causes its own optimisation problems though, so leave
820      // them legal.
821      if (VT.getVectorElementType() == MVT::i1)
822        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
823
824      // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
825      // split/scalarized right now.
826      if (VT.getVectorElementType() == MVT::f16)
827        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
828    }
829  }
830
831  // FIXME: In order to prevent SSE instructions from being expanded to MMX ones
832  // with -msoft-float, disable use of MMX as well.
833  if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
834    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
835    // No operations on x86mmx supported, everything uses intrinsics.
836  }
837
838  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
839    addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
840                                                    : &X86::VR128RegClass);
841
842    setOperationAction(ISD::FNEG,               MVT::v4f32, Custom);
843    setOperationAction(ISD::FABS,               MVT::v4f32, Custom);
844    setOperationAction(ISD::FCOPYSIGN,          MVT::v4f32, Custom);
845    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
846    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
847    setOperationAction(ISD::VSELECT,            MVT::v4f32, Custom);
848    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
849    setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
850
851    setOperationAction(ISD::LOAD,               MVT::v2f32, Custom);
852    setOperationAction(ISD::STORE,              MVT::v2f32, Custom);
853
854    setOperationAction(ISD::STRICT_FADD,        MVT::v4f32, Legal);
855    setOperationAction(ISD::STRICT_FSUB,        MVT::v4f32, Legal);
856    setOperationAction(ISD::STRICT_FMUL,        MVT::v4f32, Legal);
857    setOperationAction(ISD::STRICT_FDIV,        MVT::v4f32, Legal);
858    setOperationAction(ISD::STRICT_FSQRT,       MVT::v4f32, Legal);
859  }
860
861  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
862    addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
863                                                    : &X86::VR128RegClass);
864
865    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
866    // registers cannot be used even for integer operations.
867    addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
868                                                    : &X86::VR128RegClass);
869    addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
870                                                    : &X86::VR128RegClass);
871    addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
872                                                    : &X86::VR128RegClass);
873    addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
874                                                    : &X86::VR128RegClass);
875
876    for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8,
877                     MVT::v2i16, MVT::v4i16, MVT::v2i32 }) {
878      setOperationAction(ISD::SDIV, VT, Custom);
879      setOperationAction(ISD::SREM, VT, Custom);
880      setOperationAction(ISD::UDIV, VT, Custom);
881      setOperationAction(ISD::UREM, VT, Custom);
882    }
883
884    setOperationAction(ISD::MUL,                MVT::v2i8,  Custom);
885    setOperationAction(ISD::MUL,                MVT::v4i8,  Custom);
886    setOperationAction(ISD::MUL,                MVT::v8i8,  Custom);
887
888    setOperationAction(ISD::MUL,                MVT::v16i8, Custom);
889    setOperationAction(ISD::MUL,                MVT::v4i32, Custom);
890    setOperationAction(ISD::MUL,                MVT::v2i64, Custom);
891    setOperationAction(ISD::MULHU,              MVT::v4i32, Custom);
892    setOperationAction(ISD::MULHS,              MVT::v4i32, Custom);
893    setOperationAction(ISD::MULHU,              MVT::v16i8, Custom);
894    setOperationAction(ISD::MULHS,              MVT::v16i8, Custom);
895    setOperationAction(ISD::MULHU,              MVT::v8i16, Legal);
896    setOperationAction(ISD::MULHS,              MVT::v8i16, Legal);
897    setOperationAction(ISD::MUL,                MVT::v8i16, Legal);
898    setOperationAction(ISD::FNEG,               MVT::v2f64, Custom);
899    setOperationAction(ISD::FABS,               MVT::v2f64, Custom);
900    setOperationAction(ISD::FCOPYSIGN,          MVT::v2f64, Custom);
901
902    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
903      setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom);
904      setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom);
905      setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom);
906      setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
907    }
908
909    setOperationAction(ISD::UADDSAT,            MVT::v16i8, Legal);
910    setOperationAction(ISD::SADDSAT,            MVT::v16i8, Legal);
911    setOperationAction(ISD::USUBSAT,            MVT::v16i8, Legal);
912    setOperationAction(ISD::SSUBSAT,            MVT::v16i8, Legal);
913    setOperationAction(ISD::UADDSAT,            MVT::v8i16, Legal);
914    setOperationAction(ISD::SADDSAT,            MVT::v8i16, Legal);
915    setOperationAction(ISD::USUBSAT,            MVT::v8i16, Legal);
916    setOperationAction(ISD::SSUBSAT,            MVT::v8i16, Legal);
917    setOperationAction(ISD::UADDSAT,            MVT::v4i32, Custom);
918    setOperationAction(ISD::USUBSAT,            MVT::v4i32, Custom);
919    setOperationAction(ISD::UADDSAT,            MVT::v2i64, Custom);
920    setOperationAction(ISD::USUBSAT,            MVT::v2i64, Custom);
921
922    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
923    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i32, Custom);
924    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f32, Custom);
925
926    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
927      setOperationAction(ISD::SETCC,              VT, Custom);
928      setOperationAction(ISD::STRICT_FSETCC,      VT, Custom);
929      setOperationAction(ISD::STRICT_FSETCCS,     VT, Custom);
930      setOperationAction(ISD::CTPOP,              VT, Custom);
931      setOperationAction(ISD::ABS,                VT, Custom);
932
933      // The condition codes aren't legal in SSE/AVX, and under AVX512 we use
934      // setcc all the way to isel and prefer SETGT in some isel patterns.
935      setCondCodeAction(ISD::SETLT, VT, Custom);
936      setCondCodeAction(ISD::SETLE, VT, Custom);
937    }
938
939    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
940      setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
941      setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
942      setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
943      setOperationAction(ISD::VSELECT,            VT, Custom);
944      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
945    }
946
947    for (auto VT : { MVT::v2f64, MVT::v2i64 }) {
948      setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
949      setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
950      setOperationAction(ISD::VSELECT,            VT, Custom);
951
952      if (VT == MVT::v2i64 && !Subtarget.is64Bit())
953        continue;
954
955      setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
956      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
957    }
958
959    // Custom lower v2i64 and v2f64 selects.
960    setOperationAction(ISD::SELECT,             MVT::v2f64, Custom);
961    setOperationAction(ISD::SELECT,             MVT::v2i64, Custom);
962    setOperationAction(ISD::SELECT,             MVT::v4i32, Custom);
963    setOperationAction(ISD::SELECT,             MVT::v8i16, Custom);
964    setOperationAction(ISD::SELECT,             MVT::v16i8, Custom);
965
966    setOperationAction(ISD::FP_TO_SINT,         MVT::v4i32, Legal);
967    setOperationAction(ISD::FP_TO_SINT,         MVT::v2i32, Custom);
968    setOperationAction(ISD::STRICT_FP_TO_SINT,  MVT::v4i32, Legal);
969    setOperationAction(ISD::STRICT_FP_TO_SINT,  MVT::v2i32, Custom);
970
971    // Custom legalize these to avoid over promotion or custom promotion.
972    for (auto VT : {MVT::v2i8, MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16}) {
973      setOperationAction(ISD::FP_TO_SINT,        VT, Custom);
974      setOperationAction(ISD::FP_TO_UINT,        VT, Custom);
975      setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Custom);
976      setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Custom);
977    }
978
979    setOperationAction(ISD::SINT_TO_FP,         MVT::v4i32, Legal);
980    setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v4i32, Legal);
981    setOperationAction(ISD::SINT_TO_FP,         MVT::v2i32, Custom);
982    setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v2i32, Custom);
983
984    setOperationAction(ISD::UINT_TO_FP,         MVT::v2i32, Custom);
985    setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v2i32, Custom);
986
987    setOperationAction(ISD::UINT_TO_FP,         MVT::v4i32, Custom);
988    setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v4i32, Custom);
989
990    // Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion.
991    setOperationAction(ISD::SINT_TO_FP,         MVT::v2f32, Custom);
992    setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v2f32, Custom);
993    setOperationAction(ISD::UINT_TO_FP,         MVT::v2f32, Custom);
994    setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v2f32, Custom);
995
996    setOperationAction(ISD::FP_EXTEND,          MVT::v2f32, Custom);
997    setOperationAction(ISD::STRICT_FP_EXTEND,   MVT::v2f32, Custom);
998    setOperationAction(ISD::FP_ROUND,           MVT::v2f32, Custom);
999    setOperationAction(ISD::STRICT_FP_ROUND,    MVT::v2f32, Custom);
1000
1001    // We want to legalize this to an f64 load rather than an i64 load on
1002    // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for
1003    // store.
1004    setOperationAction(ISD::LOAD,               MVT::v2i32, Custom);
1005    setOperationAction(ISD::LOAD,               MVT::v4i16, Custom);
1006    setOperationAction(ISD::LOAD,               MVT::v8i8,  Custom);
1007    setOperationAction(ISD::STORE,              MVT::v2i32, Custom);
1008    setOperationAction(ISD::STORE,              MVT::v4i16, Custom);
1009    setOperationAction(ISD::STORE,              MVT::v8i8,  Custom);
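    // Sketch: with the Custom entries above, a v2i32 load on x86-64 becomes a
    // single 8-byte MOVQ/MOVSD into an XMM register rather than an i64 GPR
    // load, and a 32-bit target uses the equivalent two 32-bit pieces.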
1010
1011    setOperationAction(ISD::BITCAST,            MVT::v2i32, Custom);
1012    setOperationAction(ISD::BITCAST,            MVT::v4i16, Custom);
1013    setOperationAction(ISD::BITCAST,            MVT::v8i8,  Custom);
1014    if (!Subtarget.hasAVX512())
1015      setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);
1016
1017    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
1018    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
1019    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);
1020
1021    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
1022
1023    setOperationAction(ISD::TRUNCATE,    MVT::v2i8,  Custom);
1024    setOperationAction(ISD::TRUNCATE,    MVT::v2i16, Custom);
1025    setOperationAction(ISD::TRUNCATE,    MVT::v2i32, Custom);
1026    setOperationAction(ISD::TRUNCATE,    MVT::v4i8,  Custom);
1027    setOperationAction(ISD::TRUNCATE,    MVT::v4i16, Custom);
1028    setOperationAction(ISD::TRUNCATE,    MVT::v8i8,  Custom);
1029
1030    // In the customized shift lowering, the legal v4i32/v2i64 cases
1031    // in AVX2 will be recognized.
1032    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1033      setOperationAction(ISD::SRL,              VT, Custom);
1034      setOperationAction(ISD::SHL,              VT, Custom);
1035      setOperationAction(ISD::SRA,              VT, Custom);
1036    }
1037
1038    setOperationAction(ISD::ROTL,               MVT::v4i32, Custom);
1039    setOperationAction(ISD::ROTL,               MVT::v8i16, Custom);
1040
1041    // With AVX512, expanding (and promoting the shifts) is better.
1042    if (!Subtarget.hasAVX512())
1043      setOperationAction(ISD::ROTL,             MVT::v16i8, Custom);
1044
1045    setOperationAction(ISD::STRICT_FSQRT,       MVT::v2f64, Legal);
1046    setOperationAction(ISD::STRICT_FADD,        MVT::v2f64, Legal);
1047    setOperationAction(ISD::STRICT_FSUB,        MVT::v2f64, Legal);
1048    setOperationAction(ISD::STRICT_FMUL,        MVT::v2f64, Legal);
1049    setOperationAction(ISD::STRICT_FDIV,        MVT::v2f64, Legal);
1050  }
1051
1052  if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
1053    setOperationAction(ISD::ABS,                MVT::v16i8, Legal);
1054    setOperationAction(ISD::ABS,                MVT::v8i16, Legal);
1055    setOperationAction(ISD::ABS,                MVT::v4i32, Legal);
1056    setOperationAction(ISD::BITREVERSE,         MVT::v16i8, Custom);
1057    setOperationAction(ISD::CTLZ,               MVT::v16i8, Custom);
1058    setOperationAction(ISD::CTLZ,               MVT::v8i16, Custom);
1059    setOperationAction(ISD::CTLZ,               MVT::v4i32, Custom);
1060    setOperationAction(ISD::CTLZ,               MVT::v2i64, Custom);
1061
1062    // These might be better off as horizontal vector ops.
1063    setOperationAction(ISD::ADD,                MVT::i16, Custom);
1064    setOperationAction(ISD::ADD,                MVT::i32, Custom);
1065    setOperationAction(ISD::SUB,                MVT::i16, Custom);
1066    setOperationAction(ISD::SUB,                MVT::i32, Custom);
1067  }
1068
1069  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
1070    for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
1071      setOperationAction(ISD::FFLOOR,            RoundedTy,  Legal);
1072      setOperationAction(ISD::STRICT_FFLOOR,     RoundedTy,  Legal);
1073      setOperationAction(ISD::FCEIL,             RoundedTy,  Legal);
1074      setOperationAction(ISD::STRICT_FCEIL,      RoundedTy,  Legal);
1075      setOperationAction(ISD::FTRUNC,            RoundedTy,  Legal);
1076      setOperationAction(ISD::STRICT_FTRUNC,     RoundedTy,  Legal);
1077      setOperationAction(ISD::FRINT,             RoundedTy,  Legal);
1078      setOperationAction(ISD::STRICT_FRINT,      RoundedTy,  Legal);
1079      setOperationAction(ISD::FNEARBYINT,        RoundedTy,  Legal);
1080      setOperationAction(ISD::STRICT_FNEARBYINT, RoundedTy,  Legal);
1081    }
1082
1083    setOperationAction(ISD::SMAX,               MVT::v16i8, Legal);
1084    setOperationAction(ISD::SMAX,               MVT::v4i32, Legal);
1085    setOperationAction(ISD::UMAX,               MVT::v8i16, Legal);
1086    setOperationAction(ISD::UMAX,               MVT::v4i32, Legal);
1087    setOperationAction(ISD::SMIN,               MVT::v16i8, Legal);
1088    setOperationAction(ISD::SMIN,               MVT::v4i32, Legal);
1089    setOperationAction(ISD::UMIN,               MVT::v8i16, Legal);
1090    setOperationAction(ISD::UMIN,               MVT::v4i32, Legal);
1091
1092    // FIXME: Do we need to handle scalar-to-vector here?
1093    setOperationAction(ISD::MUL,                MVT::v4i32, Legal);
1094
1095    // We directly match byte blends in the backend as they match the VSELECT
1096    // condition form.
1097    setOperationAction(ISD::VSELECT,            MVT::v16i8, Legal);
1098
1099    // SSE41 brings specific instructions for doing vector sign extend even in
1100    // cases where we don't have SRA.
1101    for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1102      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
1103      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
1104    }
1105
1106    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
1107    for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1108      setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8,  Legal);
1109      setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8,  Legal);
1110      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8,  Legal);
1111      setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
1112      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
1113      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
1114    }
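    // Sketch: with the PMOV[SZ]X load-ext entries above, a sign-extending
    // load from <4 x i8> to <4 x i32> can select PMOVSXBD directly from
    // memory.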
1115
1116    // i8 vectors are custom because the source register and source
1117    // memory operand types are not the same width.
1118    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v16i8, Custom);
1119
1120    if (Subtarget.is64Bit() && !Subtarget.hasAVX512()) {
1121      // We need to scalarize v4i64->v4f32 uint_to_fp using cvtsi2ss, but we can
1122      // do the pre and post work in the vector domain.
1123      setOperationAction(ISD::UINT_TO_FP,        MVT::v4i64, Custom);
1124      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i64, Custom);
1125      // We need to mark SINT_TO_FP as Custom even though we want to expand it
1126      // so that DAG combine doesn't try to turn it into uint_to_fp.
1127      setOperationAction(ISD::SINT_TO_FP,        MVT::v4i64, Custom);
1128      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i64, Custom);
1129    }
1130  }
1131
1132  if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
1133    for (auto VT : { MVT::v16i8, MVT::v8i16,  MVT::v4i32, MVT::v2i64,
1134                     MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
1135      setOperationAction(ISD::ROTL, VT, Custom);
1136
1137    // XOP can efficiently perform BITREVERSE with VPPERM.
1138    for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
1139      setOperationAction(ISD::BITREVERSE, VT, Custom);
1140
1141    for (auto VT : { MVT::v16i8, MVT::v8i16,  MVT::v4i32, MVT::v2i64,
1142                     MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
1143      setOperationAction(ISD::BITREVERSE, VT, Custom);
1144  }
1145
1146  if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
1147    bool HasInt256 = Subtarget.hasInt256();
1148
1149    addRegisterClass(MVT::v32i8,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1150                                                     : &X86::VR256RegClass);
1151    addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1152                                                     : &X86::VR256RegClass);
1153    addRegisterClass(MVT::v8i32,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1154                                                     : &X86::VR256RegClass);
1155    addRegisterClass(MVT::v8f32,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1156                                                     : &X86::VR256RegClass);
1157    addRegisterClass(MVT::v4i64,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1158                                                     : &X86::VR256RegClass);
1159    addRegisterClass(MVT::v4f64,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1160                                                     : &X86::VR256RegClass);
1161
1162    for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
1163      setOperationAction(ISD::FFLOOR,            VT, Legal);
1164      setOperationAction(ISD::STRICT_FFLOOR,     VT, Legal);
1165      setOperationAction(ISD::FCEIL,             VT, Legal);
1166      setOperationAction(ISD::STRICT_FCEIL,      VT, Legal);
1167      setOperationAction(ISD::FTRUNC,            VT, Legal);
1168      setOperationAction(ISD::STRICT_FTRUNC,     VT, Legal);
1169      setOperationAction(ISD::FRINT,             VT, Legal);
1170      setOperationAction(ISD::STRICT_FRINT,      VT, Legal);
1171      setOperationAction(ISD::FNEARBYINT,        VT, Legal);
1172      setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1173      setOperationAction(ISD::FNEG,              VT, Custom);
1174      setOperationAction(ISD::FABS,              VT, Custom);
1175      setOperationAction(ISD::FCOPYSIGN,         VT, Custom);
1176    }
1177
1178    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
1179    // even though v8i16 is a legal type.
1180    setOperationPromotedToType(ISD::FP_TO_SINT,        MVT::v8i16, MVT::v8i32);
1181    setOperationPromotedToType(ISD::FP_TO_UINT,        MVT::v8i16, MVT::v8i32);
1182    setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i16, MVT::v8i32);
1183    setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i16, MVT::v8i32);
1184    setOperationAction(ISD::FP_TO_SINT,                MVT::v8i32, Legal);
1185    setOperationAction(ISD::STRICT_FP_TO_SINT,         MVT::v8i32, Legal);
1186
1187    setOperationAction(ISD::SINT_TO_FP,         MVT::v8i32, Legal);
1188    setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v8i32, Legal);
1189
1190    setOperationAction(ISD::STRICT_FP_ROUND,    MVT::v4f32, Legal);
1191    setOperationAction(ISD::STRICT_FADD,        MVT::v8f32, Legal);
1192    setOperationAction(ISD::STRICT_FADD,        MVT::v4f64, Legal);
1193    setOperationAction(ISD::STRICT_FSUB,        MVT::v8f32, Legal);
1194    setOperationAction(ISD::STRICT_FSUB,        MVT::v4f64, Legal);
1195    setOperationAction(ISD::STRICT_FMUL,        MVT::v8f32, Legal);
1196    setOperationAction(ISD::STRICT_FMUL,        MVT::v4f64, Legal);
1197    setOperationAction(ISD::STRICT_FDIV,        MVT::v8f32, Legal);
1198    setOperationAction(ISD::STRICT_FDIV,        MVT::v4f64, Legal);
1199    setOperationAction(ISD::STRICT_FP_EXTEND,   MVT::v4f64, Legal);
1200    setOperationAction(ISD::STRICT_FSQRT,       MVT::v8f32, Legal);
1201    setOperationAction(ISD::STRICT_FSQRT,       MVT::v4f64, Legal);
1202
1203    if (!Subtarget.hasAVX512())
1204      setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);
1205
1206    // In the custom shift lowering, the v8i32/v4i64 cases that are legal
1207    // in AVX2 will be recognized.
1208    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1209      setOperationAction(ISD::SRL, VT, Custom);
1210      setOperationAction(ISD::SHL, VT, Custom);
1211      setOperationAction(ISD::SRA, VT, Custom);
1212    }
1213
1214    // These types need custom splitting if their input is a 128-bit vector.
1215    setOperationAction(ISD::SIGN_EXTEND,       MVT::v8i64,  Custom);
1216    setOperationAction(ISD::SIGN_EXTEND,       MVT::v16i32, Custom);
1217    setOperationAction(ISD::ZERO_EXTEND,       MVT::v8i64,  Custom);
1218    setOperationAction(ISD::ZERO_EXTEND,       MVT::v16i32, Custom);
1219
1220    setOperationAction(ISD::ROTL,              MVT::v8i32,  Custom);
1221    setOperationAction(ISD::ROTL,              MVT::v16i16, Custom);
1222
1223    // With BWI, expanding (and promoting the shifts) is better.
1224    if (!Subtarget.hasBWI())
1225      setOperationAction(ISD::ROTL,            MVT::v32i8,  Custom);
1226
1227    setOperationAction(ISD::SELECT,            MVT::v4f64, Custom);
1228    setOperationAction(ISD::SELECT,            MVT::v4i64, Custom);
1229    setOperationAction(ISD::SELECT,            MVT::v8i32, Custom);
1230    setOperationAction(ISD::SELECT,            MVT::v16i16, Custom);
1231    setOperationAction(ISD::SELECT,            MVT::v32i8, Custom);
1232    setOperationAction(ISD::SELECT,            MVT::v8f32, Custom);
1233
1234    for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1235      setOperationAction(ISD::SIGN_EXTEND,     VT, Custom);
1236      setOperationAction(ISD::ZERO_EXTEND,     VT, Custom);
1237      setOperationAction(ISD::ANY_EXTEND,      VT, Custom);
1238    }
1239
1240    setOperationAction(ISD::TRUNCATE,          MVT::v16i8, Custom);
1241    setOperationAction(ISD::TRUNCATE,          MVT::v8i16, Custom);
1242    setOperationAction(ISD::TRUNCATE,          MVT::v4i32, Custom);
1243    setOperationAction(ISD::BITREVERSE,        MVT::v32i8, Custom);
1244
1245    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1246      setOperationAction(ISD::SETCC,           VT, Custom);
1247      setOperationAction(ISD::STRICT_FSETCC,   VT, Custom);
1248      setOperationAction(ISD::STRICT_FSETCCS,  VT, Custom);
1249      setOperationAction(ISD::CTPOP,           VT, Custom);
1250      setOperationAction(ISD::CTLZ,            VT, Custom);
1251
1252      // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1253      // setcc all the way to isel and prefer SETGT in some isel patterns.
1254      setCondCodeAction(ISD::SETLT, VT, Custom);
1255      setCondCodeAction(ISD::SETLE, VT, Custom);
1256    }
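        // Note: the SSE/AVX integer compares only come in equality and
        // greater-than forms (PCMPEQ/PCMPGT), so SETLT/SETLE are typically
        // handled by swapping the operands and using the SETGT patterns
        // mentioned above.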
1257
1258    if (Subtarget.hasAnyFMA()) {
1259      for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
1260                       MVT::v2f64, MVT::v4f64 }) {
1261        setOperationAction(ISD::FMA, VT, Legal);
1262        setOperationAction(ISD::STRICT_FMA, VT, Legal);
1263      }
1264    }
1265
1266    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1267      setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
1268      setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
1269    }
1270
1271    setOperationAction(ISD::MUL,       MVT::v4i64,  Custom);
1272    setOperationAction(ISD::MUL,       MVT::v8i32,  HasInt256 ? Legal : Custom);
1273    setOperationAction(ISD::MUL,       MVT::v16i16, HasInt256 ? Legal : Custom);
1274    setOperationAction(ISD::MUL,       MVT::v32i8,  Custom);
1275
1276    setOperationAction(ISD::MULHU,     MVT::v8i32,  Custom);
1277    setOperationAction(ISD::MULHS,     MVT::v8i32,  Custom);
1278    setOperationAction(ISD::MULHU,     MVT::v16i16, HasInt256 ? Legal : Custom);
1279    setOperationAction(ISD::MULHS,     MVT::v16i16, HasInt256 ? Legal : Custom);
1280    setOperationAction(ISD::MULHU,     MVT::v32i8,  Custom);
1281    setOperationAction(ISD::MULHS,     MVT::v32i8,  Custom);
1282
1283    setOperationAction(ISD::ABS,       MVT::v4i64,  Custom);
1284    setOperationAction(ISD::SMAX,      MVT::v4i64,  Custom);
1285    setOperationAction(ISD::UMAX,      MVT::v4i64,  Custom);
1286    setOperationAction(ISD::SMIN,      MVT::v4i64,  Custom);
1287    setOperationAction(ISD::UMIN,      MVT::v4i64,  Custom);
1288
1289    setOperationAction(ISD::UADDSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1290    setOperationAction(ISD::SADDSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1291    setOperationAction(ISD::USUBSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1292    setOperationAction(ISD::SSUBSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1293    setOperationAction(ISD::UADDSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1294    setOperationAction(ISD::SADDSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1295    setOperationAction(ISD::USUBSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1296    setOperationAction(ISD::SSUBSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1297
1298    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
1299      setOperationAction(ISD::ABS,  VT, HasInt256 ? Legal : Custom);
1300      setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
1301      setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
1302      setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
1303      setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
1304    }
1305
1306    for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) {
1307      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1308      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1309    }
1310
1311    if (HasInt256) {
1312      // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1313      // when we have a 256-bit-wide blend with immediate.
1314      setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1315      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32, Custom);
1316
1317      // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1318      for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1319        setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal);
1320        setLoadExtAction(LoadExtOp, MVT::v8i32,  MVT::v8i8,  Legal);
1321        setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i8,  Legal);
1322        setLoadExtAction(LoadExtOp, MVT::v8i32,  MVT::v8i16, Legal);
1323        setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i16, Legal);
1324        setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i32, Legal);
1325      }
1326    }
1327
1328    for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1329                     MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
1330      setOperationAction(ISD::MLOAD,  VT, Subtarget.hasVLX() ? Legal : Custom);
1331      setOperationAction(ISD::MSTORE, VT, Legal);
1332    }
1333
1334    // Extract subvector is special because the value type
1335    // (result) is 128-bit but the source is 256-bit wide.
1336    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1337                     MVT::v4f32, MVT::v2f64 }) {
1338      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1339    }
1340
1341    // Custom lower several nodes for 256-bit types.
1342    for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1343                    MVT::v8f32, MVT::v4f64 }) {
1344      setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
1345      setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
1346      setOperationAction(ISD::VSELECT,            VT, Custom);
1347      setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
1348      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1349      setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
1350      setOperationAction(ISD::INSERT_SUBVECTOR,   VT, Legal);
1351      setOperationAction(ISD::CONCAT_VECTORS,     VT, Custom);
1352      setOperationAction(ISD::STORE,              VT, Custom);
1353    }
1354
1355    if (HasInt256) {
1356      setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1357
1358      // Custom legalize 2x32 to get a little better code.
1359      setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
1360      setOperationAction(ISD::MGATHER, MVT::v2i32, Custom);
1361
1362      for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1363                       MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1364        setOperationAction(ISD::MGATHER,  VT, Custom);
1365    }
1366  }
1367
1368  // This block controls legalization of the mask vector sizes that are
1369  // available with AVX512. 512-bit vectors are in a separate block controlled
1370  // by useAVX512Regs.
1371  if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1372    addRegisterClass(MVT::v1i1,   &X86::VK1RegClass);
1373    addRegisterClass(MVT::v2i1,   &X86::VK2RegClass);
1374    addRegisterClass(MVT::v4i1,   &X86::VK4RegClass);
1375    addRegisterClass(MVT::v8i1,   &X86::VK8RegClass);
1376    addRegisterClass(MVT::v16i1,  &X86::VK16RegClass);
1377
1378    setOperationAction(ISD::SELECT,             MVT::v1i1, Custom);
1379    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
1380    setOperationAction(ISD::BUILD_VECTOR,       MVT::v1i1, Custom);
1381
1382    setOperationPromotedToType(ISD::FP_TO_SINT,        MVT::v8i1,  MVT::v8i32);
1383    setOperationPromotedToType(ISD::FP_TO_UINT,        MVT::v8i1,  MVT::v8i32);
1384    setOperationPromotedToType(ISD::FP_TO_SINT,        MVT::v4i1,  MVT::v4i32);
1385    setOperationPromotedToType(ISD::FP_TO_UINT,        MVT::v4i1,  MVT::v4i32);
1386    setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i1,  MVT::v8i32);
1387    setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i1,  MVT::v8i32);
1388    setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v4i1,  MVT::v4i32);
1389    setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v4i1,  MVT::v4i32);
1390    setOperationAction(ISD::FP_TO_SINT,                MVT::v2i1,  Custom);
1391    setOperationAction(ISD::FP_TO_UINT,                MVT::v2i1,  Custom);
1392    setOperationAction(ISD::STRICT_FP_TO_SINT,         MVT::v2i1,  Custom);
1393    setOperationAction(ISD::STRICT_FP_TO_UINT,         MVT::v2i1,  Custom);
1394
1395    // There is no byte-sized k-register load or store without AVX512DQ.
1396    if (!Subtarget.hasDQI()) {
1397      setOperationAction(ISD::LOAD, MVT::v1i1, Custom);
1398      setOperationAction(ISD::LOAD, MVT::v2i1, Custom);
1399      setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
1400      setOperationAction(ISD::LOAD, MVT::v8i1, Custom);
1401
1402      setOperationAction(ISD::STORE, MVT::v1i1, Custom);
1403      setOperationAction(ISD::STORE, MVT::v2i1, Custom);
1404      setOperationAction(ISD::STORE, MVT::v4i1, Custom);
1405      setOperationAction(ISD::STORE, MVT::v8i1, Custom);
1406    }
1407
1408    // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors.
1409    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1410      setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1411      setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1412      setOperationAction(ISD::ANY_EXTEND,  VT, Custom);
1413    }
1414
1415    for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
1416      setOperationAction(ISD::ADD,              VT, Custom);
1417      setOperationAction(ISD::SUB,              VT, Custom);
1418      setOperationAction(ISD::MUL,              VT, Custom);
1419      setOperationAction(ISD::SETCC,            VT, Custom);
1420      setOperationAction(ISD::STRICT_FSETCC,    VT, Custom);
1421      setOperationAction(ISD::STRICT_FSETCCS,   VT, Custom);
1422      setOperationAction(ISD::SELECT,           VT, Custom);
1423      setOperationAction(ISD::TRUNCATE,         VT, Custom);
1424      setOperationAction(ISD::UADDSAT,          VT, Custom);
1425      setOperationAction(ISD::SADDSAT,          VT, Custom);
1426      setOperationAction(ISD::USUBSAT,          VT, Custom);
1427      setOperationAction(ISD::SSUBSAT,          VT, Custom);
1428
1429      setOperationAction(ISD::BUILD_VECTOR,     VT, Custom);
1430      setOperationAction(ISD::CONCAT_VECTORS,   VT, Custom);
1431      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1432      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1433      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1434      setOperationAction(ISD::VECTOR_SHUFFLE,   VT, Custom);
1435      setOperationAction(ISD::VSELECT,          VT, Expand);
1436    }
1437
1438    for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 })
1439      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1440  }
1441
1442  // This block controls legalization for 512-bit operations with 32/64-bit
1443  // elements. 512-bit vectors can be disabled based on prefer-vector-width
1444  // and required-vector-width function attributes.
1445  if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
1446    addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1447    addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1448    addRegisterClass(MVT::v8i64,  &X86::VR512RegClass);
1449    addRegisterClass(MVT::v8f64,  &X86::VR512RegClass);
1450
1451    for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1452      setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8,  Legal);
1453      setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
1454      setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i8,   Legal);
1455      setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i16,  Legal);
1456      setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i32,  Legal);
1457    }
1458
1459    for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
1460      setOperationAction(ISD::FNEG,  VT, Custom);
1461      setOperationAction(ISD::FABS,  VT, Custom);
1462      setOperationAction(ISD::FMA,   VT, Legal);
1463      setOperationAction(ISD::STRICT_FMA, VT, Legal);
1464      setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1465    }
1466
1467    for (MVT VT : { MVT::v16i1, MVT::v16i8, MVT::v16i16 }) {
1468      setOperationPromotedToType(ISD::FP_TO_SINT       , VT, MVT::v16i32);
1469      setOperationPromotedToType(ISD::FP_TO_UINT       , VT, MVT::v16i32);
1470      setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, VT, MVT::v16i32);
1471      setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, VT, MVT::v16i32);
1472    }
1473    setOperationAction(ISD::FP_TO_SINT,        MVT::v16i32, Legal);
1474    setOperationAction(ISD::FP_TO_UINT,        MVT::v16i32, Legal);
1475    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v16i32, Legal);
1476    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v16i32, Legal);
1477    setOperationAction(ISD::SINT_TO_FP,        MVT::v16i32, Legal);
1478    setOperationAction(ISD::UINT_TO_FP,        MVT::v16i32, Legal);
1479    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i32, Legal);
1480    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i32, Legal);
1481
1482    setOperationAction(ISD::STRICT_FADD,      MVT::v16f32, Legal);
1483    setOperationAction(ISD::STRICT_FADD,      MVT::v8f64,  Legal);
1484    setOperationAction(ISD::STRICT_FSUB,      MVT::v16f32, Legal);
1485    setOperationAction(ISD::STRICT_FSUB,      MVT::v8f64,  Legal);
1486    setOperationAction(ISD::STRICT_FMUL,      MVT::v16f32, Legal);
1487    setOperationAction(ISD::STRICT_FMUL,      MVT::v8f64,  Legal);
1488    setOperationAction(ISD::STRICT_FDIV,      MVT::v16f32, Legal);
1489    setOperationAction(ISD::STRICT_FDIV,      MVT::v8f64,  Legal);
1490    setOperationAction(ISD::STRICT_FSQRT,     MVT::v16f32, Legal);
1491    setOperationAction(ISD::STRICT_FSQRT,     MVT::v8f64,  Legal);
1492    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f64,  Legal);
1493    setOperationAction(ISD::STRICT_FP_ROUND,  MVT::v8f32,  Legal);
1494
1495    setTruncStoreAction(MVT::v8i64,   MVT::v8i8,   Legal);
1496    setTruncStoreAction(MVT::v8i64,   MVT::v8i16,  Legal);
1497    setTruncStoreAction(MVT::v8i64,   MVT::v8i32,  Legal);
1498    setTruncStoreAction(MVT::v16i32,  MVT::v16i8,  Legal);
1499    setTruncStoreAction(MVT::v16i32,  MVT::v16i16, Legal);
1500
1501    // With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE
1502    // to 512-bit rather than use the AVX2 instructions so that we can use
1503    // k-masks.
1504    if (!Subtarget.hasVLX()) {
1505      for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1506           MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) {
1507        setOperationAction(ISD::MLOAD,  VT, Custom);
1508        setOperationAction(ISD::MSTORE, VT, Custom);
1509      }
1510    }
1511
1512    setOperationAction(ISD::TRUNCATE,           MVT::v8i32, Custom);
1513    setOperationAction(ISD::TRUNCATE,           MVT::v16i16, Custom);
1514    setOperationAction(ISD::ZERO_EXTEND,        MVT::v16i32, Custom);
1515    setOperationAction(ISD::ZERO_EXTEND,        MVT::v8i64, Custom);
1516    setOperationAction(ISD::ANY_EXTEND,         MVT::v16i32, Custom);
1517    setOperationAction(ISD::ANY_EXTEND,         MVT::v8i64, Custom);
1518    setOperationAction(ISD::SIGN_EXTEND,        MVT::v16i32, Custom);
1519    setOperationAction(ISD::SIGN_EXTEND,        MVT::v8i64, Custom);
1520
1521    // Need to custom widen this if we don't have AVX512BW.
1522    setOperationAction(ISD::ANY_EXTEND,         MVT::v8i8, Custom);
1523    setOperationAction(ISD::ZERO_EXTEND,        MVT::v8i8, Custom);
1524    setOperationAction(ISD::SIGN_EXTEND,        MVT::v8i8, Custom);
1525
1526    for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
1527      setOperationAction(ISD::FFLOOR,            VT, Legal);
1528      setOperationAction(ISD::STRICT_FFLOOR,     VT, Legal);
1529      setOperationAction(ISD::FCEIL,             VT, Legal);
1530      setOperationAction(ISD::STRICT_FCEIL,      VT, Legal);
1531      setOperationAction(ISD::FTRUNC,            VT, Legal);
1532      setOperationAction(ISD::STRICT_FTRUNC,     VT, Legal);
1533      setOperationAction(ISD::FRINT,             VT, Legal);
1534      setOperationAction(ISD::STRICT_FRINT,      VT, Legal);
1535      setOperationAction(ISD::FNEARBYINT,        VT, Legal);
1536      setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1537
1538      setOperationAction(ISD::SELECT,           VT, Custom);
1539    }
1540
1541    // Without BWI we need to use custom lowering to handle MVT::v64i8 input.
1542    for (auto VT : {MVT::v16i32, MVT::v8i64, MVT::v64i8}) {
1543      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1544      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1545    }
1546
1547    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v8f64,  Custom);
1548    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v8i64,  Custom);
1549    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v16f32, Custom);
1550    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v16i32, Custom);
1551
1552    setOperationAction(ISD::MUL,                MVT::v8i64, Custom);
1553    setOperationAction(ISD::MUL,                MVT::v16i32, Legal);
1554
1555    setOperationAction(ISD::MULHU,              MVT::v16i32,  Custom);
1556    setOperationAction(ISD::MULHS,              MVT::v16i32,  Custom);
1557
1558    for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1559      setOperationAction(ISD::SMAX,             VT, Legal);
1560      setOperationAction(ISD::UMAX,             VT, Legal);
1561      setOperationAction(ISD::SMIN,             VT, Legal);
1562      setOperationAction(ISD::UMIN,             VT, Legal);
1563      setOperationAction(ISD::ABS,              VT, Legal);
1564      setOperationAction(ISD::SRL,              VT, Custom);
1565      setOperationAction(ISD::SHL,              VT, Custom);
1566      setOperationAction(ISD::SRA,              VT, Custom);
1567      setOperationAction(ISD::CTPOP,            VT, Custom);
1568      setOperationAction(ISD::ROTL,             VT, Custom);
1569      setOperationAction(ISD::ROTR,             VT, Custom);
1570      setOperationAction(ISD::SETCC,            VT, Custom);
1571      setOperationAction(ISD::STRICT_FSETCC,    VT, Custom);
1572      setOperationAction(ISD::STRICT_FSETCCS,   VT, Custom);
1573      setOperationAction(ISD::SELECT,           VT, Custom);
1574
1575      // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1576      // setcc all the way to isel and prefer SETGT in some isel patterns.
1577      setCondCodeAction(ISD::SETLT, VT, Custom);
1578      setCondCodeAction(ISD::SETLE, VT, Custom);
1579    }
1580
1581    if (Subtarget.hasDQI()) {
1582      setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal);
1583      setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal);
1584      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i64, Legal);
1585      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i64, Legal);
1586      setOperationAction(ISD::FP_TO_SINT, MVT::v8i64, Legal);
1587      setOperationAction(ISD::FP_TO_UINT, MVT::v8i64, Legal);
1588      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i64, Legal);
1589      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i64, Legal);
1590
1591      setOperationAction(ISD::MUL,        MVT::v8i64, Legal);
1592    }
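        // Note: the DQI operations above are Legal because AVX512DQ adds VPMULLQ
        // and native 64-bit integer <-> floating-point conversion instructions.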
1593
1594    if (Subtarget.hasCDI()) {
1595      // Non-VLX subtargets extend 128/256-bit vectors to use the 512-bit form.
1596      for (auto VT : { MVT::v16i32, MVT::v8i64} ) {
1597        setOperationAction(ISD::CTLZ,            VT, Legal);
1598      }
1599    } // Subtarget.hasCDI()
1600
1601    if (Subtarget.hasVPOPCNTDQ()) {
1602      for (auto VT : { MVT::v16i32, MVT::v8i64 })
1603        setOperationAction(ISD::CTPOP, VT, Legal);
1604    }
1605
1606    // Extract subvector is special because the value type
1607    // (result) is 256-bit but the source is 512-bit wide.
1608    // 128-bit was made Legal under AVX1.
1609    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1610                     MVT::v8f32, MVT::v4f64 })
1611      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1612
1613    for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
1614      setOperationAction(ISD::VECTOR_SHUFFLE,      VT, Custom);
1615      setOperationAction(ISD::INSERT_VECTOR_ELT,   VT, Custom);
1616      setOperationAction(ISD::BUILD_VECTOR,        VT, Custom);
1617      setOperationAction(ISD::VSELECT,             VT, Custom);
1618      setOperationAction(ISD::EXTRACT_VECTOR_ELT,  VT, Custom);
1619      setOperationAction(ISD::SCALAR_TO_VECTOR,    VT, Custom);
1620      setOperationAction(ISD::INSERT_SUBVECTOR,    VT, Legal);
1621      setOperationAction(ISD::MLOAD,               VT, Legal);
1622      setOperationAction(ISD::MSTORE,              VT, Legal);
1623      setOperationAction(ISD::MGATHER,             VT, Custom);
1624      setOperationAction(ISD::MSCATTER,            VT, Custom);
1625    }
1626    if (!Subtarget.hasBWI()) {
1627      // Need to custom split v32i16/v64i8 bitcasts.
1628      setOperationAction(ISD::BITCAST, MVT::v32i16, Custom);
1629      setOperationAction(ISD::BITCAST, MVT::v64i8,  Custom);
1630
1631      // Better to split these into two 256-bit ops.
1632      setOperationAction(ISD::BITREVERSE, MVT::v8i64, Custom);
1633      setOperationAction(ISD::BITREVERSE, MVT::v16i32, Custom);
1634    }
1635
1636    if (Subtarget.hasVBMI2()) {
1637      for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1638        setOperationAction(ISD::FSHL, VT, Custom);
1639        setOperationAction(ISD::FSHR, VT, Custom);
1640      }
1641    }
1642  } // has AVX-512
1643
1644  // This block controls legalization for operations that don't have
1645  // pre-AVX512 equivalents. Without VLX we use 512-bit operations for
1646  // narrower widths.
1647  if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1648    // These operations are handled on non-VLX by artificially widening in
1649    // isel patterns.
1650
1651    setOperationAction(ISD::FP_TO_UINT, MVT::v8i32,
1652                       Subtarget.hasVLX() ? Legal : Custom);
1653    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32,
1654                       Subtarget.hasVLX() ? Legal : Custom);
1655    setOperationAction(ISD::FP_TO_UINT,         MVT::v2i32, Custom);
1656    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i32,
1657                       Subtarget.hasVLX() ? Legal : Custom);
1658    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32,
1659                       Subtarget.hasVLX() ? Legal : Custom);
1660    setOperationAction(ISD::STRICT_FP_TO_UINT,  MVT::v2i32, Custom);
1661    setOperationAction(ISD::UINT_TO_FP, MVT::v8i32,
1662                       Subtarget.hasVLX() ? Legal : Custom);
1663    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32,
1664                       Subtarget.hasVLX() ? Legal : Custom);
1665    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32,
1666                       Subtarget.hasVLX() ? Legal : Custom);
1667    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32,
1668                       Subtarget.hasVLX() ? Legal : Custom);
1669
1670    for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1671      setOperationAction(ISD::SMAX, VT, Legal);
1672      setOperationAction(ISD::UMAX, VT, Legal);
1673      setOperationAction(ISD::SMIN, VT, Legal);
1674      setOperationAction(ISD::UMIN, VT, Legal);
1675      setOperationAction(ISD::ABS,  VT, Legal);
1676    }
1677
1678    for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1679      setOperationAction(ISD::ROTL,     VT, Custom);
1680      setOperationAction(ISD::ROTR,     VT, Custom);
1681    }
1682
1683    // Custom legalize 2x32 to get a little better code.
1684    setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom);
1685    setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom);
1686
1687    for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1688                     MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1689      setOperationAction(ISD::MSCATTER, VT, Custom);
1690
1691    if (Subtarget.hasDQI()) {
1692      for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1693        setOperationAction(ISD::SINT_TO_FP, VT,
1694                           Subtarget.hasVLX() ? Legal : Custom);
1695        setOperationAction(ISD::UINT_TO_FP, VT,
1696                           Subtarget.hasVLX() ? Legal : Custom);
1697        setOperationAction(ISD::STRICT_SINT_TO_FP, VT,
1698                           Subtarget.hasVLX() ? Legal : Custom);
1699        setOperationAction(ISD::STRICT_UINT_TO_FP, VT,
1700                           Subtarget.hasVLX() ? Legal : Custom);
1701        setOperationAction(ISD::FP_TO_SINT, VT,
1702                           Subtarget.hasVLX() ? Legal : Custom);
1703        setOperationAction(ISD::FP_TO_UINT, VT,
1704                           Subtarget.hasVLX() ? Legal : Custom);
1705        setOperationAction(ISD::STRICT_FP_TO_SINT, VT,
1706                           Subtarget.hasVLX() ? Legal : Custom);
1707        setOperationAction(ISD::STRICT_FP_TO_UINT, VT,
1708                           Subtarget.hasVLX() ? Legal : Custom);
1709        setOperationAction(ISD::MUL,               VT, Legal);
1710      }
1711    }
1712
1713    if (Subtarget.hasCDI()) {
1714      for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1715        setOperationAction(ISD::CTLZ,            VT, Legal);
1716      }
1717    } // Subtarget.hasCDI()
1718
1719    if (Subtarget.hasVPOPCNTDQ()) {
1720      for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
1721        setOperationAction(ISD::CTPOP, VT, Legal);
1722    }
1723  }
1724
1725  // This block controls legalization of v32i1/v64i1, which are available with
1726  // AVX512BW. 512-bit v32i16 and v64i8 vector legalization is controlled with
1727  // useBWIRegs.
1728  if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
1729    addRegisterClass(MVT::v32i1,  &X86::VK32RegClass);
1730    addRegisterClass(MVT::v64i1,  &X86::VK64RegClass);
1731
1732    for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
1733      setOperationAction(ISD::ADD,                VT, Custom);
1734      setOperationAction(ISD::SUB,                VT, Custom);
1735      setOperationAction(ISD::MUL,                VT, Custom);
1736      setOperationAction(ISD::VSELECT,            VT, Expand);
1737      setOperationAction(ISD::UADDSAT,            VT, Custom);
1738      setOperationAction(ISD::SADDSAT,            VT, Custom);
1739      setOperationAction(ISD::USUBSAT,            VT, Custom);
1740      setOperationAction(ISD::SSUBSAT,            VT, Custom);
1741
1742      setOperationAction(ISD::TRUNCATE,           VT, Custom);
1743      setOperationAction(ISD::SETCC,              VT, Custom);
1744      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1745      setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
1746      setOperationAction(ISD::SELECT,             VT, Custom);
1747      setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
1748      setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
1749    }
1750
1751    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v32i1, Custom);
1752    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v64i1, Custom);
1753    setOperationAction(ISD::INSERT_SUBVECTOR,   MVT::v32i1, Custom);
1754    setOperationAction(ISD::INSERT_SUBVECTOR,   MVT::v64i1, Custom);
1755    for (auto VT : { MVT::v16i1, MVT::v32i1 })
1756      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1757
1758    // Extends from v32i1 masks to 256-bit vectors.
1759    setOperationAction(ISD::SIGN_EXTEND,        MVT::v32i8, Custom);
1760    setOperationAction(ISD::ZERO_EXTEND,        MVT::v32i8, Custom);
1761    setOperationAction(ISD::ANY_EXTEND,         MVT::v32i8, Custom);
1762  }
1763
1764  // This block controls legalization for v32i16 and v64i8. 512-bit vectors
1765  // can be disabled based on prefer-vector-width and required-vector-width
1766  // function attributes.
1767  if (!Subtarget.useSoftFloat() && Subtarget.useBWIRegs()) {
1768    addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1769    addRegisterClass(MVT::v64i8,  &X86::VR512RegClass);
1770
1771    // Extends from v64i1 masks to 512-bit vectors.
1772    setOperationAction(ISD::SIGN_EXTEND,        MVT::v64i8, Custom);
1773    setOperationAction(ISD::ZERO_EXTEND,        MVT::v64i8, Custom);
1774    setOperationAction(ISD::ANY_EXTEND,         MVT::v64i8, Custom);
1775
1776    setOperationAction(ISD::MUL,                MVT::v32i16, Legal);
1777    setOperationAction(ISD::MUL,                MVT::v64i8, Custom);
1778    setOperationAction(ISD::MULHS,              MVT::v32i16, Legal);
1779    setOperationAction(ISD::MULHU,              MVT::v32i16, Legal);
1780    setOperationAction(ISD::MULHS,              MVT::v64i8, Custom);
1781    setOperationAction(ISD::MULHU,              MVT::v64i8, Custom);
1782    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v32i16, Custom);
1783    setOperationAction(ISD::CONCAT_VECTORS,     MVT::v64i8, Custom);
1784    setOperationAction(ISD::INSERT_SUBVECTOR,   MVT::v32i16, Legal);
1785    setOperationAction(ISD::INSERT_SUBVECTOR,   MVT::v64i8, Legal);
1786    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v32i16, Custom);
1787    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v64i8, Custom);
1788    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v32i16, Custom);
1789    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v64i8, Custom);
1790    setOperationAction(ISD::SIGN_EXTEND,        MVT::v32i16, Custom);
1791    setOperationAction(ISD::ZERO_EXTEND,        MVT::v32i16, Custom);
1792    setOperationAction(ISD::ANY_EXTEND,         MVT::v32i16, Custom);
1793    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v32i16, Custom);
1794    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v64i8, Custom);
1795    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v32i16, Custom);
1796    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v64i8, Custom);
1797    setOperationAction(ISD::TRUNCATE,           MVT::v32i8, Custom);
1798    setOperationAction(ISD::BITREVERSE,         MVT::v64i8, Custom);
1799
1800    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);
1801    setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);
1802
1803    setTruncStoreAction(MVT::v32i16,  MVT::v32i8, Legal);
1804
1805    for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1806      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1807      setOperationAction(ISD::VSELECT,      VT, Custom);
1808      setOperationAction(ISD::ABS,          VT, Legal);
1809      setOperationAction(ISD::SRL,          VT, Custom);
1810      setOperationAction(ISD::SHL,          VT, Custom);
1811      setOperationAction(ISD::SRA,          VT, Custom);
1812      setOperationAction(ISD::MLOAD,        VT, Legal);
1813      setOperationAction(ISD::MSTORE,       VT, Legal);
1814      setOperationAction(ISD::CTPOP,        VT, Custom);
1815      setOperationAction(ISD::CTLZ,         VT, Custom);
1816      setOperationAction(ISD::SMAX,         VT, Legal);
1817      setOperationAction(ISD::UMAX,         VT, Legal);
1818      setOperationAction(ISD::SMIN,         VT, Legal);
1819      setOperationAction(ISD::UMIN,         VT, Legal);
1820      setOperationAction(ISD::SETCC,        VT, Custom);
1821      setOperationAction(ISD::UADDSAT,      VT, Legal);
1822      setOperationAction(ISD::SADDSAT,      VT, Legal);
1823      setOperationAction(ISD::USUBSAT,      VT, Legal);
1824      setOperationAction(ISD::SSUBSAT,      VT, Legal);
1825      setOperationAction(ISD::SELECT,       VT, Custom);
1826
1827      // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1828      // setcc all the way to isel and prefer SETGT in some isel patterns.
1829      setCondCodeAction(ISD::SETLT, VT, Custom);
1830      setCondCodeAction(ISD::SETLE, VT, Custom);
1831    }
1832
1833    for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1834      setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
1835    }
1836
1837    if (Subtarget.hasBITALG()) {
1838      for (auto VT : { MVT::v64i8, MVT::v32i16 })
1839        setOperationAction(ISD::CTPOP, VT, Legal);
1840    }
1841
1842    if (Subtarget.hasVBMI2()) {
1843      setOperationAction(ISD::FSHL, MVT::v32i16, Custom);
1844      setOperationAction(ISD::FSHR, MVT::v32i16, Custom);
1845    }
1846  }
1847
1848  if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
1849    for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
1850      setOperationAction(ISD::MLOAD,  VT, Subtarget.hasVLX() ? Legal : Custom);
1851      setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
1852    }
1853
1854    // These operations are handled on non-VLX by artificially widening in
1855    // isel patterns.
1856    // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
1857
1858    if (Subtarget.hasBITALG()) {
1859      for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 })
1860        setOperationAction(ISD::CTPOP, VT, Legal);
1861    }
1862  }
1863
1864  if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
1865    setTruncStoreAction(MVT::v4i64, MVT::v4i8,  Legal);
1866    setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
1867    setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
1868    setTruncStoreAction(MVT::v8i32, MVT::v8i8,  Legal);
1869    setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);
1870
1871    setTruncStoreAction(MVT::v2i64, MVT::v2i8,  Legal);
1872    setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
1873    setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
1874    setTruncStoreAction(MVT::v4i32, MVT::v4i8,  Legal);
1875    setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
1876
1877    if (Subtarget.hasDQI()) {
1878      // Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion.
1879      // v2f32 UINT_TO_FP is already custom under SSE2.
1880      assert(isOperationCustom(ISD::UINT_TO_FP, MVT::v2f32) &&
1881             isOperationCustom(ISD::STRICT_UINT_TO_FP, MVT::v2f32) &&
1882             "Unexpected operation action!");
1883      // v2i64 FP_TO_S/UINT(v2f32) custom conversion.
1884      setOperationAction(ISD::FP_TO_SINT,        MVT::v2f32, Custom);
1885      setOperationAction(ISD::FP_TO_UINT,        MVT::v2f32, Custom);
1886      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f32, Custom);
1887      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f32, Custom);
1888    }
1889
1890    if (Subtarget.hasBWI()) {
1891      setTruncStoreAction(MVT::v16i16,  MVT::v16i8, Legal);
1892      setTruncStoreAction(MVT::v8i16,   MVT::v8i8,  Legal);
1893    }
1894
1895    if (Subtarget.hasVBMI2()) {
1896      // TODO: Make these legal even without VLX?
1897      for (auto VT : { MVT::v8i16,  MVT::v4i32, MVT::v2i64,
1898                       MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1899        setOperationAction(ISD::FSHL, VT, Custom);
1900        setOperationAction(ISD::FSHR, VT, Custom);
1901      }
1902    }
1903
1904    setOperationAction(ISD::TRUNCATE, MVT::v16i32, Custom);
1905    setOperationAction(ISD::TRUNCATE, MVT::v8i64, Custom);
1906    setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom);
1907  }
1908
1909  // We want to custom lower some of our intrinsics.
1910  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1911  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1912  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1913  if (!Subtarget.is64Bit()) {
1914    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1915  }
1916
1917  // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1918  // handle type legalization for these operations here.
1919  //
1920  // FIXME: We really should do custom legalization for addition and
1921  // subtraction on x86-32 once PR3203 is fixed.  We really can't do much better
1922  // than generic legalization for 64-bit multiplication-with-overflow, though.
1923  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
1924    if (VT == MVT::i64 && !Subtarget.is64Bit())
1925      continue;
1926    // Add/Sub/Mul with overflow operations are custom lowered.
1927    setOperationAction(ISD::SADDO, VT, Custom);
1928    setOperationAction(ISD::UADDO, VT, Custom);
1929    setOperationAction(ISD::SSUBO, VT, Custom);
1930    setOperationAction(ISD::USUBO, VT, Custom);
1931    setOperationAction(ISD::SMULO, VT, Custom);
1932    setOperationAction(ISD::UMULO, VT, Custom);
1933
1934    // Support carry in as value rather than glue.
1935    setOperationAction(ISD::ADDCARRY, VT, Custom);
1936    setOperationAction(ISD::SUBCARRY, VT, Custom);
1937    setOperationAction(ISD::SETCCCARRY, VT, Custom);
1938  }
1939
1940  if (!Subtarget.is64Bit()) {
1941    // These libcalls are not available in 32-bit mode.
1942    setLibcallName(RTLIB::SHL_I128, nullptr);
1943    setLibcallName(RTLIB::SRL_I128, nullptr);
1944    setLibcallName(RTLIB::SRA_I128, nullptr);
1945    setLibcallName(RTLIB::MUL_I128, nullptr);
1946  }
1947
1948  // Combine sin / cos into _sincos_stret if it is available.
1949  if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
1950      getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
1951    setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1952    setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1953  }
1954
1955  if (Subtarget.isTargetWin64()) {
1956    setOperationAction(ISD::SDIV, MVT::i128, Custom);
1957    setOperationAction(ISD::UDIV, MVT::i128, Custom);
1958    setOperationAction(ISD::SREM, MVT::i128, Custom);
1959    setOperationAction(ISD::UREM, MVT::i128, Custom);
1960    setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1961    setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1962  }
1963
1964  // On 32-bit MSVC, `fmodf(f32)` is not defined; only `fmod(f64)`
1965  // is. We should promote the value to 64 bits to solve this.
1966  // This is what the CRT headers do: `fmodf` is an inline header
1967  // function casting to f64 and calling `fmod`.
1968  if (Subtarget.is32Bit() &&
1969      (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()))
1970    for (ISD::NodeType Op :
1971         {ISD::FCEIL,  ISD::STRICT_FCEIL,
1972          ISD::FCOS,   ISD::STRICT_FCOS,
1973          ISD::FEXP,   ISD::STRICT_FEXP,
1974          ISD::FFLOOR, ISD::STRICT_FFLOOR,
1975          ISD::FREM,   ISD::STRICT_FREM,
1976          ISD::FLOG,   ISD::STRICT_FLOG,
1977          ISD::FLOG10, ISD::STRICT_FLOG10,
1978          ISD::FPOW,   ISD::STRICT_FPOW,
1979          ISD::FSIN,   ISD::STRICT_FSIN})
1980      if (isOperationExpand(Op, MVT::f32))
1981        setOperationAction(Op, MVT::f32, Promote);
1982
1983  // We have target-specific dag combine patterns for the following nodes:
1984  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1985  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
1986  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1987  setTargetDAGCombine(ISD::CONCAT_VECTORS);
1988  setTargetDAGCombine(ISD::INSERT_SUBVECTOR);
1989  setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);
1990  setTargetDAGCombine(ISD::BITCAST);
1991  setTargetDAGCombine(ISD::VSELECT);
1992  setTargetDAGCombine(ISD::SELECT);
1993  setTargetDAGCombine(ISD::SHL);
1994  setTargetDAGCombine(ISD::SRA);
1995  setTargetDAGCombine(ISD::SRL);
1996  setTargetDAGCombine(ISD::OR);
1997  setTargetDAGCombine(ISD::AND);
1998  setTargetDAGCombine(ISD::ADD);
1999  setTargetDAGCombine(ISD::FADD);
2000  setTargetDAGCombine(ISD::FSUB);
2001  setTargetDAGCombine(ISD::FNEG);
2002  setTargetDAGCombine(ISD::FMA);
2003  setTargetDAGCombine(ISD::FMINNUM);
2004  setTargetDAGCombine(ISD::FMAXNUM);
2005  setTargetDAGCombine(ISD::SUB);
2006  setTargetDAGCombine(ISD::LOAD);
2007  setTargetDAGCombine(ISD::MLOAD);
2008  setTargetDAGCombine(ISD::STORE);
2009  setTargetDAGCombine(ISD::MSTORE);
2010  setTargetDAGCombine(ISD::TRUNCATE);
2011  setTargetDAGCombine(ISD::ZERO_EXTEND);
2012  setTargetDAGCombine(ISD::ANY_EXTEND);
2013  setTargetDAGCombine(ISD::SIGN_EXTEND);
2014  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
2015  setTargetDAGCombine(ISD::ANY_EXTEND_VECTOR_INREG);
2016  setTargetDAGCombine(ISD::SIGN_EXTEND_VECTOR_INREG);
2017  setTargetDAGCombine(ISD::ZERO_EXTEND_VECTOR_INREG);
2018  setTargetDAGCombine(ISD::SINT_TO_FP);
2019  setTargetDAGCombine(ISD::UINT_TO_FP);
2020  setTargetDAGCombine(ISD::STRICT_SINT_TO_FP);
2021  setTargetDAGCombine(ISD::STRICT_UINT_TO_FP);
2022  setTargetDAGCombine(ISD::SETCC);
2023  setTargetDAGCombine(ISD::MUL);
2024  setTargetDAGCombine(ISD::XOR);
2025  setTargetDAGCombine(ISD::MSCATTER);
2026  setTargetDAGCombine(ISD::MGATHER);
2027
2028  computeRegisterProperties(Subtarget.getRegisterInfo());
2029
2030  MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
2031  MaxStoresPerMemsetOptSize = 8;
2032  MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
2033  MaxStoresPerMemcpyOptSize = 4;
2034  MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
2035  MaxStoresPerMemmoveOptSize = 4;
2036
2037  // TODO: These control memcmp expansion in CGP and could be raised higher, but
2038  // that needs to be benchmarked and balanced with the potential use of vector
2039  // load/store types (PR33329, PR33914).
2040  MaxLoadsPerMemcmp = 2;
2041  MaxLoadsPerMemcmpOptSize = 2;
2042
2043  // Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
2044  setPrefLoopAlignment(Align(1ULL << ExperimentalPrefLoopAlignment));
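      // For example, raising ExperimentalPrefLoopAlignment to 5 requests
      // 2^5 = 32-byte alignment for loop headers.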
2045
2046  // An out-of-order CPU can speculatively execute past a predictable branch,
2047  // but a conditional move could be stalled by an expensive earlier operation.
2048  PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
2049  EnableExtLdPromotion = true;
2050  setPrefFunctionAlignment(Align(16));
2051
2052  verifyIntrinsicTables();
2053
2054  // Default to having -disable-strictnode-mutation on
2055  IsStrictFPEnabled = true;
2056}
2057
2058// This has so far only been implemented for 64-bit MachO.
2059bool X86TargetLowering::useLoadStackGuardNode() const {
2060  return Subtarget.isTargetMachO() && Subtarget.is64Bit();
2061}
2062
2063bool X86TargetLowering::useStackGuardXorFP() const {
2064  // Currently only MSVC CRTs XOR the frame pointer into the stack guard value.
2065  return Subtarget.getTargetTriple().isOSMSVCRT() && !Subtarget.isTargetMachO();
2066}
2067
2068SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
2069                                               const SDLoc &DL) const {
2070  EVT PtrTy = getPointerTy(DAG.getDataLayout());
2071  unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
2072  MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val);
2073  return SDValue(Node, 0);
2074}
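    // X86::XOR32_FP / X86::XOR64_FP are pseudos that XOR the value with the
    // frame pointer; they are expanded once the frame register is known,
    // matching the MSVC CRT scheme noted in useStackGuardXorFP above.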
2075
2076TargetLoweringBase::LegalizeTypeAction
2077X86TargetLowering::getPreferredVectorAction(MVT VT) const {
2078  if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
2079    return TypeSplitVector;
2080
2081  if (VT.getVectorNumElements() != 1 &&
2082      VT.getVectorElementType() != MVT::i1)
2083    return TypeWidenVector;
2084
2085  return TargetLoweringBase::getPreferredVectorAction(VT);
2086}
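    // For example, an illegal v3i32 is widened (typically to v4i32) rather than
    // split or scalarized, while v32i1 on an AVX512F-only target (no BWI) is
    // split into two v16i1 halves.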
2087
2088MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
2089                                                     CallingConv::ID CC,
2090                                                     EVT VT) const {
2091  // v32i1 vectors should be promoted to v32i8 to match avx2.
2092  if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
2093    return MVT::v32i8;
2094  // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
2095  if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
2096      Subtarget.hasAVX512() &&
2097      (!isPowerOf2_32(VT.getVectorNumElements()) ||
2098       (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
2099       (VT.getVectorNumElements() > 64 && Subtarget.hasBWI())))
2100    return MVT::i8;
2101  // Split v64i1 vectors if we don't have v64i8 available.
2102  if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
2103      CC != CallingConv::X86_RegCall)
2104    return MVT::v32i1;
2105  // FIXME: Should we just make these types legal and custom split operations?
2106  if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !EnableOldKNLABI &&
2107      Subtarget.useAVX512Regs() && !Subtarget.hasBWI())
2108    return MVT::v16i32;
2109  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
2110}
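    // For example, on an AVX512F target without BWI (and with the old KNL ABI
    // disabled), a v64i8 argument is passed as a single v16i32 register value,
    // while a v64i1 argument with BWI but 512-bit registers disabled is split
    // into v32i1 halves.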
2111
2112unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
2113                                                          CallingConv::ID CC,
2114                                                          EVT VT) const {
2115  // v32i1 vectors should be promoted to v32i8 to match avx2.
2116  if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
2117    return 1;
2118  // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
2119  if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
2120      Subtarget.hasAVX512() &&
2121      (!isPowerOf2_32(VT.getVectorNumElements()) ||
2122       (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
2123       (VT.getVectorNumElements() > 64 && Subtarget.hasBWI())))
2124    return VT.getVectorNumElements();
2125  // Split v64i1 vectors if we don't have v64i8 available.
2126  if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
2127      CC != CallingConv::X86_RegCall)
2128    return 2;
2129  // FIXME: Should we just make these types legal and custom split operations?
2130  if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !EnableOldKNLABI &&
2131      Subtarget.useAVX512Regs() && !Subtarget.hasBWI())
2132    return 1;
2133  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
2134}
2135
2136unsigned X86TargetLowering::getVectorTypeBreakdownForCallingConv(
2137    LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
2138    unsigned &NumIntermediates, MVT &RegisterVT) const {
2139  // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
2140  if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
2141      Subtarget.hasAVX512() &&
2142      (!isPowerOf2_32(VT.getVectorNumElements()) ||
2143       (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
2144       (VT.getVectorNumElements() > 64 && Subtarget.hasBWI()))) {
2145    RegisterVT = MVT::i8;
2146    IntermediateVT = MVT::i1;
2147    NumIntermediates = VT.getVectorNumElements();
2148    return NumIntermediates;
2149  }
2150
2151  // Split v64i1 vectors if we don't have v64i8 available.
2152  if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
2153      CC != CallingConv::X86_RegCall) {
2154    RegisterVT = MVT::v32i1;
2155    IntermediateVT = MVT::v32i1;
2156    NumIntermediates = 2;
2157    return 2;
2158  }
2159
2160  return TargetLowering::getVectorTypeBreakdownForCallingConv(
2161      Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
2162}
2163
2164EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
2165                                          LLVMContext& Context,
2166                                          EVT VT) const {
2167  if (!VT.isVector())
2168    return MVT::i8;
2169
2170  if (Subtarget.hasAVX512()) {
2171    const unsigned NumElts = VT.getVectorNumElements();
2172
2173    // Figure out what this type will be legalized to.
2174    EVT LegalVT = VT;
2175    while (getTypeAction(Context, LegalVT) != TypeLegal)
2176      LegalVT = getTypeToTransformTo(Context, LegalVT);
2177
2178    // If we got a 512-bit vector then we'll definitely have a vXi1 compare.
2179    if (LegalVT.getSimpleVT().is512BitVector())
2180      return EVT::getVectorVT(Context, MVT::i1, NumElts);
2181
2182    if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) {
2183      // If we legalized to less than a 512-bit vector, then we will use a vXi1
2184      // compare for vXi32/vXi64 for sure. If we have BWI we will also support
2185      // vXi16/vXi8.
2186      MVT EltVT = LegalVT.getSimpleVT().getVectorElementType();
2187      if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32)
2188        return EVT::getVectorVT(Context, MVT::i1, NumElts);
2189    }
2190  }
2191
2192  return VT.changeVectorElementTypeToInteger();
2193}
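    // For example, with AVX512 a compare of v16f32 typically yields a v16i1
    // mask, while on a plain AVX2 target a compare of v8f32 yields v8i32.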
2194
2195/// Helper for getByValTypeAlignment to determine
2196/// the desired ByVal argument alignment.
2197static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
2198  if (MaxAlign == 16)
2199    return;
2200  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
2201    if (VTy->getBitWidth() == 128)
2202      MaxAlign = 16;
2203  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
2204    unsigned EltAlign = 0;
2205    getMaxByValAlign(ATy->getElementType(), EltAlign);
2206    if (EltAlign > MaxAlign)
2207      MaxAlign = EltAlign;
2208  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
2209    for (auto *EltTy : STy->elements()) {
2210      unsigned EltAlign = 0;
2211      getMaxByValAlign(EltTy, EltAlign);
2212      if (EltAlign > MaxAlign)
2213        MaxAlign = EltAlign;
2214      if (MaxAlign == 16)
2215        break;
2216    }
2217  }
2218}
2219
2220/// Return the desired alignment for ByVal aggregate
2221/// function arguments in the caller parameter area. For X86, aggregates
2222/// that contain SSE vectors are placed at 16-byte boundaries while the rest
2223/// are at 4-byte boundaries.
2224unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty,
2225                                                  const DataLayout &DL) const {
2226  if (Subtarget.is64Bit()) {
2227    // Max of 8 and alignment of type.
2228    unsigned TyAlign = DL.getABITypeAlignment(Ty);
2229    if (TyAlign > 8)
2230      return TyAlign;
2231    return 8;
2232  }
2233
2234  unsigned Align = 4;
2235  if (Subtarget.hasSSE1())
2236    getMaxByValAlign(Ty, Align);
2237  return Align;
2238}
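    // For example, in 32-bit code with SSE available, a byval struct containing
    // a 128-bit vector member is placed at a 16-byte boundary, while a struct of
    // plain scalars stays at 4 bytes; 64-bit targets use at least 8-byte
    // alignment.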
2239
2240/// Returns the target-specific optimal type for load
2241/// and store operations as a result of memset, memcpy, and memmove
2242/// lowering. If DstAlign is zero, that means it's safe to assume the
2243/// destination alignment can satisfy any constraint. Similarly, if SrcAlign
2244/// is zero, there is no need to check it against an alignment requirement,
2245/// probably because the source does not need to be loaded. If 'IsMemset' is
2246/// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
2247/// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
2248/// source is constant so it does not need to be loaded.
2249/// It returns EVT::Other if the type should be determined using generic
2250/// target-independent logic.
2251/// For vector ops we check that the overall size isn't larger than our
2252/// preferred vector width.
2253EVT X86TargetLowering::getOptimalMemOpType(
2254    uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
2255    bool ZeroMemset, bool MemcpyStrSrc,
2256    const AttributeList &FuncAttributes) const {
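  // Prefer the widest vector type the subtarget supports (and is configured to
  // prefer), falling back to scalar i64/i32 when vectors are unavailable, the
  // function is marked noimplicitfloat, or unaligned accesses would be slow.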
2257  if (!FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
2258    if (Size >= 16 && (!Subtarget.isUnalignedMem16Slow() ||
2259                       ((DstAlign == 0 || DstAlign >= 16) &&
2260                        (SrcAlign == 0 || SrcAlign >= 16)))) {
2261      // FIXME: Check if unaligned 64-byte accesses are slow.
2262      if (Size >= 64 && Subtarget.hasAVX512() &&
2263          (Subtarget.getPreferVectorWidth() >= 512)) {
2264        return Subtarget.hasBWI() ? MVT::v64i8 : MVT::v16i32;
2265      }
2266      // FIXME: Check if unaligned 32-byte accesses are slow.
2267      if (Size >= 32 && Subtarget.hasAVX() &&
2268          (Subtarget.getPreferVectorWidth() >= 256)) {
2269        // Although this isn't a well-supported type for AVX1, we'll let
2270        // legalization and shuffle lowering produce the optimal codegen. If we
2271        // choose an optimal type with a vector element larger than a byte,
2272        // getMemsetStores() may create an intermediate splat (using an integer
2273        // multiply) before we splat as a vector.
2274        return MVT::v32i8;
2275      }
2276      if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128))
2277        return MVT::v16i8;
2278      // TODO: Can SSE1 handle a byte vector?
2279      // If we have SSE1 registers we should be able to use them.
2280      if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
2281          (Subtarget.getPreferVectorWidth() >= 128))
2282        return MVT::v4f32;
2283    } else if ((!IsMemset || ZeroMemset) && !MemcpyStrSrc && Size >= 8 &&
2284               !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
2285      // Do not use f64 to lower memcpy if source is string constant. It's
2286      // better to use i32 to avoid the loads.
2287      // Also, do not use f64 to lower memset unless this is a memset of zeros.
2288      // The gymnastics of splatting a byte value into an XMM register and then
2289      // only using 8-byte stores (because this is a CPU with slow unaligned
2290      // 16-byte accesses) makes that a loser.
2291      return MVT::f64;
2292    }
2293  }
2294  // This is a compromise. If we reach here, unaligned accesses may be slow on
2295  // this target. However, creating smaller, aligned accesses could be even
2296  // slower and would certainly be a lot more code.
2297  if (Subtarget.is64Bit() && Size >= 8)
2298    return MVT::i64;
2299  return MVT::i32;
2300}
2301
2302bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
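  // Scalar FP types are only safe when the matching scalar SSE level is
  // available; all other (integer) types are always safe.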
2303  if (VT == MVT::f32)
2304    return X86ScalarSSEf32;
2305  else if (VT == MVT::f64)
2306    return X86ScalarSSEf64;
2307  return true;
2308}
2309
2310bool X86TargetLowering::allowsMisalignedMemoryAccesses(
2311    EVT VT, unsigned, unsigned Align, MachineMemOperand::Flags Flags,
2312    bool *Fast) const {
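  // First report how fast the misaligned access is expected to be, based on
  // the subtarget's unaligned 16/32-byte tuning; legality is decided below.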
2313  if (Fast) {
2314    switch (VT.getSizeInBits()) {
2315    default:
2316      // 8-byte and under are always assumed to be fast.
2317      *Fast = true;
2318      break;
2319    case 128:
2320      *Fast = !Subtarget.isUnalignedMem16Slow();
2321      break;
2322    case 256:
2323      *Fast = !Subtarget.isUnalignedMem32Slow();
2324      break;
2325    // TODO: What about AVX-512 (512-bit) accesses?
2326    }
2327  }
2328  // NonTemporal vector memory ops must be aligned.
2329  if (!!(Flags & MachineMemOperand::MONonTemporal) && VT.isVector()) {
2330    // NT loads can only be vector aligned, so if it's less aligned than the
2331    // minimum vector size (which we can split the vector down to), we might as
2332    // well use a regular unaligned vector load.
2333    // We don't have any NT loads pre-SSE41.
2334    if (!!(Flags & MachineMemOperand::MOLoad))
2335      return (Align < 16 || !Subtarget.hasSSE41());
2336    return false;
2337  }
2338  // Misaligned accesses of any size are always allowed.
2339  return true;
2340}
2341
2342/// Return the entry encoding for a jump table in the
2343/// current function.  The returned value is a member of the
2344/// MachineJumpTableInfo::JTEntryKind enum.
2345unsigned X86TargetLowering::getJumpTableEncoding() const {
2346  // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
2347  // symbol.
2348  if (isPositionIndependent() && Subtarget.isPICStyleGOT())
2349    return MachineJumpTableInfo::EK_Custom32;
2350
2351  // Otherwise, use the normal jump table encoding heuristics.
2352  return TargetLowering::getJumpTableEncoding();
2353}
2354
2355bool X86TargetLowering::useSoftFloat() const {
2356  return Subtarget.useSoftFloat();
2357}
2358
2359void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
2360                                              ArgListTy &Args) const {
2361
2362  // Only relabel X86-32 for C / Stdcall CCs.
2363  if (Subtarget.is64Bit())
2364    return;
2365  if (CC != CallingConv::C && CC != CallingConv::X86_StdCall)
2366    return;
2367  unsigned ParamRegs = 0;
2368  if (auto *M = MF->getFunction().getParent())
2369    ParamRegs = M->getNumberRegisterParameters();
2370
2371  // Mark the first N integer arguments as being passed in registers (IsInReg).
2372  for (unsigned Idx = 0; Idx < Args.size(); Idx++) {
2373    Type *T = Args[Idx].Ty;
2374    if (T->isIntOrPtrTy())
2375      if (MF->getDataLayout().getTypeAllocSize(T) <= 8) {
2376        unsigned numRegs = 1;
2377        if (MF->getDataLayout().getTypeAllocSize(T) > 4)
2378          numRegs = 2;
2379        if (ParamRegs < numRegs)
2380          return;
2381        ParamRegs -= numRegs;
2382        Args[Idx].IsInReg = true;
2383      }
2384  }
2385}
2386
2387const MCExpr *
2388X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
2389                                             const MachineBasicBlock *MBB,
2390                                             unsigned uid,MCContext &Ctx) const{
2391  assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
2392  // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
2393  // entries.
2394  return MCSymbolRefExpr::create(MBB->getSymbol(),
2395                                 MCSymbolRefExpr::VK_GOTOFF, Ctx);
2396}
2397
2398/// Returns relocation base for the given PIC jumptable.
2399SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
2400                                                    SelectionDAG &DAG) const {
2401  if (!Subtarget.is64Bit())
2402    // This doesn't have SDLoc associated with it, but is not really the
2403    // same as a Register.
2404    return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
2405                       getPointerTy(DAG.getDataLayout()));
2406  return Table;
2407}
2408
2409/// This returns the relocation base for the given PIC jumptable,
2410/// the same as getPICJumpTableRelocBase, but as an MCExpr.
2411const MCExpr *X86TargetLowering::
2412getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
2413                             MCContext &Ctx) const {
2414  // X86-64 uses RIP relative addressing based on the jump table label.
2415  if (Subtarget.isPICStyleRIPRel())
2416    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2417
2418  // Otherwise, the reference is relative to the PIC base.
2419  return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2420}
2421
2422std::pair<const TargetRegisterClass *, uint8_t>
2423X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
2424                                           MVT VT) const {
2425  const TargetRegisterClass *RRC = nullptr;
2426  uint8_t Cost = 1;
2427  switch (VT.SimpleTy) {
2428  default:
2429    return TargetLowering::findRepresentativeClass(TRI, VT);
2430  case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
2431    RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
2432    break;
2433  case MVT::x86mmx:
2434    RRC = &X86::VR64RegClass;
2435    break;
2436  case MVT::f32: case MVT::f64:
2437  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
2438  case MVT::v4f32: case MVT::v2f64:
2439  case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
2440  case MVT::v8f32: case MVT::v4f64:
2441  case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
2442  case MVT::v16f32: case MVT::v8f64:
2443    RRC = &X86::VR128XRegClass;
2444    break;
2445  }
2446  return std::make_pair(RRC, Cost);
2447}
2448
2449unsigned X86TargetLowering::getAddressSpace() const {
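  // Address space 256 maps to %gs and 257 to %fs on x86. The kernel code model
  // keeps the guard value behind %gs, ordinary 64-bit code uses %fs, and
  // 32-bit code uses %gs.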
2450  if (Subtarget.is64Bit())
2451    return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257;
2452  return 256;
2453}
2454
2455static bool hasStackGuardSlotTLS(const Triple &TargetTriple) {
2456  return TargetTriple.isOSGlibc() || TargetTriple.isOSFuchsia() ||
2457         (TargetTriple.isAndroid() && !TargetTriple.isAndroidVersionLT(17));
2458}
2459
2460static Constant* SegmentOffset(IRBuilder<> &IRB,
2461                               unsigned Offset, unsigned AddressSpace) {
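  // Build an i8** constant in the requested segment address space whose
  // pointer value is the raw byte offset (an inttoptr of Offset).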
2462  return ConstantExpr::getIntToPtr(
2463      ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
2464      Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
2465}
2466
2467Value *X86TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
2468  // glibc, bionic, and Fuchsia have a special slot for the stack guard in
2469  // tcbhead_t; use it instead of the usual global variable (see
2470  // sysdeps/{i386,x86_64}/nptl/tls.h)
2471  if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) {
2472    if (Subtarget.isTargetFuchsia()) {
2473      // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
2474      return SegmentOffset(IRB, 0x10, getAddressSpace());
2475    } else {
2476      // %fs:0x28, unless we're using a Kernel code model, in which case
2477      // it's %gs:0x28.  %gs:0x14 on i386.
2478      unsigned Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
2479      return SegmentOffset(IRB, Offset, getAddressSpace());
2480    }
2481  }
2482
2483  return TargetLowering::getIRStackGuard(IRB);
2484}
2485
2486void X86TargetLowering::insertSSPDeclarations(Module &M) const {
2487  // MSVC CRT provides functionalities for stack protection.
2488  if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2489      Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2490    // MSVC CRT has a global variable holding security cookie.
2491    M.getOrInsertGlobal("__security_cookie",
2492                        Type::getInt8PtrTy(M.getContext()));
2493
2494    // MSVC CRT has a function to validate security cookie.
2495    FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
2496        "__security_check_cookie", Type::getVoidTy(M.getContext()),
2497        Type::getInt8PtrTy(M.getContext()));
2498    if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
2499      F->setCallingConv(CallingConv::X86_FastCall);
2500      F->addAttribute(1, Attribute::AttrKind::InReg);
2501    }
2502    return;
2503  }
2504  // glibc, bionic, and Fuchsia have a special slot for the stack guard.
2505  if (hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
2506    return;
2507  TargetLowering::insertSSPDeclarations(M);
2508}
2509
2510Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
2511  // MSVC CRT has a global variable holding security cookie.
2512  if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2513      Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2514    return M.getGlobalVariable("__security_cookie");
2515  }
2516  return TargetLowering::getSDagStackGuard(M);
2517}
2518
2519Function *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
2520  // MSVC CRT has a function to validate security cookie.
2521  if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2522      Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2523    return M.getFunction("__security_check_cookie");
2524  }
2525  return TargetLowering::getSSPStackGuardCheck(M);
2526}
2527
2528Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
2529  if (Subtarget.getTargetTriple().isOSContiki())
2530    return getDefaultSafeStackPointerLocation(IRB, false);
2531
2532  // Android provides a fixed TLS slot for the SafeStack pointer. See the
2533  // definition of TLS_SLOT_SAFESTACK in
2534  // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
2535  if (Subtarget.isTargetAndroid()) {
2536    // %fs:0x48, unless we're using a Kernel code model, in which case it's
2537    // %gs:0x48.  %gs:0x24 on i386.
2538    unsigned Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
2539    return SegmentOffset(IRB, Offset, getAddressSpace());
2540  }
2541
2542  // Fuchsia is similar.
2543  if (Subtarget.isTargetFuchsia()) {
2544    // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
2545    return SegmentOffset(IRB, 0x18, getAddressSpace());
2546  }
2547
2548  return TargetLowering::getSafeStackPointerLocation(IRB);
2549}
2550
2551bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
2552                                            unsigned DestAS) const {
2553  assert(SrcAS != DestAS && "Expected different address spaces!");
2554
2555  const TargetMachine &TM = getTargetMachine();
2556  if (TM.getPointerSize(SrcAS) != TM.getPointerSize(DestAS))
2557    return false;
2558
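  // Address spaces 256 and up are the x86 segment address spaces (%gs/%fs/%ss),
  // where a cast changes which segment base applies, so only casts between
  // ordinary (< 256) address spaces are no-ops.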
2559  return SrcAS < 256 && DestAS < 256;
2560}
2561
2562//===----------------------------------------------------------------------===//
2563//               Return Value Calling Convention Implementation
2564//===----------------------------------------------------------------------===//
2565
2566bool X86TargetLowering::CanLowerReturn(
2567    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
2568    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
2569  SmallVector<CCValAssign, 16> RVLocs;
2570  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2571  return CCInfo.CheckReturn(Outs, RetCC_X86);
2572}
2573
2574const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2575  static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2576  return ScratchRegs;
2577}
2578
2579 /// Lowers mask values (v*i1) to the local register values.
2580 /// \returns DAG node after lowering to register type.
2581static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc,
2582                               const SDLoc &Dl, SelectionDAG &DAG) {
2583  EVT ValVT = ValArg.getValueType();
2584
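  // For example, a v16i1 value headed for an i32 location is first bitcast to
  // i16 and then any-extended to i32 by the two-stage path below.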
2585  if (ValVT == MVT::v1i1)
2586    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, Dl, ValLoc, ValArg,
2587                       DAG.getIntPtrConstant(0, Dl));
2588
2589  if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
2590      (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
2591    // Two stage lowering might be required
2592    // bitcast:   v8i1 -> i8 / v16i1 -> i16
2593    // anyextend: i8   -> i32 / i16   -> i32
2594    EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
2595    SDValue ValToCopy = DAG.getBitcast(TempValLoc, ValArg);
2596    if (ValLoc == MVT::i32)
2597      ValToCopy = DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValToCopy);
2598    return ValToCopy;
2599  }
2600
2601  if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
2602      (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
2603    // One stage lowering is required
2604    // bitcast:   v32i1 -> i32 / v64i1 -> i64
2605    return DAG.getBitcast(ValLoc, ValArg);
2606  }
2607
2608  return DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValArg);
2609}
2610
2611 /// Breaks a v64i1 value into two i32 registers and adds the new nodes to the DAG.
2612static void Passv64i1ArgInRegs(
2613    const SDLoc &Dl, SelectionDAG &DAG, SDValue &Arg,
2614    SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, CCValAssign &VA,
2615    CCValAssign &NextVA, const X86Subtarget &Subtarget) {
2616  assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
2617  assert(Subtarget.is32Bit() && "Expecting 32 bit target");
2618  assert(Arg.getValueType() == MVT::i64 && "Expecting 64 bit value");
2619  assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2620         "The value should reside in two registers");
2621
2622  // Before splitting the value we cast it to i64
2623  Arg = DAG.getBitcast(MVT::i64, Arg);
2624
2625  // Splitting the value into two i32 types
2626  SDValue Lo, Hi;
2627  Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
2628                   DAG.getConstant(0, Dl, MVT::i32));
2629  Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
2630                   DAG.getConstant(1, Dl, MVT::i32));
2631
2632  // Attach the two i32 halves to their corresponding registers.
2633  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
2634  RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
2635}
2636
2637SDValue
2638X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2639                               bool isVarArg,
2640                               const SmallVectorImpl<ISD::OutputArg> &Outs,
2641                               const SmallVectorImpl<SDValue> &OutVals,
2642                               const SDLoc &dl, SelectionDAG &DAG) const {
2643  MachineFunction &MF = DAG.getMachineFunction();
2644  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2645
2646  // In some cases we need to disable registers from the default CSR list.
2647  // For example, when they are used for argument passing.
2648  bool ShouldDisableCalleeSavedRegister =
2649      CallConv == CallingConv::X86_RegCall ||
2650      MF.getFunction().hasFnAttribute("no_caller_saved_registers");
2651
2652  if (CallConv == CallingConv::X86_INTR && !Outs.empty())
2653    report_fatal_error("X86 interrupts may not return any value");
2654
2655  SmallVector<CCValAssign, 16> RVLocs;
2656  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2657  CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2658
2659  SDValue Flag;
2660  SmallVector<SDValue, 6> RetOps;
2661  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2662  // Operand #1 = Bytes To Pop
2663  RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
2664                   MVT::i32));
2665
2666  // Copy the result values into the output registers.
2667  for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
2668       ++I, ++OutsIndex) {
2669    CCValAssign &VA = RVLocs[I];
2670    assert(VA.isRegLoc() && "Can only return in registers!");
2671
2672    // Add the register to the CalleeSaveDisableRegs list.
2673    if (ShouldDisableCalleeSavedRegister)
2674      MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());
2675
2676    SDValue ValToCopy = OutVals[OutsIndex];
2677    EVT ValVT = ValToCopy.getValueType();
2678
2679    // Promote values to the appropriate types.
2680    if (VA.getLocInfo() == CCValAssign::SExt)
2681      ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2682    else if (VA.getLocInfo() == CCValAssign::ZExt)
2683      ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2684    else if (VA.getLocInfo() == CCValAssign::AExt) {
2685      if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
2686        ValToCopy = lowerMasksToReg(ValToCopy, VA.getLocVT(), dl, DAG);
2687      else
2688        ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2689    }
2690    else if (VA.getLocInfo() == CCValAssign::BCvt)
2691      ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
2692
2693    assert(VA.getLocInfo() != CCValAssign::FPExt &&
2694           "Unexpected FP-extend for return value.");
2695
2696    // Report an error if we have attempted to return a value via an XMM
2697    // register and SSE was disabled.
2698    if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
2699      errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
2700      VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2701    } else if (!Subtarget.hasSSE2() &&
2702               X86::FR64XRegClass.contains(VA.getLocReg()) &&
2703               ValVT == MVT::f64) {
2704      // When returning a double via an XMM register, report an error if SSE2 is
2705      // not enabled.
2706      errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
2707      VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2708    }
2709
2710    // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2711    // the RET instruction and handled by the FP Stackifier.
2712    if (VA.getLocReg() == X86::FP0 ||
2713        VA.getLocReg() == X86::FP1) {
2714      // If this is a copy from an xmm register to ST(0), use an FPExtend to
2715      // change the value to the FP stack register class.
2716      if (isScalarFPTypeInSSEReg(VA.getValVT()))
2717        ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2718      RetOps.push_back(ValToCopy);
2719      // Don't emit a copytoreg.
2720      continue;
2721    }
2722
2723    // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2724    // which is returned in RAX / RDX.
2725    if (Subtarget.is64Bit()) {
2726      if (ValVT == MVT::x86mmx) {
2727        if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2728          ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
2729          ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2730                                  ValToCopy);
2731          // If we don't have SSE2 available, convert to v4f32 so the generated
2732          // register is legal.
2733          if (!Subtarget.hasSSE2())
2734            ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
2735        }
2736      }
2737    }
2738
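    // Collect the (register, value) pairs for this return value: a custom
    // v64i1 split produces two pairs, everything else exactly one.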
2739    SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2740
2741    if (VA.needsCustom()) {
2742      assert(VA.getValVT() == MVT::v64i1 &&
2743             "Currently the only custom case is when we split v64i1 to 2 regs");
2744
2745      Passv64i1ArgInRegs(dl, DAG, ValToCopy, RegsToPass, VA, RVLocs[++I],
2746                         Subtarget);
2747
2748      assert(2 == RegsToPass.size() &&
2749             "Expecting two registers after Passv64i1ArgInRegs");
2750
2751      // Add the second register to the CalleeSaveDisableRegs list.
2752      if (ShouldDisableCalleeSavedRegister)
2753        MF.getRegInfo().disableCalleeSavedRegister(RVLocs[I].getLocReg());
2754    } else {
2755      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
2756    }
2757
2758    // Add nodes to the DAG and add the values into the RetOps list
2759    for (auto &Reg : RegsToPass) {
2760      Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, Flag);
2761      Flag = Chain.getValue(1);
2762      RetOps.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
2763    }
2764  }
2765
2766  // Swift calling convention does not require we copy the sret argument
2767  // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.
2768
2769  // All x86 ABIs require that for returning structs by value we copy
2770  // the sret argument into %rax/%eax (depending on ABI) for the return.
2771  // We saved the argument into a virtual register in the entry block,
2772  // so now we copy the value out and into %rax/%eax.
2773  //
2774  // Checking Function.hasStructRetAttr() here is insufficient because the IR
2775  // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2776  // false, then an sret argument may be implicitly inserted in the SelDAG. In
2777  // either case FuncInfo->setSRetReturnReg() will have been called.
2778  if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2779    // When we have both sret and another return value, we should use the
2780    // original Chain stored in RetOps[0], instead of the current Chain updated
2781    // in the above loop. If we only have sret, RetOps[0] equals Chain.
2782
2783    // For the case of sret and another return value, we have
2784    //   Chain_0 at the function entry
2785    //   Chain_1 = getCopyToReg(Chain_0) in the above loop
2786    // If we use Chain_1 in getCopyFromReg, we will have
2787    //   Val = getCopyFromReg(Chain_1)
2788    //   Chain_2 = getCopyToReg(Chain_1, Val) from below
2789
2790    // getCopyToReg(Chain_0) will be glued together with
2791    // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
2792    // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
2793    //   Data dependency from Unit B to Unit A due to usage of Val in
2794    //     getCopyToReg(Chain_1, Val)
2795    //   Chain dependency from Unit A to Unit B
2796
2797    // So here, we use RetOps[0] (i.e Chain_0) for getCopyFromReg.
2798    SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg,
2799                                     getPointerTy(MF.getDataLayout()));
2800
2801    unsigned RetValReg
2802        = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
2803          X86::RAX : X86::EAX;
2804    Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2805    Flag = Chain.getValue(1);
2806
2807    // RAX/EAX now acts like a return value.
2808    RetOps.push_back(
2809        DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
2810
2811    // Add the returned register to the CalleeSaveDisableRegs list.
2812    if (ShouldDisableCalleeSavedRegister)
2813      MF.getRegInfo().disableCalleeSavedRegister(RetValReg);
2814  }
2815
2816  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
2817  const MCPhysReg *I =
2818      TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2819  if (I) {
2820    for (; *I; ++I) {
2821      if (X86::GR64RegClass.contains(*I))
2822        RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2823      else
2824        llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2825    }
2826  }
2827
2828  RetOps[0] = Chain;  // Update chain.
2829
2830  // Add the flag if we have it.
2831  if (Flag.getNode())
2832    RetOps.push_back(Flag);
2833
2834  X86ISD::NodeType opcode = X86ISD::RET_FLAG;
2835  if (CallConv == CallingConv::X86_INTR)
2836    opcode = X86ISD::IRET;
2837  return DAG.getNode(opcode, dl, MVT::Other, RetOps);
2838}
2839
2840bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2841  if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
2842    return false;
2843
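  // The value may only feed a single CopyToReg (or FP_EXTEND) whose users are
  // all RET_FLAG nodes; in that case the chain feeding the copy can be reused
  // for a tail call.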
2844  SDValue TCChain = Chain;
2845  SDNode *Copy = *N->use_begin();
2846  if (Copy->getOpcode() == ISD::CopyToReg) {
2847    // If the copy has a glue operand, we conservatively assume it isn't safe to
2848    // perform a tail call.
2849    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2850      return false;
2851    TCChain = Copy->getOperand(0);
2852  } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2853    return false;
2854
2855  bool HasRet = false;
2856  for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2857       UI != UE; ++UI) {
2858    if (UI->getOpcode() != X86ISD::RET_FLAG)
2859      return false;
2860    // If we are returning more than one value, we can definitely
2861    // not make a tail call; see PR19530.
2862    if (UI->getNumOperands() > 4)
2863      return false;
2864    if (UI->getNumOperands() == 4 &&
2865        UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2866      return false;
2867    HasRet = true;
2868  }
2869
2870  if (!HasRet)
2871    return false;
2872
2873  Chain = TCChain;
2874  return true;
2875}
2876
2877EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
2878                                           ISD::NodeType ExtendKind) const {
2879  MVT ReturnMVT = MVT::i32;
2880
2881  bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
2882  if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
2883    // The ABI does not require i1, i8 or i16 to be extended.
2884    //
2885    // On Darwin, there is code in the wild relying on Clang's old behaviour of
2886    // always extending i8/i16 return values, so keep doing that for now.
2887    // (PR26665).
2888    ReturnMVT = MVT::i8;
2889  }
2890
2891  EVT MinVT = getRegisterType(Context, ReturnMVT);
2892  return VT.bitsLT(MinVT) ? MinVT : VT;
2893}
2894
2895 /// Reads two 32-bit registers and creates a 64-bit mask value.
2896 /// \param VA The current 32-bit value that needs to be assigned.
2897 /// \param NextVA The next 32-bit value that needs to be assigned.
2898 /// \param Root The parent DAG node.
2899 /// \param [in,out] InFlag Represents the SDValue in the parent DAG node used
2900 ///                        for glue purposes. If the DAG is already using a
2901 ///                        physical register instead of a virtual one, we
2902 ///                        should glue our new SDValue to the InFlag SDValue.
2903 /// \return a new 64-bit SDValue.
2904static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
2905                                SDValue &Root, SelectionDAG &DAG,
2906                                const SDLoc &Dl, const X86Subtarget &Subtarget,
2907                                SDValue *InFlag = nullptr) {
2908  assert((Subtarget.hasBWI()) && "Expected AVX512BW target!");
2909  assert(Subtarget.is32Bit() && "Expecting 32 bit target");
2910  assert(VA.getValVT() == MVT::v64i1 &&
2911         "Expecting first location of 64 bit width type");
2912  assert(NextVA.getValVT() == VA.getValVT() &&
2913         "The locations should have the same type");
2914  assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2915         "The values should reside in two registers");
2916
2917  SDValue Lo, Hi;
2918  SDValue ArgValueLo, ArgValueHi;
2919
2920  MachineFunction &MF = DAG.getMachineFunction();
2921  const TargetRegisterClass *RC = &X86::GR32RegClass;
2922
2923  // Read a 32 bit value from the registers.
2924  if (nullptr == InFlag) {
2925    // When no physical register is present,
2926    // create an intermediate virtual register.
2927    unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2928    ArgValueLo = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
2929    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
2930    ArgValueHi = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
2931  } else {
2932    // When a physical register is available read the value from it and glue
2933    // the reads together.
2934    ArgValueLo =
2935      DAG.getCopyFromReg(Root, Dl, VA.getLocReg(), MVT::i32, *InFlag);
2936    *InFlag = ArgValueLo.getValue(2);
2937    ArgValueHi =
2938      DAG.getCopyFromReg(Root, Dl, NextVA.getLocReg(), MVT::i32, *InFlag);
2939    *InFlag = ArgValueHi.getValue(2);
2940  }
2941
2942  // Convert the low i32 value into a v32i1 type.
2943  Lo = DAG.getBitcast(MVT::v32i1, ArgValueLo);
2944
2945  // Convert the high i32 value into a v32i1 type.
2946  Hi = DAG.getBitcast(MVT::v32i1, ArgValueHi);
2947
2948  // Concatenate the two values together.
2949  return DAG.getNode(ISD::CONCAT_VECTORS, Dl, MVT::v64i1, Lo, Hi);
2950}
2951
2952 /// Lowers a register of various sizes (8/16/32/64 bits)
2953 /// to a mask value of the expected size (v8i1/v16i1/v32i1/v64i1).
2954 /// \returns a DAG node containing the operand after lowering to a mask type.
2955static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
2956                               const EVT &ValLoc, const SDLoc &Dl,
2957                               SelectionDAG &DAG) {
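  // For example, an i32 location carrying a v16i1 value is truncated to i16
  // and then bitcast back to v16i1.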
2958  SDValue ValReturned = ValArg;
2959
2960  if (ValVT == MVT::v1i1)
2961    return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned);
2962
2963  if (ValVT == MVT::v64i1) {
2964    // On a 32-bit machine this case is handled by getv64i1Argument.
2965    assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
2966    // On a 64-bit machine there is no need to truncate the value, only bitcast it.
2967  } else {
2968    MVT maskLen;
2969    switch (ValVT.getSimpleVT().SimpleTy) {
2970    case MVT::v8i1:
2971      maskLen = MVT::i8;
2972      break;
2973    case MVT::v16i1:
2974      maskLen = MVT::i16;
2975      break;
2976    case MVT::v32i1:
2977      maskLen = MVT::i32;
2978      break;
2979    default:
2980      llvm_unreachable("Expecting a vector of i1 types");
2981    }
2982
2983    ValReturned = DAG.getNode(ISD::TRUNCATE, Dl, maskLen, ValReturned);
2984  }
2985  return DAG.getBitcast(ValVT, ValReturned);
2986}
2987
2988/// Lower the result values of a call into the
2989/// appropriate copies out of appropriate physical registers.
2990///
2991SDValue X86TargetLowering::LowerCallResult(
2992    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
2993    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
2994    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
2995    uint32_t *RegMask) const {
2996
2997  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
2998  // Assign locations to each value returned by this call.
2999  SmallVector<CCValAssign, 16> RVLocs;
3000  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
3001                 *DAG.getContext());
3002  CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3003
3004  // Copy all of the result registers out of their specified physreg.
3005  for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
3006       ++I, ++InsIndex) {
3007    CCValAssign &VA = RVLocs[I];
3008    EVT CopyVT = VA.getLocVT();
3009
3010    // In some calling conventions we need to remove the used registers
3011    // from the register mask.
3012    if (RegMask) {
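      // Clear the bit for the result register and all of its sub-registers;
      // the mask stores one bit per register, 32 registers per 32-bit word.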
3013      for (MCSubRegIterator SubRegs(VA.getLocReg(), TRI, /*IncludeSelf=*/true);
3014           SubRegs.isValid(); ++SubRegs)
3015        RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
3016    }
3017
3018    // Report an error if there was an attempt to return FP values via XMM
3019    // registers.
3020    if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
3021      errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
3022      if (VA.getLocReg() == X86::XMM1)
3023        VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
3024      else
3025        VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3026    } else if (!Subtarget.hasSSE2() &&
3027               X86::FR64XRegClass.contains(VA.getLocReg()) &&
3028               CopyVT == MVT::f64) {
3029      errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
3030      if (VA.getLocReg() == X86::XMM1)
3031        VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
3032      else
3033        VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3034    }
3035
3036    // If we prefer to use the value in xmm registers, copy it out as f80 and
3037    // use a truncate to move it from fp stack reg to xmm reg.
3038    bool RoundAfterCopy = false;
3039    if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
3040        isScalarFPTypeInSSEReg(VA.getValVT())) {
3041      if (!Subtarget.hasX87())
3042        report_fatal_error("X87 register return with X87 disabled");
3043      CopyVT = MVT::f80;
3044      RoundAfterCopy = (CopyVT != VA.getLocVT());
3045    }
3046
3047    SDValue Val;
3048    if (VA.needsCustom()) {
3049      assert(VA.getValVT() == MVT::v64i1 &&
3050             "Currently the only custom case is when we split v64i1 to 2 regs");
3051      Val =
3052          getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InFlag);
3053    } else {
3054      Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), CopyVT, InFlag)
3055                  .getValue(1);
3056      Val = Chain.getValue(0);
3057      InFlag = Chain.getValue(2);
3058    }
3059
3060    if (RoundAfterCopy)
3061      Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
3062                        // This truncation won't change the value.
3063                        DAG.getIntPtrConstant(1, dl));
3064
3065    if (VA.isExtInLoc() && (VA.getValVT().getScalarType() == MVT::i1)) {
3066      if (VA.getValVT().isVector() &&
3067          ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
3068           (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
3069        // promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
3070        Val = lowerRegToMasks(Val, VA.getValVT(), VA.getLocVT(), dl, DAG);
3071      } else
3072        Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
3073    }
3074
3075    if (VA.getLocInfo() == CCValAssign::BCvt)
3076      Val = DAG.getBitcast(VA.getValVT(), Val);
3077
3078    InVals.push_back(Val);
3079  }
3080
3081  return Chain;
3082}
3083
3084//===----------------------------------------------------------------------===//
3085//                C & StdCall & Fast Calling Convention implementation
3086//===----------------------------------------------------------------------===//
3087 //  The StdCall calling convention is the standard for many Windows API
3088 //  routines. It differs from the C calling convention only a little: the
3089 //  callee cleans up the stack rather than the caller, and symbols are
3090 //  decorated in some fancy way :) It doesn't support any vector arguments.
3091//  For info on fast calling convention see Fast Calling Convention (tail call)
3092//  implementation LowerX86_32FastCCCallTo.
3093
3094 /// Determines whether a call uses struct return
3095 /// semantics.
3096enum StructReturnType {
3097  NotStructReturn,
3098  RegStructReturn,
3099  StackStructReturn
3100};
3101static StructReturnType
3102callIsStructReturn(ArrayRef<ISD::OutputArg> Outs, bool IsMCU) {
3103  if (Outs.empty())
3104    return NotStructReturn;
3105
3106  const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
3107  if (!Flags.isSRet())
3108    return NotStructReturn;
3109  if (Flags.isInReg() || IsMCU)
3110    return RegStructReturn;
3111  return StackStructReturn;
3112}
3113
3114/// Determines whether a function uses struct return semantics.
3115static StructReturnType
3116argsAreStructReturn(ArrayRef<ISD::InputArg> Ins, bool IsMCU) {
3117  if (Ins.empty())
3118    return NotStructReturn;
3119
3120  const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
3121  if (!Flags.isSRet())
3122    return NotStructReturn;
3123  if (Flags.isInReg() || IsMCU)
3124    return RegStructReturn;
3125  return StackStructReturn;
3126}
3127
3128/// Make a copy of an aggregate at address specified by "Src" to address
3129/// "Dst" with size and alignment information specified by the specific
3130/// parameter attribute. The copy will be passed as a byval function parameter.
3131static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
3132                                         SDValue Chain, ISD::ArgFlagsTy Flags,
3133                                         SelectionDAG &DAG, const SDLoc &dl) {
3134  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
3135
3136  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
3137                       /*isVolatile*/false, /*AlwaysInline=*/true,
3138                       /*isTailCall*/false,
3139                       MachinePointerInfo(), MachinePointerInfo());
3140}
3141
3142/// Return true if the calling convention is one that we can guarantee TCO for.
3143static bool canGuaranteeTCO(CallingConv::ID CC) {
3144  return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
3145          CC == CallingConv::X86_RegCall || CC == CallingConv::HiPE ||
3146          CC == CallingConv::HHVM || CC == CallingConv::Tail);
3147}
3148
3149/// Return true if we might ever do TCO for calls with this calling convention.
3150static bool mayTailCallThisCC(CallingConv::ID CC) {
3151  switch (CC) {
3152  // C calling conventions:
3153  case CallingConv::C:
3154  case CallingConv::Win64:
3155  case CallingConv::X86_64_SysV:
3156  // Callee pop conventions:
3157  case CallingConv::X86_ThisCall:
3158  case CallingConv::X86_StdCall:
3159  case CallingConv::X86_VectorCall:
3160  case CallingConv::X86_FastCall:
3161  // Swift:
3162  case CallingConv::Swift:
3163    return true;
3164  default:
3165    return canGuaranteeTCO(CC);
3166  }
3167}
3168
3169/// Return true if the function is being made into a tailcall target by
3170/// changing its ABI.
3171static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
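  // The 'tail' calling convention always gets the tail-call ABI; other
  // TCO-capable conventions only do so when GuaranteedTailCallOpt
  // (-tailcallopt) is set.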
3172  return (GuaranteedTailCallOpt && canGuaranteeTCO(CC)) || CC == CallingConv::Tail;
3173}
3174
3175bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
3176  if (!CI->isTailCall())
3177    return false;
3178
3179  ImmutableCallSite CS(CI);
3180  CallingConv::ID CalleeCC = CS.getCallingConv();
3181  if (!mayTailCallThisCC(CalleeCC))
3182    return false;
3183
3184  return true;
3185}
3186
3187SDValue
3188X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
3189                                    const SmallVectorImpl<ISD::InputArg> &Ins,
3190                                    const SDLoc &dl, SelectionDAG &DAG,
3191                                    const CCValAssign &VA,
3192                                    MachineFrameInfo &MFI, unsigned i) const {
3193  // Create the nodes corresponding to a load from this parameter slot.
3194  ISD::ArgFlagsTy Flags = Ins[i].Flags;
3195  bool AlwaysUseMutable = shouldGuaranteeTCO(
3196      CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
3197  bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
3198  EVT ValVT;
3199  MVT PtrVT = getPointerTy(DAG.getDataLayout());
3200
3201  // If the value is passed by pointer, the address is passed instead of the
3202  // value itself. No need to extend if the mask value and location share the
3203  // same absolute size.
3204  bool ExtendedInMem =
3205      VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 &&
3206      VA.getValVT().getSizeInBits() != VA.getLocVT().getSizeInBits();
3207
3208  if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
3209    ValVT = VA.getLocVT();
3210  else
3211    ValVT = VA.getValVT();
3212
3213  // FIXME: For now, all byval parameter objects are marked mutable. This can be
3214  // changed with more analysis.
3215  // In case of tail call optimization, mark all arguments mutable, since they
3216  // could be overwritten by the lowering of arguments in case of a tail call.
3217  if (Flags.isByVal()) {
3218    unsigned Bytes = Flags.getByValSize();
3219    if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
3220
3221    // FIXME: For now, all byval parameter objects are marked as aliasing. This
3222    // can be improved with deeper analysis.
3223    int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable,
3224                                   /*isAliased=*/true);
3225    return DAG.getFrameIndex(FI, PtrVT);
3226  }
3227
3228  // This is an argument in memory. We might be able to perform copy elision.
3229  // If the argument is passed directly in memory without any extension, then we
3230  // can perform copy elision. Large vector types, for example, may be passed
3231  // indirectly by pointer.
3232  if (Flags.isCopyElisionCandidate() &&
3233      VA.getLocInfo() != CCValAssign::Indirect && !ExtendedInMem) {
3234    EVT ArgVT = Ins[i].ArgVT;
3235    SDValue PartAddr;
3236    if (Ins[i].PartOffset == 0) {
3237      // If this is a one-part value or the first part of a multi-part value,
3238      // create a stack object for the entire argument value type and return a
3239      // load from our portion of it. This assumes that if the first part of an
3240      // argument is in memory, the rest will also be in memory.
3241      int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
3242                                     /*IsImmutable=*/false);
3243      PartAddr = DAG.getFrameIndex(FI, PtrVT);
3244      return DAG.getLoad(
3245          ValVT, dl, Chain, PartAddr,
3246          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3247    } else {
3248      // This is not the first piece of an argument in memory. See if there is
3249      // already a fixed stack object including this offset. If so, assume it
3250      // was created by the PartOffset == 0 branch above and create a load from
3251      // the appropriate offset into it.
3252      int64_t PartBegin = VA.getLocMemOffset();
3253      int64_t PartEnd = PartBegin + ValVT.getSizeInBits() / 8;
3254      int FI = MFI.getObjectIndexBegin();
3255      for (; MFI.isFixedObjectIndex(FI); ++FI) {
3256        int64_t ObjBegin = MFI.getObjectOffset(FI);
3257        int64_t ObjEnd = ObjBegin + MFI.getObjectSize(FI);
3258        if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
3259          break;
3260      }
3261      if (MFI.isFixedObjectIndex(FI)) {
3262        SDValue Addr =
3263            DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getFrameIndex(FI, PtrVT),
3264                        DAG.getIntPtrConstant(Ins[i].PartOffset, dl));
3265        return DAG.getLoad(
3266            ValVT, dl, Chain, Addr,
3267            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI,
3268                                              Ins[i].PartOffset));
3269      }
3270    }
3271  }
3272
3273  int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
3274                                 VA.getLocMemOffset(), isImmutable);
3275
3276  // Set SExt or ZExt flag.
3277  if (VA.getLocInfo() == CCValAssign::ZExt) {
3278    MFI.setObjectZExt(FI, true);
3279  } else if (VA.getLocInfo() == CCValAssign::SExt) {
3280    MFI.setObjectSExt(FI, true);
3281  }
3282
3283  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3284  SDValue Val = DAG.getLoad(
3285      ValVT, dl, Chain, FIN,
3286      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3287  return ExtendedInMem
3288             ? (VA.getValVT().isVector()
3289                    ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
3290                    : DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
3291             : Val;
3292}
3293
3294// FIXME: Get this from tablegen.
3295static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
3296                                                const X86Subtarget &Subtarget) {
3297  assert(Subtarget.is64Bit());
3298
3299  if (Subtarget.isCallingConvWin64(CallConv)) {
3300    static const MCPhysReg GPR64ArgRegsWin64[] = {
3301      X86::RCX, X86::RDX, X86::R8,  X86::R9
3302    };
3303    return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
3304  }
3305
3306  static const MCPhysReg GPR64ArgRegs64Bit[] = {
3307    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
3308  };
3309  return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
3310}
3311
3312// FIXME: Get this from tablegen.
3313static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
3314                                                CallingConv::ID CallConv,
3315                                                const X86Subtarget &Subtarget) {
3316  assert(Subtarget.is64Bit());
3317  if (Subtarget.isCallingConvWin64(CallConv)) {
3318    // The XMM registers which might contain vararg parameters are shadowed by
3319    // their paired GPRs, so we only need to save the GPRs to their home
3320    // slots.
3321    // TODO: __vectorcall will change this.
3322    return None;
3323  }
3324
3325  const Function &F = MF.getFunction();
3326  bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
3327  bool isSoftFloat = Subtarget.useSoftFloat();
3328  assert(!(isSoftFloat && NoImplicitFloatOps) &&
3329         "SSE register cannot be used when SSE is disabled!");
3330  if (isSoftFloat || NoImplicitFloatOps || !Subtarget.hasSSE1())
3331    // When SSE is unavailable (soft-float, noimplicitfloat, or no SSE1, e.g.
3332    // in kernel mode), there are no XMM argument registers to save.
3333    return None;
3334
3335  static const MCPhysReg XMMArgRegs64Bit[] = {
3336    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3337    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3338  };
3339  return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
3340}
3341
3342#ifndef NDEBUG
3343static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
3344  return std::is_sorted(ArgLocs.begin(), ArgLocs.end(),
3345                        [](const CCValAssign &A, const CCValAssign &B) -> bool {
3346                          return A.getValNo() < B.getValNo();
3347                        });
3348}
3349#endif
3350
3351SDValue X86TargetLowering::LowerFormalArguments(
3352    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3353    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3354    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3355  MachineFunction &MF = DAG.getMachineFunction();
3356  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3357  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
3358
3359  const Function &F = MF.getFunction();
3360  if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
3361      F.getName() == "main")
3362    FuncInfo->setForceFramePointer(true);
3363
3364  MachineFrameInfo &MFI = MF.getFrameInfo();
3365  bool Is64Bit = Subtarget.is64Bit();
3366  bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
3367
3368  assert(
3369      !(isVarArg && canGuaranteeTCO(CallConv)) &&
3370      "Var args not supported with calling conv' regcall, fastcc, ghc or hipe");
3371
3372  // Assign locations to all of the incoming arguments.
3373  SmallVector<CCValAssign, 16> ArgLocs;
3374  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
3375
3376  // Allocate shadow area for Win64.
3377  if (IsWin64)
3378    CCInfo.AllocateStack(32, 8);
3379
3380  CCInfo.AnalyzeArguments(Ins, CC_X86);
3381
3382  // In vectorcall calling convention a second pass is required for the HVA
3383  // types.
3384  if (CallingConv::X86_VectorCall == CallConv) {
3385    CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
3386  }
3387
3388  // The next loop assumes that the locations are in the same order as the
3389  // input arguments.
3390  assert(isSortedByValueNo(ArgLocs) &&
3391         "Argument Location list must be sorted before lowering");
3392
3393  SDValue ArgValue;
3394  for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
3395       ++I, ++InsIndex) {
3396    assert(InsIndex < Ins.size() && "Invalid Ins index");
3397    CCValAssign &VA = ArgLocs[I];
3398
3399    if (VA.isRegLoc()) {
3400      EVT RegVT = VA.getLocVT();
3401      if (VA.needsCustom()) {
3402        assert(
3403            VA.getValVT() == MVT::v64i1 &&
3404            "Currently the only custom case is when we split v64i1 to 2 regs");
3405
3406        // In the regcall calling convention, v64i1 values that are compiled
3407        // for a 32-bit arch are split up into two registers.
3408        ArgValue =
3409            getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
3410      } else {
3411        const TargetRegisterClass *RC;
3412        if (RegVT == MVT::i8)
3413          RC = &X86::GR8RegClass;
3414        else if (RegVT == MVT::i16)
3415          RC = &X86::GR16RegClass;
3416        else if (RegVT == MVT::i32)
3417          RC = &X86::GR32RegClass;
3418        else if (Is64Bit && RegVT == MVT::i64)
3419          RC = &X86::GR64RegClass;
3420        else if (RegVT == MVT::f32)
3421          RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
3422        else if (RegVT == MVT::f64)
3423          RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
3424        else if (RegVT == MVT::f80)
3425          RC = &X86::RFP80RegClass;
3426        else if (RegVT == MVT::f128)
3427          RC = &X86::VR128RegClass;
3428        else if (RegVT.is512BitVector())
3429          RC = &X86::VR512RegClass;
3430        else if (RegVT.is256BitVector())
3431          RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
3432        else if (RegVT.is128BitVector())
3433          RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
3434        else if (RegVT == MVT::x86mmx)
3435          RC = &X86::VR64RegClass;
3436        else if (RegVT == MVT::v1i1)
3437          RC = &X86::VK1RegClass;
3438        else if (RegVT == MVT::v8i1)
3439          RC = &X86::VK8RegClass;
3440        else if (RegVT == MVT::v16i1)
3441          RC = &X86::VK16RegClass;
3442        else if (RegVT == MVT::v32i1)
3443          RC = &X86::VK32RegClass;
3444        else if (RegVT == MVT::v64i1)
3445          RC = &X86::VK64RegClass;
3446        else
3447          llvm_unreachable("Unknown argument type!");
3448
3449        unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3450        ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
3451      }
3452
3453      // If this is an 8 or 16-bit value, it is really passed promoted to 32
3454      // bits.  Insert an assert[sz]ext to capture this, then truncate to the
3455      // right size.
3456      if (VA.getLocInfo() == CCValAssign::SExt)
3457        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
3458                               DAG.getValueType(VA.getValVT()));
3459      else if (VA.getLocInfo() == CCValAssign::ZExt)
3460        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
3461                               DAG.getValueType(VA.getValVT()));
3462      else if (VA.getLocInfo() == CCValAssign::BCvt)
3463        ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
3464
3465      if (VA.isExtInLoc()) {
3466        // Handle MMX values passed in XMM regs.
3467        if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
3468          ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
3469        else if (VA.getValVT().isVector() &&
3470                 VA.getValVT().getScalarType() == MVT::i1 &&
3471                 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
3472                  (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
3473          // Promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
3474          ArgValue = lowerRegToMasks(ArgValue, VA.getValVT(), RegVT, dl, DAG);
3475        } else
3476          ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3477      }
3478    } else {
3479      assert(VA.isMemLoc());
3480      ArgValue =
3481          LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
3482    }
3483
3484    // If the value is passed via a pointer, do a load.
3485    if (VA.getLocInfo() == CCValAssign::Indirect && !Ins[I].Flags.isByVal())
3486      ArgValue =
3487          DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
3488
3489    InVals.push_back(ArgValue);
3490  }
3491
3492  for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
3493    // Swift calling convention does not require we copy the sret argument
3494    // into %rax/%eax for the return. We don't set SRetReturnReg for Swift.
3495    if (CallConv == CallingConv::Swift)
3496      continue;
3497
3498    // All x86 ABIs require that for returning structs by value we copy the
3499    // sret argument into %rax/%eax (depending on ABI) for the return. Save
3500    // the argument into a virtual register so that we can access it from the
3501    // return points.
3502    if (Ins[I].Flags.isSRet()) {
3503      unsigned Reg = FuncInfo->getSRetReturnReg();
3504      if (!Reg) {
3505        MVT PtrTy = getPointerTy(DAG.getDataLayout());
3506        Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
3507        FuncInfo->setSRetReturnReg(Reg);
3508      }
3509      SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[I]);
3510      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
3511      break;
3512    }
3513  }
3514
3515  unsigned StackSize = CCInfo.getNextStackOffset();
3516  // Align stack specially for tail calls.
3517  if (shouldGuaranteeTCO(CallConv,
3518                         MF.getTarget().Options.GuaranteedTailCallOpt))
3519    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
3520
3521  // If the function takes a variable number of arguments, make a frame index for
3522  // the start of the first vararg value... for expansion of llvm.va_start. We
3523  // can skip this if there are no va_start calls.
3524  if (MFI.hasVAStart() &&
3525      (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
3526                   CallConv != CallingConv::X86_ThisCall))) {
3527    FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
3528  }
3529
3530  // Figure out if XMM registers are in use.
3531  assert(!(Subtarget.useSoftFloat() &&
3532           F.hasFnAttribute(Attribute::NoImplicitFloat)) &&
3533         "SSE register cannot be used when SSE is disabled!");
3534
3535  // 64-bit calling conventions support varargs and register parameters, so we
3536  // have to do extra work to spill them in the prologue.
3537  if (Is64Bit && isVarArg && MFI.hasVAStart()) {
3538    // Find the first unallocated argument registers.
3539    ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
3540    ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
3541    unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
3542    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
3543    assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
3544           "SSE register cannot be used when SSE is disabled!");
3545
3546    // Gather all the live in physical registers.
3547    SmallVector<SDValue, 6> LiveGPRs;
3548    SmallVector<SDValue, 8> LiveXMMRegs;
3549    SDValue ALVal;
3550    for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
3551      unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
3552      LiveGPRs.push_back(
3553          DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
3554    }
3555    if (!ArgXMMs.empty()) {
3556      unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
3557      ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
3558      for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
3559        unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
3560        LiveXMMRegs.push_back(
3561            DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
3562      }
3563    }
3564
3565    if (IsWin64) {
3566      // Get to the caller-allocated home save location.  Add 8 to account
3567      // for the return address.
3568      int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
3569      FuncInfo->setRegSaveFrameIndex(
3570          MFI.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
3571      // Fixup to set vararg frame on shadow area (4 x i64).
3572      if (NumIntRegs < 4)
3573        FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
3574    } else {
3575      // For X86-64, if there are vararg parameters that are passed via
3576      // registers, then we must store them to their spots on the stack so
3577      // they may be loaded by dereferencing the result of va_next.
3578      FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
3579      FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
3580      FuncInfo->setRegSaveFrameIndex(MFI.CreateStackObject(
3581          ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
3582    }
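    // Worked example (assuming the usual SysV x86-64 setup with 6 argument
    // GPRs and 8 argument XMMs): the register save area is 6*8 + 8*16 = 176
    // bytes. For "int f(int a, ...)" one GPR is consumed by the fixed
    // argument, so VarArgsGPOffset = 8 and VarArgsFPOffset = 48, matching the
    // gp_offset/fp_offset fields that va_start initializes later.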
3583
3584    // Store the integer parameter registers.
3585    SmallVector<SDValue, 8> MemOps;
3586    SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
3587                                      getPointerTy(DAG.getDataLayout()));
3588    unsigned Offset = FuncInfo->getVarArgsGPOffset();
3589    for (SDValue Val : LiveGPRs) {
3590      SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3591                                RSFIN, DAG.getIntPtrConstant(Offset, dl));
3592      SDValue Store =
3593          DAG.getStore(Val.getValue(1), dl, Val, FIN,
3594                       MachinePointerInfo::getFixedStack(
3595                           DAG.getMachineFunction(),
3596                           FuncInfo->getRegSaveFrameIndex(), Offset));
3597      MemOps.push_back(Store);
3598      Offset += 8;
3599    }
3600
3601    if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
3602      // Now store the XMM (fp + vector) parameter registers.
3603      SmallVector<SDValue, 12> SaveXMMOps;
3604      SaveXMMOps.push_back(Chain);
3605      SaveXMMOps.push_back(ALVal);
3606      SaveXMMOps.push_back(DAG.getIntPtrConstant(
3607                             FuncInfo->getRegSaveFrameIndex(), dl));
3608      SaveXMMOps.push_back(DAG.getIntPtrConstant(
3609                             FuncInfo->getVarArgsFPOffset(), dl));
3610      SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
3611                        LiveXMMRegs.end());
3612      MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
3613                                   MVT::Other, SaveXMMOps));
3614    }
3615
3616    if (!MemOps.empty())
3617      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3618  }
3619
3620  if (isVarArg && MFI.hasMustTailInVarArgFunc()) {
3621    // Find the largest legal vector type.
3622    MVT VecVT = MVT::Other;
3623    // FIXME: Only some x86_32 calling conventions support AVX512.
3624    if (Subtarget.useAVX512Regs() &&
3625        (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
3626                     CallConv == CallingConv::Intel_OCL_BI)))
3627      VecVT = MVT::v16f32;
3628    else if (Subtarget.hasAVX())
3629      VecVT = MVT::v8f32;
3630    else if (Subtarget.hasSSE2())
3631      VecVT = MVT::v4f32;
3632
3633    // We forward some GPRs and some vector types.
3634    SmallVector<MVT, 2> RegParmTypes;
3635    MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
3636    RegParmTypes.push_back(IntVT);
3637    if (VecVT != MVT::Other)
3638      RegParmTypes.push_back(VecVT);
3639
3640    // Compute the set of forwarded registers. The rest are scratch.
3641    SmallVectorImpl<ForwardedRegister> &Forwards =
3642        FuncInfo->getForwardedMustTailRegParms();
3643    CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
3644
3645    // Forward AL for SysV x86_64 targets, since it is used for varargs.
3646    if (Is64Bit && !IsWin64 && !CCInfo.isAllocated(X86::AL)) {
3647      unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
3648      Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
3649    }
3650
3651    // Copy all forwards from physical to virtual registers.
3652    for (ForwardedRegister &FR : Forwards) {
3653      // FIXME: Can we use a less constrained schedule?
3654      SDValue RegVal = DAG.getCopyFromReg(Chain, dl, FR.VReg, FR.VT);
3655      FR.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(FR.VT));
3656      Chain = DAG.getCopyToReg(Chain, dl, FR.VReg, RegVal);
3657    }
3658  }
3659
3660  // Some CCs need callee pop.
3661  if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3662                       MF.getTarget().Options.GuaranteedTailCallOpt)) {
3663    FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
3664  } else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
3665    // X86 interrupts must pop the error code (and the alignment padding) if
3666    // present.
3667    FuncInfo->setBytesToPopOnReturn(Is64Bit ? 16 : 4);
3668  } else {
3669    FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
3670    // If this is an sret function, the return should pop the hidden pointer.
3671    if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
3672        !Subtarget.getTargetTriple().isOSMSVCRT() &&
3673        argsAreStructReturn(Ins, Subtarget.isTargetMCU()) == StackStructReturn)
3674      FuncInfo->setBytesToPopOnReturn(4);
3675  }
3676
3677  if (!Is64Bit) {
3678    // RegSaveFrameIndex is X86-64 only.
3679    FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
3680    if (CallConv == CallingConv::X86_FastCall ||
3681        CallConv == CallingConv::X86_ThisCall)
3682      // fastcc functions can't have varargs.
3683      FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
3684  }
3685
3686  FuncInfo->setArgumentStackSize(StackSize);
3687
3688  if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
3689    EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
3690    if (Personality == EHPersonality::CoreCLR) {
3691      assert(Is64Bit);
3692      // TODO: Add a mechanism to frame lowering that will allow us to indicate
3693      // that we'd prefer this slot be allocated towards the bottom of the frame
3694      // (i.e. near the stack pointer after allocating the frame).  Every
3695      // funclet needs a copy of this slot in its (mostly empty) frame, and the
3696      // offset from the bottom of this and each funclet's frame must be the
3697      // same, so the size of funclets' (mostly empty) frames is dictated by
3698      // how far this slot is from the bottom (since they allocate just enough
3699      // space to accommodate holding this slot at the correct offset).
3700      int PSPSymFI = MFI.CreateStackObject(8, 8, /*isSS=*/false);
3701      EHInfo->PSPSymFrameIdx = PSPSymFI;
3702    }
3703  }
3704
3705  if (CallConv == CallingConv::X86_RegCall ||
3706      F.hasFnAttribute("no_caller_saved_registers")) {
3707    MachineRegisterInfo &MRI = MF.getRegInfo();
3708    for (std::pair<unsigned, unsigned> Pair : MRI.liveins())
3709      MRI.disableCalleeSavedRegister(Pair.first);
3710  }
3711
3712  return Chain;
3713}
3714
3715SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
3716                                            SDValue Arg, const SDLoc &dl,
3717                                            SelectionDAG &DAG,
3718                                            const CCValAssign &VA,
3719                                            ISD::ArgFlagsTy Flags) const {
3720  unsigned LocMemOffset = VA.getLocMemOffset();
3721  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
3722  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3723                       StackPtr, PtrOff);
3724  if (Flags.isByVal())
3725    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
3726
3727  return DAG.getStore(
3728      Chain, dl, Arg, PtrOff,
3729      MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
3730}
3731
3732/// Emit a load of the return address if tail call
3733/// optimization is performed and it is required.
3734SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
3735    SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
3736    bool Is64Bit, int FPDiff, const SDLoc &dl) const {
3737  // Adjust the Return address stack slot.
3738  EVT VT = getPointerTy(DAG.getDataLayout());
3739  OutRetAddr = getReturnAddressFrameIndex(DAG);
3740
3741  // Load the "old" Return address.
3742  OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
3743  return SDValue(OutRetAddr.getNode(), 1);
3744}
3745
3746/// Emit a store of the return address if tail call
3747/// optimization is performed and it is required (FPDiff!=0).
3748static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
3749                                        SDValue Chain, SDValue RetAddrFrIdx,
3750                                        EVT PtrVT, unsigned SlotSize,
3751                                        int FPDiff, const SDLoc &dl) {
3752  // Store the return address to the appropriate stack slot.
3753  if (!FPDiff) return Chain;
3754  // Calculate the new stack slot for the return address.
3755  int NewReturnAddrFI =
3756    MF.getFrameInfo().CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
3757                                         false);
3758  SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
3759  Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
3760                       MachinePointerInfo::getFixedStack(
3761                           DAG.getMachineFunction(), NewReturnAddrFI));
3762  return Chain;
3763}
3764
3765/// Returns a vector_shuffle mask for a movs{s|d} or movd
3766/// operation of the specified width.
3767static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
3768                       SDValue V2) {
3769  unsigned NumElems = VT.getVectorNumElements();
3770  SmallVector<int, 8> Mask;
3771  Mask.push_back(NumElems);
3772  for (unsigned i = 1; i != NumElems; ++i)
3773    Mask.push_back(i);
3774  return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
3775}
3776
3777SDValue
3778X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
3779                             SmallVectorImpl<SDValue> &InVals) const {
3780  SelectionDAG &DAG                     = CLI.DAG;
3781  SDLoc &dl                             = CLI.DL;
3782  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
3783  SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
3784  SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
3785  SDValue Chain                         = CLI.Chain;
3786  SDValue Callee                        = CLI.Callee;
3787  CallingConv::ID CallConv              = CLI.CallConv;
3788  bool &isTailCall                      = CLI.IsTailCall;
3789  bool isVarArg                         = CLI.IsVarArg;
3790
3791  MachineFunction &MF = DAG.getMachineFunction();
3792  bool Is64Bit        = Subtarget.is64Bit();
3793  bool IsWin64        = Subtarget.isCallingConvWin64(CallConv);
3794  StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
3795  bool IsSibcall      = false;
3796  bool IsGuaranteeTCO = MF.getTarget().Options.GuaranteedTailCallOpt ||
3797      CallConv == CallingConv::Tail;
3798  X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
3799  const auto *CI = dyn_cast_or_null<CallInst>(CLI.CS.getInstruction());
3800  const Function *Fn = CI ? CI->getCalledFunction() : nullptr;
3801  bool HasNCSR = (CI && CI->hasFnAttr("no_caller_saved_registers")) ||
3802                 (Fn && Fn->hasFnAttribute("no_caller_saved_registers"));
3803  const auto *II = dyn_cast_or_null<InvokeInst>(CLI.CS.getInstruction());
3804  bool HasNoCfCheck =
3805      (CI && CI->doesNoCfCheck()) || (II && II->doesNoCfCheck());
3806  const Module *M = MF.getMMI().getModule();
3807  Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
3808
3809  MachineFunction::CallSiteInfo CSInfo;
3810
3811  if (CallConv == CallingConv::X86_INTR)
3812    report_fatal_error("X86 interrupts may not be called directly");
3813
3814  if (Subtarget.isPICStyleGOT() && !IsGuaranteeTCO) {
3815    // If we are using a GOT, disable tail calls to external symbols with
3816    // default visibility. Tail calling such a symbol requires using a GOT
3817    // relocation, which forces early binding of the symbol. This breaks code
3818    // that require lazy function symbol resolution. Using musttail or
3819    // GuaranteedTailCallOpt will override this.
3820    GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
3821    if (!G || (!G->getGlobal()->hasLocalLinkage() &&
3822               G->getGlobal()->hasDefaultVisibility()))
3823      isTailCall = false;
3824  }
3825
3826  bool IsMustTail = CLI.CS && CLI.CS.isMustTailCall();
3827  if (IsMustTail) {
3828    // Force this to be a tail call.  The verifier rules are enough to ensure
3829    // that we can lower this successfully without moving the return address
3830    // around.
3831    isTailCall = true;
3832  } else if (isTailCall) {
3833    // Check if it's really possible to do a tail call.
3834    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
3835                    isVarArg, SR != NotStructReturn,
3836                    MF.getFunction().hasStructRetAttr(), CLI.RetTy,
3837                    Outs, OutVals, Ins, DAG);
3838
3839    // Sibcalls are automatically detected tailcalls which do not require
3840    // ABI changes.
3841    if (!IsGuaranteeTCO && isTailCall)
3842      IsSibcall = true;
3843
3844    if (isTailCall)
3845      ++NumTailCalls;
3846  }
3847
3848  assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
3849         "Var args not supported with calling convention fastcc, ghc or hipe");
3850
3851  // Analyze operands of the call, assigning locations to each operand.
3852  SmallVector<CCValAssign, 16> ArgLocs;
3853  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
3854
3855  // Allocate shadow area for Win64.
3856  if (IsWin64)
3857    CCInfo.AllocateStack(32, 8);
3858
3859  CCInfo.AnalyzeArguments(Outs, CC_X86);
3860
3861  // In vectorcall calling convention a second pass is required for the HVA
3862  // types.
3863  if (CallingConv::X86_VectorCall == CallConv) {
3864    CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
3865  }
3866
3867  // Get a count of how many bytes are to be pushed on the stack.
3868  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
3869  if (IsSibcall)
3870    // This is a sibcall. The memory operands are already available in the
3871    // caller's own incoming argument stack.
3872    NumBytes = 0;
3873  else if (IsGuaranteeTCO && canGuaranteeTCO(CallConv))
3874    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
3875
3876  int FPDiff = 0;
3877  if (isTailCall && !IsSibcall && !IsMustTail) {
3878    // Lower arguments at fp - stackoffset + fpdiff.
3879    unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
3880
3881    FPDiff = NumBytesCallerPushed - NumBytes;
3882
3883    // Set the delta of movement of the returnaddr stackslot, but only update
3884    // it if the new delta is smaller (more negative) than the previous delta.
3885    if (FPDiff < X86Info->getTCReturnAddrDelta())
3886      X86Info->setTCReturnAddrDelta(FPDiff);
3887  }
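  // Worked example: if the caller pops 16 bytes of its own arguments on
  // return but the callee needs 32 bytes, FPDiff = 16 - 32 = -16, and
  // EmitTailCallStoreRetAddr (above) will re-store the return address
  // FPDiff bytes away from its original slot.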
3888
3889  unsigned NumBytesToPush = NumBytes;
3890  unsigned NumBytesToPop = NumBytes;
3891
3892  // If we have an inalloca argument, all stack space has already been allocated
3893  // for us and is right at the top of the stack.  We don't support multiple
3894  // arguments passed in memory when using inalloca.
3895  if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
3896    NumBytesToPush = 0;
3897    if (!ArgLocs.back().isMemLoc())
3898      report_fatal_error("cannot use inalloca attribute on a register "
3899                         "parameter");
3900    if (ArgLocs.back().getLocMemOffset() != 0)
3901      report_fatal_error("any parameter with the inalloca attribute must be "
3902                         "the only memory argument");
3903  }
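  // Illustrative example: with MSVC-style inalloca the caller has already
  // materialized the whole argument block, roughly
  //   %args = alloca inalloca <{ %struct.S }>
  //   call void @g(<{ %struct.S }>* inalloca %args)
  // so nothing is pushed here (NumBytesToPush == 0) and the checks above only
  // verify that the inalloca block is the sole, offset-0 memory argument.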
3904
3905  if (!IsSibcall && !IsMustTail)
3906    Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
3907                                 NumBytes - NumBytesToPush, dl);
3908
3909  SDValue RetAddrFrIdx;
3910  // Load return address for tail calls.
3911  if (isTailCall && FPDiff)
3912    Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
3913                                    Is64Bit, FPDiff, dl);
3914
3915  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
3916  SmallVector<SDValue, 8> MemOpChains;
3917  SDValue StackPtr;
3918
3919  // The next loop assumes that the locations are in the same order of the
3920  // input arguments.
3921  assert(isSortedByValueNo(ArgLocs) &&
3922         "Argument Location list must be sorted before lowering");
3923
3924  // Walk the register/memloc assignments, inserting copies/loads.  In the case
3925  // of tail call optimization arguments are handle later.
3926  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
3927  for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
3928       ++I, ++OutIndex) {
3929    assert(OutIndex < Outs.size() && "Invalid Out index");
3930    // Skip inalloca arguments, they have already been written.
3931    ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags;
3932    if (Flags.isInAlloca())
3933      continue;
3934
3935    CCValAssign &VA = ArgLocs[I];
3936    EVT RegVT = VA.getLocVT();
3937    SDValue Arg = OutVals[OutIndex];
3938    bool isByVal = Flags.isByVal();
3939
3940    // Promote the value if needed.
3941    switch (VA.getLocInfo()) {
3942    default: llvm_unreachable("Unknown loc info!");
3943    case CCValAssign::Full: break;
3944    case CCValAssign::SExt:
3945      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
3946      break;
3947    case CCValAssign::ZExt:
3948      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
3949      break;
3950    case CCValAssign::AExt:
3951      if (Arg.getValueType().isVector() &&
3952          Arg.getValueType().getVectorElementType() == MVT::i1)
3953        Arg = lowerMasksToReg(Arg, RegVT, dl, DAG);
3954      else if (RegVT.is128BitVector()) {
3955        // Special case: passing MMX values in XMM registers.
3956        Arg = DAG.getBitcast(MVT::i64, Arg);
3957        Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
3958        Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
3959      } else
3960        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
3961      break;
3962    case CCValAssign::BCvt:
3963      Arg = DAG.getBitcast(RegVT, Arg);
3964      break;
3965    case CCValAssign::Indirect: {
3966      if (isByVal) {
3967        // Memcpy the argument to a temporary stack slot to prevent
3968        // the caller from seeing any modifications the callee may make
3969        // as guaranteed by the `byval` attribute.
3970        int FrameIdx = MF.getFrameInfo().CreateStackObject(
3971            Flags.getByValSize(), std::max(16, (int)Flags.getByValAlign()),
3972            false);
3973        SDValue StackSlot =
3974            DAG.getFrameIndex(FrameIdx, getPointerTy(DAG.getDataLayout()));
3975        Chain =
3976            CreateCopyOfByValArgument(Arg, StackSlot, Chain, Flags, DAG, dl);
3977        // From now on treat this as a regular pointer
3978        Arg = StackSlot;
3979        isByVal = false;
3980      } else {
3981        // Store the argument.
3982        SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
3983        int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
3984        Chain = DAG.getStore(
3985            Chain, dl, Arg, SpillSlot,
3986            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3987        Arg = SpillSlot;
3988      }
3989      break;
3990    }
3991    }
3992
3993    if (VA.needsCustom()) {
3994      assert(VA.getValVT() == MVT::v64i1 &&
3995             "Currently the only custom case is when we split v64i1 to 2 regs");
3996      // Split v64i1 value into two registers
3997      Passv64i1ArgInRegs(dl, DAG, Arg, RegsToPass, VA, ArgLocs[++I], Subtarget);
3998    } else if (VA.isRegLoc()) {
3999      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
4000      const TargetOptions &Options = DAG.getTarget().Options;
4001      if (Options.EnableDebugEntryValues)
4002        CSInfo.emplace_back(VA.getLocReg(), I);
4003      if (isVarArg && IsWin64) {
4004        // Win64 ABI requires argument XMM reg to be copied to the corresponding
4005        // shadow reg if callee is a varargs function.
4006        unsigned ShadowReg = 0;
4007        switch (VA.getLocReg()) {
4008        case X86::XMM0: ShadowReg = X86::RCX; break;
4009        case X86::XMM1: ShadowReg = X86::RDX; break;
4010        case X86::XMM2: ShadowReg = X86::R8; break;
4011        case X86::XMM3: ShadowReg = X86::R9; break;
4012        }
4013        if (ShadowReg)
4014          RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
4015      }
4016    } else if (!IsSibcall && (!isTailCall || isByVal)) {
4017      assert(VA.isMemLoc());
4018      if (!StackPtr.getNode())
4019        StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
4020                                      getPointerTy(DAG.getDataLayout()));
4021      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
4022                                             dl, DAG, VA, Flags));
4023    }
4024  }
4025
4026  if (!MemOpChains.empty())
4027    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
4028
4029  if (Subtarget.isPICStyleGOT()) {
4030    // ELF / PIC requires the GOT pointer to be in the EBX register before
4031    // function calls made via the PLT.
4032    if (!isTailCall) {
4033      RegsToPass.push_back(std::make_pair(
4034          unsigned(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
4035                                          getPointerTy(DAG.getDataLayout()))));
4036    } else {
4037      // If we are tail calling and generating PIC/GOT style code, load the
4038      // address of the callee into ECX. The value in ecx is used as target of
4039      // the tail jump. This is done to circumvent the ebx/callee-saved problem
4040      // for tail calls on PIC/GOT architectures. Normally we would just put the
4041      // address of GOT into ebx and then call target@PLT. But for tail calls
4042      // ebx would be restored (since ebx is callee saved) before jumping to the
4043      // target@PLT.
4044
4045      // Note: The actual moving to ECX is done further down.
4046      GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4047      if (G && !G->getGlobal()->hasLocalLinkage() &&
4048          G->getGlobal()->hasDefaultVisibility())
4049        Callee = LowerGlobalAddress(Callee, DAG);
4050      else if (isa<ExternalSymbolSDNode>(Callee))
4051        Callee = LowerExternalSymbol(Callee, DAG);
4052    }
4053  }
4054
4055  if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
4056    // From AMD64 ABI document:
4057    // For calls that may call functions that use varargs or stdargs
4058    // (prototype-less calls or calls to functions containing ellipsis (...) in
4059    // the declaration) %al is used as hidden argument to specify the number
4060    // of SSE registers used. The contents of %al do not need to match exactly
4061    // the number of registers, but must be an ubound on the number of SSE
4062    // registers used and is in the range 0 - 8 inclusive.
4063
4064    // Count the number of XMM registers allocated.
4065    static const MCPhysReg XMMArgRegs[] = {
4066      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
4067      X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
4068    };
4069    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
4070    assert((Subtarget.hasSSE1() || !NumXMMRegs)
4071           && "SSE registers cannot be used when SSE is disabled");
4072
4073    RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
4074                                        DAG.getConstant(NumXMMRegs, dl,
4075                                                        MVT::i8)));
4076  }
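  // Example: a call such as printf("%f %f", a, b) passes the two doubles in
  // %xmm0/%xmm1, so NumXMMRegs == 2 and %al is set to 2; a call with no
  // FP/vector arguments sets %al to 0, letting the callee skip spilling the
  // XMM portion of its register save area.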
4077
4078  if (isVarArg && IsMustTail) {
4079    const auto &Forwards = X86Info->getForwardedMustTailRegParms();
4080    for (const auto &F : Forwards) {
4081      SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
4082      RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
4083    }
4084  }
4085
4086  // For tail calls lower the arguments to the 'real' stack slots.  Sibcalls
4087  // don't need this because the eligibility check rejects calls that require
4088  // shuffling arguments passed in memory.
4089  if (!IsSibcall && isTailCall) {
4090    // Force all the incoming stack arguments to be loaded from the stack
4091    // before any new outgoing arguments are stored to the stack, because the
4092    // outgoing stack slots may alias the incoming argument stack slots, and
4093    // the alias isn't otherwise explicit. This is slightly more conservative
4094    // than necessary, because it means that each store effectively depends
4095    // on every argument instead of just those arguments it would clobber.
4096    SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
4097
4098    SmallVector<SDValue, 8> MemOpChains2;
4099    SDValue FIN;
4100    int FI = 0;
4101    for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
4102         ++I, ++OutsIndex) {
4103      CCValAssign &VA = ArgLocs[I];
4104
4105      if (VA.isRegLoc()) {
4106        if (VA.needsCustom()) {
4107          assert((CallConv == CallingConv::X86_RegCall) &&
4108                 "Expecting custom case only in regcall calling convention");
4109          // This means that we are in a special case where one argument was
4110          // passed through two register locations; skip the next location.
4111          ++I;
4112        }
4113
4114        continue;
4115      }
4116
4117      assert(VA.isMemLoc());
4118      SDValue Arg = OutVals[OutsIndex];
4119      ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags;
4120      // Skip inalloca arguments.  They don't require any work.
4121      if (Flags.isInAlloca())
4122        continue;
4123      // Create frame index.
4124      int32_t Offset = VA.getLocMemOffset()+FPDiff;
4125      uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
4126      FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4127      FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
4128
4129      if (Flags.isByVal()) {
4130        // Copy relative to framepointer.
4131        SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
4132        if (!StackPtr.getNode())
4133          StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
4134                                        getPointerTy(DAG.getDataLayout()));
4135        Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
4136                             StackPtr, Source);
4137
4138        MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
4139                                                         ArgChain,
4140                                                         Flags, DAG, dl));
4141      } else {
4142        // Store relative to framepointer.
4143        MemOpChains2.push_back(DAG.getStore(
4144            ArgChain, dl, Arg, FIN,
4145            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4146      }
4147    }
4148
4149    if (!MemOpChains2.empty())
4150      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4151
4152    // Store the return address to the appropriate stack slot.
4153    Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
4154                                     getPointerTy(DAG.getDataLayout()),
4155                                     RegInfo->getSlotSize(), FPDiff, dl);
4156  }
4157
4158  // Build a sequence of copy-to-reg nodes chained together with token chain
4159  // and flag operands which copy the outgoing args into registers.
4160  SDValue InFlag;
4161  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
4162    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
4163                             RegsToPass[i].second, InFlag);
4164    InFlag = Chain.getValue(1);
4165  }
4166
4167  if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
4168    assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
4169    // In the 64-bit large code model, we have to make all calls
4170    // through a register, since the call instruction's 32-bit
4171    // pc-relative offset may not be large enough to hold the whole
4172    // address.
4173  } else if (Callee->getOpcode() == ISD::GlobalAddress ||
4174             Callee->getOpcode() == ISD::ExternalSymbol) {
4175    // Lower direct calls to global addresses and external symbols. Setting
4176    // ForCall to true here has the effect of removing WrapperRIP when possible
4177    // to allow direct calls to be selected without first materializing the
4178    // address into a register.
4179    Callee = LowerGlobalOrExternal(Callee, DAG, /*ForCall=*/true);
4180  } else if (Subtarget.isTarget64BitILP32() &&
4181             Callee->getValueType(0) == MVT::i32) {
4182    // Zero-extend the 32-bit Callee address to 64 bits according to the x32 ABI
4183    Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
4184  }
4185
4186  // Returns a chain & a flag for retval copy to use.
4187  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
4188  SmallVector<SDValue, 8> Ops;
4189
4190  if (!IsSibcall && isTailCall && !IsMustTail) {
4191    Chain = DAG.getCALLSEQ_END(Chain,
4192                               DAG.getIntPtrConstant(NumBytesToPop, dl, true),
4193                               DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
4194    InFlag = Chain.getValue(1);
4195  }
4196
4197  Ops.push_back(Chain);
4198  Ops.push_back(Callee);
4199
4200  if (isTailCall)
4201    Ops.push_back(DAG.getConstant(FPDiff, dl, MVT::i32));
4202
4203  // Add argument registers to the end of the list so that they are known live
4204  // into the call.
4205  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
4206    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
4207                                  RegsToPass[i].second.getValueType()));
4208
4209  // Add a register mask operand representing the call-preserved registers.
4210  // If HasNCSR is asserted (attribute NoCallerSavedRegisters exists) then we
4211  // use the X86_INTR calling convention because it has the same CSR mask
4212  // (same preserved registers).
4213  const uint32_t *Mask = RegInfo->getCallPreservedMask(
4214      MF, HasNCSR ? (CallingConv::ID)CallingConv::X86_INTR : CallConv);
4215  assert(Mask && "Missing call preserved mask for calling convention");
4216
4217  // If this is an invoke in a 32-bit function using a funclet-based
4218  // personality, assume the function clobbers all registers. If an exception
4219  // is thrown, the runtime will not restore CSRs.
4220  // FIXME: Model this more precisely so that we can register allocate across
4221  // the normal edge and spill and fill across the exceptional edge.
4222  if (!Is64Bit && CLI.CS && CLI.CS.isInvoke()) {
4223    const Function &CallerFn = MF.getFunction();
4224    EHPersonality Pers =
4225        CallerFn.hasPersonalityFn()
4226            ? classifyEHPersonality(CallerFn.getPersonalityFn())
4227            : EHPersonality::Unknown;
4228    if (isFuncletEHPersonality(Pers))
4229      Mask = RegInfo->getNoPreservedMask();
4230  }
4231
4232  // Define a new register mask from the existing mask.
4233  uint32_t *RegMask = nullptr;
4234
4235  // In some calling conventions we need to remove the used physical registers
4236  // from the reg mask.
4237  if (CallConv == CallingConv::X86_RegCall || HasNCSR) {
4238    const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
4239
4240    // Allocate a new Reg Mask and copy Mask.
4241    RegMask = MF.allocateRegMask();
4242    unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
4243    memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);
4244
4245    // Make sure all sub registers of the argument registers are reset
4246    // in the RegMask.
4247    for (auto const &RegPair : RegsToPass)
4248      for (MCSubRegIterator SubRegs(RegPair.first, TRI, /*IncludeSelf=*/true);
4249           SubRegs.isValid(); ++SubRegs)
4250        RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
4251
4252    // Create the RegMask Operand according to our updated mask.
4253    Ops.push_back(DAG.getRegisterMask(RegMask));
4254  } else {
4255    // Create the RegMask Operand according to the static mask.
4256    Ops.push_back(DAG.getRegisterMask(Mask));
4257  }
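  // A cleared bit in the mask marks a register as clobbered, so the loop
  // above, e.g. for an argument in RCX, also clears ECX/CX/CL/CH via the
  // sub-register iterator, ensuring no alias is treated as preserved.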
4258
4259  if (InFlag.getNode())
4260    Ops.push_back(InFlag);
4261
4262  if (isTailCall) {
4263    // We used to do:
4264    //// If this is the first return lowered for this function, add the regs
4265    //// to the liveout set for the function.
4266    // This isn't right, although it's probably harmless on x86; liveouts
4267    // should be computed from returns not tail calls.  Consider a void
4268    // function making a tail call to a function returning int.
4269    MF.getFrameInfo().setHasTailCall();
4270    SDValue Ret = DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
4271    DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
4272    return Ret;
4273  }
4274
4275  if (HasNoCfCheck && IsCFProtectionSupported) {
4276    Chain = DAG.getNode(X86ISD::NT_CALL, dl, NodeTys, Ops);
4277  } else {
4278    Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
4279  }
4280  InFlag = Chain.getValue(1);
4281  DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
4282
4283  // Save heapallocsite metadata.
4284  if (CLI.CS)
4285    if (MDNode *HeapAlloc = CLI.CS->getMetadata("heapallocsite"))
4286      DAG.addHeapAllocSite(Chain.getNode(), HeapAlloc);
4287
4288  // Create the CALLSEQ_END node.
4289  unsigned NumBytesForCalleeToPop;
4290  if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
4291                       DAG.getTarget().Options.GuaranteedTailCallOpt))
4292    NumBytesForCalleeToPop = NumBytes;    // Callee pops everything
4293  else if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
4294           !Subtarget.getTargetTriple().isOSMSVCRT() &&
4295           SR == StackStructReturn)
4296    // If this is a call to a struct-return function, the callee
4297    // pops the hidden struct pointer, so we have to push it back.
4298    // This is common for Darwin/X86, Linux & Mingw32 targets.
4299    // For MSVC Win32 targets, the caller pops the hidden struct pointer.
4300    NumBytesForCalleeToPop = 4;
4301  else
4302    NumBytesForCalleeToPop = 0;  // Callee pops nothing.
4303
4304  if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
4305    // No need to reset the stack after the call if the call doesn't return. To
4306    // make the MI verify, we'll pretend the callee does it for us.
4307    NumBytesForCalleeToPop = NumBytes;
4308  }
4309
4310  // Returns a flag for retval copy to use.
4311  if (!IsSibcall) {
4312    Chain = DAG.getCALLSEQ_END(Chain,
4313                               DAG.getIntPtrConstant(NumBytesToPop, dl, true),
4314                               DAG.getIntPtrConstant(NumBytesForCalleeToPop, dl,
4315                                                     true),
4316                               InFlag, dl);
4317    InFlag = Chain.getValue(1);
4318  }
4319
4320  // Handle result values, copying them out of physregs into vregs that we
4321  // return.
4322  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
4323                         InVals, RegMask);
4324}
4325
4326//===----------------------------------------------------------------------===//
4327//                Fast Calling Convention (tail call) implementation
4328//===----------------------------------------------------------------------===//
4329
4330//  Like stdcall, this is a callee-cleans-arguments convention, except that ECX
4331//  is reserved for storing the tail-called function address. Only 2 registers
4332//  are free for argument passing (inreg). Tail call optimization is performed
4333//  provided:
4334//                * tailcallopt is enabled
4335//                * caller/callee are fastcc
4336//  On X86_64 architecture with GOT-style position independent code only local
4337//  (within module) calls are supported at the moment.
4338//  To keep the stack aligned according to the platform ABI, the function
4339//  GetAlignedArgumentStackSize ensures that the argument delta is always a
4340//  multiple of the stack alignment (dynamic linkers such as darwin's dyld need this).
4341//  If a tail-called callee has more arguments than the caller, the caller needs
4342//  to make sure that there is room to move the RETADDR to. This is
4343//  achieved by reserving an area the size of the argument delta right after the
4344//  original RETADDR, but before the saved framepointer or the spilled registers
4345//  e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
4346//  stack layout:
4347//    arg1
4348//    arg2
4349//    RETADDR
4350//    [ new RETADDR
4351//      move area ]
4352//    (possible EBP)
4353//    ESI
4354//    EDI
4355//    local1 ..
4356
4357/// Make the stack size aligned, e.g. to 16n + 12 bytes, to satisfy a 16-byte
4358/// alignment requirement.
4359unsigned
4360X86TargetLowering::GetAlignedArgumentStackSize(const unsigned StackSize,
4361                                               SelectionDAG &DAG) const {
4362  const Align StackAlignment(Subtarget.getFrameLowering()->getStackAlignment());
4363  const uint64_t SlotSize = Subtarget.getRegisterInfo()->getSlotSize();
4364  assert(StackSize % SlotSize == 0 &&
4365         "StackSize must be a multiple of SlotSize");
4366  return alignTo(StackSize + SlotSize, StackAlignment) - SlotSize;
4367}
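// Worked example: with a 16-byte stack alignment and 4-byte slots (32-bit),
// StackSize = 20 yields alignTo(20 + 4, 16) - 4 = 28, i.e. the "16n + 12"
// shape mentioned above, so the argument area plus the return-address slot
// together stay 16-byte aligned.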
4368
4369/// Return true if the given stack call argument is already available in the
4370/// same position (relatively) of the caller's incoming argument stack.
4371static
4372bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
4373                         MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
4374                         const X86InstrInfo *TII, const CCValAssign &VA) {
4375  unsigned Bytes = Arg.getValueSizeInBits() / 8;
4376
4377  for (;;) {
4378    // Look through nodes that don't alter the bits of the incoming value.
4379    unsigned Op = Arg.getOpcode();
4380    if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
4381      Arg = Arg.getOperand(0);
4382      continue;
4383    }
4384    if (Op == ISD::TRUNCATE) {
4385      const SDValue &TruncInput = Arg.getOperand(0);
4386      if (TruncInput.getOpcode() == ISD::AssertZext &&
4387          cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
4388              Arg.getValueType()) {
4389        Arg = TruncInput.getOperand(0);
4390        continue;
4391      }
4392    }
4393    break;
4394  }
4395
4396  int FI = INT_MAX;
4397  if (Arg.getOpcode() == ISD::CopyFromReg) {
4398    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
4399    if (!Register::isVirtualRegister(VR))
4400      return false;
4401    MachineInstr *Def = MRI->getVRegDef(VR);
4402    if (!Def)
4403      return false;
4404    if (!Flags.isByVal()) {
4405      if (!TII->isLoadFromStackSlot(*Def, FI))
4406        return false;
4407    } else {
4408      unsigned Opcode = Def->getOpcode();
4409      if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
4410           Opcode == X86::LEA64_32r) &&
4411          Def->getOperand(1).isFI()) {
4412        FI = Def->getOperand(1).getIndex();
4413        Bytes = Flags.getByValSize();
4414      } else
4415        return false;
4416    }
4417  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
4418    if (Flags.isByVal())
4419      // ByVal argument is passed in as a pointer but it's now being
4420      // dereferenced. e.g.
4421      // define @foo(%struct.X* %A) {
4422      //   tail call @bar(%struct.X* byval %A)
4423      // }
4424      return false;
4425    SDValue Ptr = Ld->getBasePtr();
4426    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
4427    if (!FINode)
4428      return false;
4429    FI = FINode->getIndex();
4430  } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
4431    FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
4432    FI = FINode->getIndex();
4433    Bytes = Flags.getByValSize();
4434  } else
4435    return false;
4436
4437  assert(FI != INT_MAX);
4438  if (!MFI.isFixedObjectIndex(FI))
4439    return false;
4440
4441  if (Offset != MFI.getObjectOffset(FI))
4442    return false;
4443
4444  // If this is not byval, check that the argument stack object is immutable.
4445  // inalloca and argument copy elision can create mutable argument stack
4446  // objects. Byval objects can be mutated, but a byval call intends to pass the
4447  // mutated memory.
4448  if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI))
4449    return false;
4450
4451  if (VA.getLocVT().getSizeInBits() > Arg.getValueSizeInBits()) {
4452    // If the argument location is wider than the argument type, check that any
4453    // extension flags match.
4454    if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
4455        Flags.isSExt() != MFI.isObjectSExt(FI)) {
4456      return false;
4457    }
4458  }
4459
4460  return Bytes == MFI.getObjectSize(FI);
4461}
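// Illustrative example: if a 32-bit cdecl function f(int x) tail-calls
// g(int x) forwarding x unchanged, x already lives in the caller's fixed
// incoming stack slot at the offset g expects, so MatchingStackOffset returns
// true and no outgoing store is needed for that argument.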
4462
4463/// Check whether the call is eligible for tail call optimization. Targets
4464/// that want to do tail call optimization should implement this function.
4465bool X86TargetLowering::IsEligibleForTailCallOptimization(
4466    SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
4467    bool isCalleeStructRet, bool isCallerStructRet, Type *RetTy,
4468    const SmallVectorImpl<ISD::OutputArg> &Outs,
4469    const SmallVectorImpl<SDValue> &OutVals,
4470    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4471  if (!mayTailCallThisCC(CalleeCC))
4472    return false;
4473
4474  // If -tailcallopt is specified, make fastcc functions tail-callable.
4475  MachineFunction &MF = DAG.getMachineFunction();
4476  const Function &CallerF = MF.getFunction();
4477
4478  // If the function return type is x86_fp80 and the callee return type is not,
4479  // then the FP_EXTEND of the call result is not a nop. It's not safe to
4480  // perform a tailcall optimization here.
4481  if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
4482    return false;
4483
4484  CallingConv::ID CallerCC = CallerF.getCallingConv();
4485  bool CCMatch = CallerCC == CalleeCC;
4486  bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
4487  bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
4488  bool IsGuaranteeTCO = DAG.getTarget().Options.GuaranteedTailCallOpt ||
4489      CalleeCC == CallingConv::Tail;
4490
4491  // Win64 functions have extra shadow space for argument homing. Don't do the
4492  // sibcall if the caller and callee have mismatched expectations for this
4493  // space.
4494  if (IsCalleeWin64 != IsCallerWin64)
4495    return false;
4496
4497  if (IsGuaranteeTCO) {
4498    if (canGuaranteeTCO(CalleeCC) && CCMatch)
4499      return true;
4500    return false;
4501  }
4502
4503  // Look for obvious safe cases to perform tail call optimization that do not
4504  // require ABI changes. This is what gcc calls sibcall.
4505
4506  // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
4507  // emit a special epilogue.
4508  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4509  if (RegInfo->needsStackRealignment(MF))
4510    return false;
4511
4512  // Also avoid sibcall optimization if either caller or callee uses struct
4513  // return semantics.
4514  if (isCalleeStructRet || isCallerStructRet)
4515    return false;
4516
4517  // Do not sibcall optimize vararg calls unless all arguments are passed via
4518  // registers.
4519  LLVMContext &C = *DAG.getContext();
4520  if (isVarArg && !Outs.empty()) {
4521    // Optimizing for varargs on Win64 is unlikely to be safe without
4522    // additional testing.
4523    if (IsCalleeWin64 || IsCallerWin64)
4524      return false;
4525
4526    SmallVector<CCValAssign, 16> ArgLocs;
4527    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
4528
4529    CCInfo.AnalyzeCallOperands(Outs, CC_X86);
4530    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
4531      if (!ArgLocs[i].isRegLoc())
4532        return false;
4533  }
4534
4535  // If the call result is in ST0 / ST1, it needs to be popped off the x87
4536  // stack.  Therefore, if it's not used by the call it is not safe to optimize
4537  // this into a sibcall.
4538  bool Unused = false;
4539  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
4540    if (!Ins[i].Used) {
4541      Unused = true;
4542      break;
4543    }
4544  }
4545  if (Unused) {
4546    SmallVector<CCValAssign, 16> RVLocs;
4547    CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
4548    CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
4549    for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
4550      CCValAssign &VA = RVLocs[i];
4551      if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
4552        return false;
4553    }
4554  }
4555
4556  // Check that the call results are passed in the same way.
4557  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
4558                                  RetCC_X86, RetCC_X86))
4559    return false;
4560  // The callee has to preserve all registers the caller needs to preserve.
4561  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
4562  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
4563  if (!CCMatch) {
4564    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
4565    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
4566      return false;
4567  }
4568
4569  unsigned StackArgsSize = 0;
4570
4571  // If the callee takes no arguments then go on to check the results of the
4572  // call.
4573  if (!Outs.empty()) {
4574    // Check if stack adjustment is needed. For now, do not do this if any
4575    // argument is passed on the stack.
4576    SmallVector<CCValAssign, 16> ArgLocs;
4577    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
4578
4579    // Allocate shadow area for Win64
4580    if (IsCalleeWin64)
4581      CCInfo.AllocateStack(32, 8);
4582
4583    CCInfo.AnalyzeCallOperands(Outs, CC_X86);
4584    StackArgsSize = CCInfo.getNextStackOffset();
4585
4586    if (CCInfo.getNextStackOffset()) {
4587      // Check if the arguments are already laid out in the right way as
4588      // the caller's fixed stack objects.
4589      MachineFrameInfo &MFI = MF.getFrameInfo();
4590      const MachineRegisterInfo *MRI = &MF.getRegInfo();
4591      const X86InstrInfo *TII = Subtarget.getInstrInfo();
4592      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4593        CCValAssign &VA = ArgLocs[i];
4594        SDValue Arg = OutVals[i];
4595        ISD::ArgFlagsTy Flags = Outs[i].Flags;
4596        if (VA.getLocInfo() == CCValAssign::Indirect)
4597          return false;
4598        if (!VA.isRegLoc()) {
4599          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
4600                                   MFI, MRI, TII, VA))
4601            return false;
4602        }
4603      }
4604    }
4605
4606    bool PositionIndependent = isPositionIndependent();
4607    // If the tailcall address may be in a register, then make sure it's
4608    // possible to register allocate for it. In 32-bit, the call address can
4609    // only target EAX, EDX, or ECX since the tail call must be scheduled after
4610    // callee-saved registers are restored. These happen to be the same
4611    // registers used to pass 'inreg' arguments so watch out for those.
4612    if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
4613                                  !isa<ExternalSymbolSDNode>(Callee)) ||
4614                                 PositionIndependent)) {
4615      unsigned NumInRegs = 0;
4616      // In PIC we need an extra register to formulate the address computation
4617      // for the callee.
4618      unsigned MaxInRegs = PositionIndependent ? 2 : 3;
4619
4620      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4621        CCValAssign &VA = ArgLocs[i];
4622        if (!VA.isRegLoc())
4623          continue;
4624        Register Reg = VA.getLocReg();
4625        switch (Reg) {
4626        default: break;
4627        case X86::EAX: case X86::EDX: case X86::ECX:
4628          if (++NumInRegs == MaxInRegs)
4629            return false;
4630          break;
4631        }
4632      }
4633    }
4634
4635    const MachineRegisterInfo &MRI = MF.getRegInfo();
4636    if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
4637      return false;
4638  }
4639
4640  bool CalleeWillPop =
4641      X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
4642                       MF.getTarget().Options.GuaranteedTailCallOpt);
4643
4644  if (unsigned BytesToPop =
4645          MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) {
4646    // If we have bytes to pop, the callee must pop them.
4647    bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
4648    if (!CalleePopMatches)
4649      return false;
4650  } else if (CalleeWillPop && StackArgsSize > 0) {
4651    // If we don't have bytes to pop, make sure the callee doesn't pop any.
4652    return false;
4653  }
4654
4655  return true;
4656}
4657
4658FastISel *
4659X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
4660                                  const TargetLibraryInfo *libInfo) const {
4661  return X86::createFastISel(funcInfo, libInfo);
4662}
4663
4664//===----------------------------------------------------------------------===//
4665//                           Other Lowering Hooks
4666//===----------------------------------------------------------------------===//
4667
4668static bool MayFoldLoad(SDValue Op) {
4669  return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
4670}
4671
4672static bool MayFoldIntoStore(SDValue Op) {
4673  return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
4674}
4675
4676static bool MayFoldIntoZeroExtend(SDValue Op) {
4677  if (Op.hasOneUse()) {
4678    unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
4679    return (ISD::ZERO_EXTEND == Opcode);
4680  }
4681  return false;
4682}
4683
4684static bool isTargetShuffle(unsigned Opcode) {
4685  switch(Opcode) {
4686  default: return false;
4687  case X86ISD::BLENDI:
4688  case X86ISD::PSHUFB:
4689  case X86ISD::PSHUFD:
4690  case X86ISD::PSHUFHW:
4691  case X86ISD::PSHUFLW:
4692  case X86ISD::SHUFP:
4693  case X86ISD::INSERTPS:
4694  case X86ISD::EXTRQI:
4695  case X86ISD::INSERTQI:
4696  case X86ISD::PALIGNR:
4697  case X86ISD::VSHLDQ:
4698  case X86ISD::VSRLDQ:
4699  case X86ISD::MOVLHPS:
4700  case X86ISD::MOVHLPS:
4701  case X86ISD::MOVSHDUP:
4702  case X86ISD::MOVSLDUP:
4703  case X86ISD::MOVDDUP:
4704  case X86ISD::MOVSS:
4705  case X86ISD::MOVSD:
4706  case X86ISD::UNPCKL:
4707  case X86ISD::UNPCKH:
4708  case X86ISD::VBROADCAST:
4709  case X86ISD::VPERMILPI:
4710  case X86ISD::VPERMILPV:
4711  case X86ISD::VPERM2X128:
4712  case X86ISD::SHUF128:
4713  case X86ISD::VPERMIL2:
4714  case X86ISD::VPERMI:
4715  case X86ISD::VPPERM:
4716  case X86ISD::VPERMV:
4717  case X86ISD::VPERMV3:
4718  case X86ISD::VZEXT_MOVL:
4719    return true;
4720  }
4721}
4722
4723static bool isTargetShuffleVariableMask(unsigned Opcode) {
4724  switch (Opcode) {
4725  default: return false;
4726  // Target Shuffles.
4727  case X86ISD::PSHUFB:
4728  case X86ISD::VPERMILPV:
4729  case X86ISD::VPERMIL2:
4730  case X86ISD::VPPERM:
4731  case X86ISD::VPERMV:
4732  case X86ISD::VPERMV3:
4733    return true;
4734  // 'Faux' Target Shuffles.
4735  case ISD::OR:
4736  case ISD::AND:
4737  case X86ISD::ANDNP:
4738    return true;
4739  }
4740}
4741
4742SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
4743  MachineFunction &MF = DAG.getMachineFunction();
4744  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4745  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
4746  int ReturnAddrIndex = FuncInfo->getRAIndex();
4747
4748  if (ReturnAddrIndex == 0) {
4749    // Set up a frame object for the return address.
4750    unsigned SlotSize = RegInfo->getSlotSize();
4751    ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize,
4752                                                          -(int64_t)SlotSize,
4753                                                          false);
4754    FuncInfo->setRAIndex(ReturnAddrIndex);
4755  }
4756
4757  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
4758}
4759
4760bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
4761                                       bool hasSymbolicDisplacement) {
4762  // Offset should fit into 32 bit immediate field.
4763  if (!isInt<32>(Offset))
4764    return false;
4765
4766  // If we don't have a symbolic displacement, we don't have any extra
4767  // restrictions.
4768  if (!hasSymbolicDisplacement)
4769    return true;
4770
4771  // FIXME: Some tweaks might be needed for medium code model.
4772  if (M != CodeModel::Small && M != CodeModel::Kernel)
4773    return false;
4774
4775  // For the small code model we assume that the last object is 16MB before the
4776  // end of the 31-bit address range. We may also accept pretty large negative
4777  // constants knowing that all objects are in the positive half of the address space.
4778  if (M == CodeModel::Small && Offset < 16*1024*1024)
4779    return true;
4780
4781  // For the kernel code model we know that all objects reside in the negative
4782  // half of the 32-bit address space. We do not accept negative offsets, since
4783  // they may be just out of range, but we may accept pretty large positive ones.
4784  if (M == CodeModel::Kernel && Offset >= 0)
4785    return true;
4786
4787  return false;
4788}
4789
4790/// Determines whether the callee is required to pop its own arguments.
4791/// Callee pop is necessary to support tail calls.
4792bool X86::isCalleePop(CallingConv::ID CallingConv,
4793                      bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
4794  // If GuaranteeTCO is true, we force some calls to be callee pop so that we
4795  // can guarantee TCO.
4796  if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
4797    return true;
4798
4799  switch (CallingConv) {
4800  default:
4801    return false;
4802  case CallingConv::X86_StdCall:
4803  case CallingConv::X86_FastCall:
4804  case CallingConv::X86_ThisCall:
4805  case CallingConv::X86_VectorCall:
4806    return !is64Bit;
4807  }
4808}
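// Illustrative examples: a 32-bit stdcall, fastcall, thiscall or vectorcall
// callee pops its own stack arguments (its RET also releases the argument
// bytes), so this returns true; the same conventions on a 64-bit target, and
// the default C convention everywhere, leave the cleanup to the caller unless
// guaranteed tail-call optimization forces callee-pop.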
4809
4810/// Return true if the condition is a signed comparison operation.
4811static bool isX86CCSigned(unsigned X86CC) {
4812  switch (X86CC) {
4813  default:
4814    llvm_unreachable("Invalid integer condition!");
4815  case X86::COND_E:
4816  case X86::COND_NE:
4817  case X86::COND_B:
4818  case X86::COND_A:
4819  case X86::COND_BE:
4820  case X86::COND_AE:
4821    return false;
4822  case X86::COND_G:
4823  case X86::COND_GE:
4824  case X86::COND_L:
4825  case X86::COND_LE:
4826    return true;
4827  }
4828}
4829
4830static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
4831  switch (SetCCOpcode) {
4832  default: llvm_unreachable("Invalid integer condition!");
4833  case ISD::SETEQ:  return X86::COND_E;
4834  case ISD::SETGT:  return X86::COND_G;
4835  case ISD::SETGE:  return X86::COND_GE;
4836  case ISD::SETLT:  return X86::COND_L;
4837  case ISD::SETLE:  return X86::COND_LE;
4838  case ISD::SETNE:  return X86::COND_NE;
4839  case ISD::SETULT: return X86::COND_B;
4840  case ISD::SETUGT: return X86::COND_A;
4841  case ISD::SETULE: return X86::COND_BE;
4842  case ISD::SETUGE: return X86::COND_AE;
4843  }
4844}
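// Example of the mapping above (hypothetical IR): a signed "icmp sgt" is
// translated to COND_G (typically selected as JG/SETG after a CMP), while the
// unsigned "icmp ugt" becomes COND_A (JA/SETA); both read the flags of the
// same subtraction but test different bits.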
4845
4846/// Do a one-to-one translation of an ISD::CondCode to the X86-specific
4847/// condition code, returning the condition code and the LHS/RHS of the
4848/// comparison to make.
4849static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
4850                               bool isFP, SDValue &LHS, SDValue &RHS,
4851                               SelectionDAG &DAG) {
4852  if (!isFP) {
4853    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4854      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
4855        // X > -1   -> X == 0, jump !sign.
4856        RHS = DAG.getConstant(0, DL, RHS.getValueType());
4857        return X86::COND_NS;
4858      }
4859      if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
4860        // X < 0   -> X == 0, jump on sign.
4861        return X86::COND_S;
4862      }
4863      if (SetCCOpcode == ISD::SETGE && RHSC->isNullValue()) {
4864        // X >= 0   -> X == 0, jump on !sign.
4865        return X86::COND_NS;
4866      }
4867      if (SetCCOpcode == ISD::SETLT && RHSC->isOne()) {
4868        // X < 1   -> X <= 0
4869        RHS = DAG.getConstant(0, DL, RHS.getValueType());
4870        return X86::COND_LE;
4871      }
4872    }
4873
4874    return TranslateIntegerX86CC(SetCCOpcode);
4875  }
4876
4877  // First determine if it is required or is profitable to flip the operands.
4878
4879  // If LHS is a foldable load, but RHS is not, flip the condition.
4880  if (ISD::isNON_EXTLoad(LHS.getNode()) &&
4881      !ISD::isNON_EXTLoad(RHS.getNode())) {
4882    SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
4883    std::swap(LHS, RHS);
4884  }
4885
4886  switch (SetCCOpcode) {
4887  default: break;
4888  case ISD::SETOLT:
4889  case ISD::SETOLE:
4890  case ISD::SETUGT:
4891  case ISD::SETUGE:
4892    std::swap(LHS, RHS);
4893    break;
4894  }
4895
4896  // On a floating point condition, the flags are set as follows:
4897  // ZF  PF  CF   op
4898  //  0 | 0 | 0 | X > Y
4899  //  0 | 0 | 1 | X < Y
4900  //  1 | 0 | 0 | X == Y
4901  //  1 | 1 | 1 | unordered
4902  switch (SetCCOpcode) {
4903  default: llvm_unreachable("Condcode should be pre-legalized away");
4904  case ISD::SETUEQ:
4905  case ISD::SETEQ:   return X86::COND_E;
4906  case ISD::SETOLT:              // flipped
4907  case ISD::SETOGT:
4908  case ISD::SETGT:   return X86::COND_A;
4909  case ISD::SETOLE:              // flipped
4910  case ISD::SETOGE:
4911  case ISD::SETGE:   return X86::COND_AE;
4912  case ISD::SETUGT:              // flipped
4913  case ISD::SETULT:
4914  case ISD::SETLT:   return X86::COND_B;
4915  case ISD::SETUGE:              // flipped
4916  case ISD::SETULE:
4917  case ISD::SETLE:   return X86::COND_BE;
4918  case ISD::SETONE:
4919  case ISD::SETNE:   return X86::COND_NE;
4920  case ISD::SETUO:   return X86::COND_P;
4921  case ISD::SETO:    return X86::COND_NP;
4922  case ISD::SETOEQ:
4923  case ISD::SETUNE:  return X86::COND_INVALID;
4924  }
4925}
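// Worked example for the integer special cases above (hypothetical values):
// a signed "x > -1" has its RHS rewritten to 0 and returns COND_NS, so the
// comparison can be selected as a simple sign test; likewise a signed
// "x < 1" is rewritten to "x <= 0" and returns COND_LE.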
4926
4927/// Is there a floating point cmov for the specific X86 condition code?
4928/// The current x86 ISA includes the following FP cmov instructions:
4929/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
4930static bool hasFPCMov(unsigned X86CC) {
4931  switch (X86CC) {
4932  default:
4933    return false;
4934  case X86::COND_B:
4935  case X86::COND_BE:
4936  case X86::COND_E:
4937  case X86::COND_P:
4938  case X86::COND_A:
4939  case X86::COND_AE:
4940  case X86::COND_NE:
4941  case X86::COND_NP:
4942    return true;
4943  }
4944}
4945
4946
4947bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
4948                                           const CallInst &I,
4949                                           MachineFunction &MF,
4950                                           unsigned Intrinsic) const {
4951
4952  const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
4953  if (!IntrData)
4954    return false;
4955
4956  Info.flags = MachineMemOperand::MONone;
4957  Info.offset = 0;
4958
4959  switch (IntrData->Type) {
4960  case TRUNCATE_TO_MEM_VI8:
4961  case TRUNCATE_TO_MEM_VI16:
4962  case TRUNCATE_TO_MEM_VI32: {
4963    Info.opc = ISD::INTRINSIC_VOID;
4964    Info.ptrVal = I.getArgOperand(0);
4965    MVT VT  = MVT::getVT(I.getArgOperand(1)->getType());
4966    MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
4967    if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
4968      ScalarVT = MVT::i8;
4969    else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
4970      ScalarVT = MVT::i16;
4971    else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
4972      ScalarVT = MVT::i32;
4973
4974    Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
4975    Info.align = Align::None();
4976    Info.flags |= MachineMemOperand::MOStore;
4977    break;
4978  }
4979  case GATHER:
4980  case GATHER_AVX2: {
4981    Info.opc = ISD::INTRINSIC_W_CHAIN;
4982    Info.ptrVal = nullptr;
4983    MVT DataVT = MVT::getVT(I.getType());
4984    MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
4985    unsigned NumElts = std::min(DataVT.getVectorNumElements(),
4986                                IndexVT.getVectorNumElements());
4987    Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
4988    Info.align = Align::None();
4989    Info.flags |= MachineMemOperand::MOLoad;
4990    break;
4991  }
4992  case SCATTER: {
4993    Info.opc = ISD::INTRINSIC_VOID;
4994    Info.ptrVal = nullptr;
4995    MVT DataVT = MVT::getVT(I.getArgOperand(3)->getType());
4996    MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
4997    unsigned NumElts = std::min(DataVT.getVectorNumElements(),
4998                                IndexVT.getVectorNumElements());
4999    Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
5000    Info.align = Align::None();
5001    Info.flags |= MachineMemOperand::MOStore;
5002    break;
5003  }
5004  default:
5005    return false;
5006  }
5007
5008  return true;
5009}
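// Illustrative example (hypothetical intrinsic call): for a truncate-to-memory
// intrinsic of type TRUNCATE_TO_MEM_VI8 whose value operand is a v8i64, the
// code above reports memVT == v8i8 with the MOStore flag, i.e. only 8 bytes
// are written even though the source register is 512 bits wide.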
5010
5011/// Returns true if the target can instruction select the
5012/// specified FP immediate natively. If false, the legalizer will
5013/// materialize the FP immediate as a load from a constant pool.
5014bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
5015                                     bool ForCodeSize) const {
5016  for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
5017    if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
5018      return true;
5019  }
5020  return false;
5021}
5022
5023bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
5024                                              ISD::LoadExtType ExtTy,
5025                                              EVT NewVT) const {
5026  assert(cast<LoadSDNode>(Load)->isSimple() && "illegal to narrow");
5027
5028  // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
5029  // relocation must target a movq or addq instruction: don't let the load shrink.
5030  SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
5031  if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
5032    if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
5033      return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
5034
5035  // If this is an (1) AVX vector load with (2) multiple uses and (3) all of
5036  // those uses are extracted directly into a store, then the extract + store
5037  // can be store-folded. Therefore, it's probably not worth splitting the load.
5038  EVT VT = Load->getValueType(0);
5039  if ((VT.is256BitVector() || VT.is512BitVector()) && !Load->hasOneUse()) {
5040    for (auto UI = Load->use_begin(), UE = Load->use_end(); UI != UE; ++UI) {
5041      // Skip uses of the chain value. Result 0 of the node is the load value.
5042      if (UI.getUse().getResNo() != 0)
5043        continue;
5044
5045      // If this use is not an extract + store, it's probably worth splitting.
5046      if (UI->getOpcode() != ISD::EXTRACT_SUBVECTOR || !UI->hasOneUse() ||
5047          UI->use_begin()->getOpcode() != ISD::STORE)
5048        return true;
5049    }
5050    // All non-chain uses are extract + store.
5051    return false;
5052  }
5053
5054  return true;
5055}
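// Illustrative case: a 256-bit load whose only non-chain uses are
// extract_subvector nodes that each feed a single store keeps its full width
// (the hook returns false), since every extract+store pair can be folded into
// a narrower store; any other multi-use pattern still permits narrowing.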
5056
5057/// Returns true if it is beneficial to convert a load of a constant
5058/// to just the constant itself.
5059bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
5060                                                          Type *Ty) const {
5061  assert(Ty->isIntegerTy());
5062
5063  unsigned BitSize = Ty->getPrimitiveSizeInBits();
5064  if (BitSize == 0 || BitSize > 64)
5065    return false;
5066  return true;
5067}
5068
5069bool X86TargetLowering::reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
5070  // If we are using XMM registers in the ABI and the condition of the select is
5071  // a floating-point compare and we have blendv or conditional move, then it is
5072  // cheaper to select instead of doing a cross-register move and creating a
5073  // load that depends on the compare result.
5074  bool IsFPSetCC = CmpOpVT.isFloatingPoint() && CmpOpVT != MVT::f128;
5075  return !IsFPSetCC || !Subtarget.isTarget64BitLP64() || !Subtarget.hasAVX();
5076}
5077
5078bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
5079  // TODO: It might be a win to ease or lift this restriction, but the generic
5080  // folds in DAGCombiner conflict with vector folds for an AVX512 target.
5081  if (VT.isVector() && Subtarget.hasAVX512())
5082    return false;
5083
5084  return true;
5085}
5086
5087bool X86TargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
5088                                               SDValue C) const {
5089  // TODO: We handle scalars using custom code, but generic combining could make
5090  // that unnecessary.
5091  APInt MulC;
5092  if (!ISD::isConstantSplatVector(C.getNode(), MulC))
5093    return false;
5094
5095  // Find the type this will be legalized to. Otherwise we might prematurely
5096  // convert this to shl+add/sub and then still have to type legalize those ops.
5097  // Another choice would be to defer the decision for illegal types until
5098  // after type legalization. But constant splat vectors of i64 can't make it
5099  // through type legalization on 32-bit targets so we would need to special
5100  // case vXi64.
5101  while (getTypeAction(Context, VT) != TypeLegal)
5102    VT = getTypeToTransformTo(Context, VT);
5103
5104  // If vector multiply is legal, assume that's faster than shl + add/sub.
5105  // TODO: Multiply is a complex op with higher latency and lower throughput in
5106  //       most implementations, so this check could be loosened based on type
5107  //       and/or a CPU attribute.
5108  if (isOperationLegal(ISD::MUL, VT))
5109    return false;
5110
5111  // shl+add, shl+sub, shl+add+neg
5112  return (MulC + 1).isPowerOf2() || (MulC - 1).isPowerOf2() ||
5113         (1 - MulC).isPowerOf2() || (-(MulC + 1)).isPowerOf2();
5114}
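// Worked example (hypothetical splat constant): a multiply by 17 is accepted
// for decomposition when ISD::MUL is not legal for the legalized type,
// because MulC - 1 == 16 is a power of two, allowing a "shl by 4 + add"
// expansion; a multiply by 12 is rejected here since none of 13, 11, -11 or
// -13 is a power of two.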
5115
5116bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
5117                                                unsigned Index) const {
5118  if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
5119    return false;
5120
5121  // Mask vectors support all subregister combinations and operations that
5122  // extract half of a vector.
5123  if (ResVT.getVectorElementType() == MVT::i1)
5124    return Index == 0 || ((ResVT.getSizeInBits() == SrcVT.getSizeInBits()*2) &&
5125                          (Index == ResVT.getVectorNumElements()));
5126
5127  return (Index % ResVT.getVectorNumElements()) == 0;
5128}
5129
5130bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
5131  unsigned Opc = VecOp.getOpcode();
5132
5133  // Assume target opcodes can't be scalarized.
5134  // TODO - do we have any exceptions?
5135  if (Opc >= ISD::BUILTIN_OP_END)
5136    return false;
5137
5138  // If the vector op is not supported, try to convert to scalar.
5139  EVT VecVT = VecOp.getValueType();
5140  if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
5141    return true;
5142
5143  // If the vector op is supported, but the scalar op is not, the transform may
5144  // not be worthwhile.
5145  EVT ScalarVT = VecVT.getScalarType();
5146  return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
5147}
5148
5149bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT) const {
5150  // TODO: Allow vectors?
5151  if (VT.isVector())
5152    return false;
5153  return VT.isSimple() || !isOperationExpand(Opcode, VT);
5154}
5155
5156bool X86TargetLowering::isCheapToSpeculateCttz() const {
5157  // Speculate cttz only if we can directly use TZCNT.
5158  return Subtarget.hasBMI();
5159}
5160
5161bool X86TargetLowering::isCheapToSpeculateCtlz() const {
5162  // Speculate ctlz only if we can directly use LZCNT.
5163  return Subtarget.hasLZCNT();
5164}
5165
5166bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
5167                                                const SelectionDAG &DAG,
5168                                                const MachineMemOperand &MMO) const {
5169  if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() &&
5170      BitcastVT.getVectorElementType() == MVT::i1)
5171    return false;
5172
5173  if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
5174    return false;
5175
5176  // If both types are legal vectors, it's always ok to convert them.
5177  if (LoadVT.isVector() && BitcastVT.isVector() &&
5178      isTypeLegal(LoadVT) && isTypeLegal(BitcastVT))
5179    return true;
5180
5181  return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT, DAG, MMO);
5182}
5183
5184bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
5185                                         const SelectionDAG &DAG) const {
5186  // Do not merge to float value size (128 bits) if no implicit
5187  // float attribute is set.
5188  bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
5189      Attribute::NoImplicitFloat);
5190
5191  if (NoFloat) {
5192    unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
5193    return (MemVT.getSizeInBits() <= MaxIntSize);
5194  }
5195  // Make sure we don't merge greater than our preferred vector
5196  // width.
5197  if (MemVT.getSizeInBits() > Subtarget.getPreferVectorWidth())
5198    return false;
5199  return true;
5200}
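// Illustrative example: with the noimplicitfloat attribute on a 64-bit
// target, merging stores into a single 128-bit value is rejected (128 > 64),
// while merging two i32 stores into an i64 is still allowed; without the
// attribute the limit is the subtarget's preferred vector width instead.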
5201
5202bool X86TargetLowering::isCtlzFast() const {
5203  return Subtarget.hasFastLZCNT();
5204}
5205
5206bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
5207    const Instruction &AndI) const {
5208  return true;
5209}
5210
5211bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
5212  EVT VT = Y.getValueType();
5213
5214  if (VT.isVector())
5215    return false;
5216
5217  if (!Subtarget.hasBMI())
5218    return false;
5219
5220  // There are only 32-bit and 64-bit forms for 'andn'.
5221  if (VT != MVT::i32 && VT != MVT::i64)
5222    return false;
5223
5224  return !isa<ConstantSDNode>(Y);
5225}
5226
5227bool X86TargetLowering::hasAndNot(SDValue Y) const {
5228  EVT VT = Y.getValueType();
5229
5230  if (!VT.isVector())
5231    return hasAndNotCompare(Y);
5232
5233  // Vector.
5234
5235  if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128)
5236    return false;
5237
5238  if (VT == MVT::v4i32)
5239    return true;
5240
5241  return Subtarget.hasSSE2();
5242}
5243
5244bool X86TargetLowering::hasBitTest(SDValue X, SDValue Y) const {
5245  return X.getValueType().isScalarInteger(); // 'bt'
5246}
5247
5248bool X86TargetLowering::
5249    shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
5250        SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
5251        unsigned OldShiftOpcode, unsigned NewShiftOpcode,
5252        SelectionDAG &DAG) const {
5253  // Does baseline recommend not to perform the fold by default?
5254  if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
5255          X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
5256    return false;
5257  // For scalars this transform is always beneficial.
5258  if (X.getValueType().isScalarInteger())
5259    return true;
5260  // If all the shift amounts are identical, then transform is beneficial even
5261  // with rudimentary SSE2 shifts.
5262  if (DAG.isSplatValue(Y, /*AllowUndefs=*/true))
5263    return true;
5264  // If we have AVX2 with its powerful shift operations, then it's also good.
5265  if (Subtarget.hasAVX2())
5266    return true;
5267  // Pre-AVX2 vector codegen for this pattern is best for variant with 'shl'.
5268  return NewShiftOpcode == ISD::SHL;
5269}
5270
5271bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
5272    const SDNode *N, CombineLevel Level) const {
5273  assert(((N->getOpcode() == ISD::SHL &&
5274           N->getOperand(0).getOpcode() == ISD::SRL) ||
5275          (N->getOpcode() == ISD::SRL &&
5276           N->getOperand(0).getOpcode() == ISD::SHL)) &&
5277         "Expected shift-shift mask");
5278  EVT VT = N->getValueType(0);
5279  if ((Subtarget.hasFastVectorShiftMasks() && VT.isVector()) ||
5280      (Subtarget.hasFastScalarShiftMasks() && !VT.isVector())) {
5281    // Only fold if the shift values are equal - so it folds to AND.
5282    // TODO - we should fold if either is a non-uniform vector but we don't do
5283    // the fold for non-splats yet.
5284    return N->getOperand(1) == N->getOperand(0).getOperand(1);
5285  }
5286  return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N, Level);
5287}
5288
5289bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
5290  EVT VT = Y.getValueType();
5291
5292  // For vectors, we don't have a preference, but we probably want a mask.
5293  if (VT.isVector())
5294    return false;
5295
5296  // 64-bit shifts on 32-bit targets produce really bad bloated code.
5297  if (VT == MVT::i64 && !Subtarget.is64Bit())
5298    return false;
5299
5300  return true;
5301}
5302
5303bool X86TargetLowering::shouldExpandShift(SelectionDAG &DAG,
5304                                          SDNode *N) const {
5305  if (DAG.getMachineFunction().getFunction().hasMinSize() &&
5306      !Subtarget.isOSWindows())
5307    return false;
5308  return true;
5309}
5310
5311bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
5312  // Any legal vector type can be splatted more efficiently than
5313  // loading/spilling from memory.
5314  return isTypeLegal(VT);
5315}
5316
5317MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
5318  MVT VT = MVT::getIntegerVT(NumBits);
5319  if (isTypeLegal(VT))
5320    return VT;
5321
5322  // PMOVMSKB can handle this.
5323  if (NumBits == 128 && isTypeLegal(MVT::v16i8))
5324    return MVT::v16i8;
5325
5326  // VPMOVMSKB can handle this.
5327  if (NumBits == 256 && isTypeLegal(MVT::v32i8))
5328    return MVT::v32i8;
5329
5330  // TODO: Allow 64-bit type for 32-bit target.
5331  // TODO: 512-bit types should be allowed, but make sure that those
5332  // cases are handled in combineVectorSizedSetCCEquality().
5333
5334  return MVT::INVALID_SIMPLE_VALUE_TYPE;
5335}
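// Illustrative use: when a 16-byte equality compare (e.g. from an expanded
// memcmp() == 0 check) asks for a fast type, the v16i8 returned above allows
// lowering as a vector compare followed by PMOVMSKB and one scalar test,
// instead of a chain of scalar loads and branches.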
5336
5337/// Val is the undef sentinel value or equal to the specified value.
5338static bool isUndefOrEqual(int Val, int CmpVal) {
5339  return ((Val == SM_SentinelUndef) || (Val == CmpVal));
5340}
5341
5342/// Val is either the undef or zero sentinel value.
5343static bool isUndefOrZero(int Val) {
5344  return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero));
5345}
5346
5347/// Return true if every element in Mask, beginning from position Pos and ending
5348/// in Pos+Size is the undef sentinel value.
5349static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
5350  return llvm::all_of(Mask.slice(Pos, Size),
5351                      [](int M) { return M == SM_SentinelUndef; });
5352}
5353
5354/// Return true if the mask creates a vector whose lower half is undefined.
5355static bool isUndefLowerHalf(ArrayRef<int> Mask) {
5356  unsigned NumElts = Mask.size();
5357  return isUndefInRange(Mask, 0, NumElts / 2);
5358}
5359
5360/// Return true if the mask creates a vector whose upper half is undefined.
5361static bool isUndefUpperHalf(ArrayRef<int> Mask) {
5362  unsigned NumElts = Mask.size();
5363  return isUndefInRange(Mask, NumElts / 2, NumElts / 2);
5364}
5365
5366/// Return true if Val falls within the specified range [Low, Hi).
5367static bool isInRange(int Val, int Low, int Hi) {
5368  return (Val >= Low && Val < Hi);
5369}
5370
5371/// Return true if the value of any element in Mask falls within the specified
5372/// range [Low, Hi).
5373static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {
5374  return llvm::any_of(Mask, [Low, Hi](int M) { return isInRange(M, Low, Hi); });
5375}
5376
5377/// Return true if Val is undef or if its value falls within the
5378/// specified range [Low, Hi).
5379static bool isUndefOrInRange(int Val, int Low, int Hi) {
5380  return (Val == SM_SentinelUndef) || isInRange(Val, Low, Hi);
5381}
5382
5383/// Return true if every element in Mask is undef or if its value
5384/// falls within the specified range [Low, Hi).
5385static bool isUndefOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
5386  return llvm::all_of(
5387      Mask, [Low, Hi](int M) { return isUndefOrInRange(M, Low, Hi); });
5388}
5389
5390/// Return true if Val is undef, zero or if its value falls within the
5391/// specified range [Low, Hi).
5392static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {
5393  return isUndefOrZero(Val) || isInRange(Val, Low, Hi);
5394}
5395
5396/// Return true if every element in Mask is undef, zero or if its value
5397/// falls within the specified range [Low, Hi).
5398static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
5399  return llvm::all_of(
5400      Mask, [Low, Hi](int M) { return isUndefOrZeroOrInRange(M, Low, Hi); });
5401}
5402
5403/// Return true if every element in Mask, beginning
5404/// from position Pos and ending in Pos + Size, falls within the specified
5405/// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef.
5406static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos,
5407                                       unsigned Size, int Low, int Step = 1) {
5408  for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
5409    if (!isUndefOrEqual(Mask[i], Low))
5410      return false;
5411  return true;
5412}
5413
5414/// Return true if every element in Mask, beginning
5415/// from position Pos and ending in Pos+Size, falls within the specified
5416/// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step), or is undef or is zero.
5417static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
5418                                             unsigned Size, int Low,
5419                                             int Step = 1) {
5420  for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
5421    if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
5422      return false;
5423  return true;
5424}
5425
5426/// Return true if every element in Mask, beginning
5427/// from position Pos and ending in Pos+Size is undef or is zero.
5428static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
5429                                 unsigned Size) {
5430  return llvm::all_of(Mask.slice(Pos, Size),
5431                      [](int M) { return isUndefOrZero(M); });
5432}
5433
5434/// Helper function to test whether a shuffle mask could be
5435/// simplified by widening the elements being shuffled.
5436///
5437/// Appends the mask for wider elements in WidenedMask if valid. Otherwise
5438/// leaves it in an unspecified state.
5439///
5440/// NOTE: This must handle normal vector shuffle masks and *target* vector
5441/// shuffle masks. The latter have the special property of a '-2' representing
5442/// a zero-ed lane of a vector.
5443static bool canWidenShuffleElements(ArrayRef<int> Mask,
5444                                    SmallVectorImpl<int> &WidenedMask) {
5445  WidenedMask.assign(Mask.size() / 2, 0);
5446  for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
5447    int M0 = Mask[i];
5448    int M1 = Mask[i + 1];
5449
5450    // If both elements are undef, it's trivial.
5451    if (M0 == SM_SentinelUndef && M1 == SM_SentinelUndef) {
5452      WidenedMask[i / 2] = SM_SentinelUndef;
5453      continue;
5454    }
5455
5456    // Check for an undef mask and a mask value properly aligned to fit with
5457    // a pair of values. If we find such a case, use the non-undef mask's value.
5458    if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) {
5459      WidenedMask[i / 2] = M1 / 2;
5460      continue;
5461    }
5462    if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) {
5463      WidenedMask[i / 2] = M0 / 2;
5464      continue;
5465    }
5466
5467    // When zeroing, we need to spread the zeroing across both lanes to widen.
5468    if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) {
5469      if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) &&
5470          (M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) {
5471        WidenedMask[i / 2] = SM_SentinelZero;
5472        continue;
5473      }
5474      return false;
5475    }
5476
5477    // Finally check if the two mask values are adjacent and aligned with
5478    // a pair.
5479    if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) {
5480      WidenedMask[i / 2] = M0 / 2;
5481      continue;
5482    }
5483
5484    // Otherwise we can't safely widen the elements used in this shuffle.
5485    return false;
5486  }
5487  assert(WidenedMask.size() == Mask.size() / 2 &&
5488         "Incorrect size of mask after widening the elements!");
5489
5490  return true;
5491}
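// Worked examples (hypothetical masks): <0, 1, 4, 5> widens to <0, 2>, and
// <-1, 3, 6, 7> widens to <1, 3> because each undef lane can adopt its
// neighbour's pairing; <1, 2, 4, 5> cannot be widened since the first pair
// does not start on an even source element.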
5492
5493static bool canWidenShuffleElements(ArrayRef<int> Mask,
5494                                    const APInt &Zeroable,
5495                                    bool V2IsZero,
5496                                    SmallVectorImpl<int> &WidenedMask) {
5497  // Create an alternative mask with info about zeroable elements.
5498  // Here we do not set undef elements as zeroable.
5499  SmallVector<int, 64> ZeroableMask(Mask.begin(), Mask.end());
5500  if (V2IsZero) {
5501    assert(!Zeroable.isNullValue() && "V2's non-undef elements are used?!");
5502    for (int i = 0, Size = Mask.size(); i != Size; ++i)
5503      if (Mask[i] != SM_SentinelUndef && Zeroable[i])
5504        ZeroableMask[i] = SM_SentinelZero;
5505  }
5506  return canWidenShuffleElements(ZeroableMask, WidenedMask);
5507}
5508
5509static bool canWidenShuffleElements(ArrayRef<int> Mask) {
5510  SmallVector<int, 32> WidenedMask;
5511  return canWidenShuffleElements(Mask, WidenedMask);
5512}
5513
5514/// Returns true if Elt is a constant zero or a floating point constant +0.0.
5515bool X86::isZeroNode(SDValue Elt) {
5516  return isNullConstant(Elt) || isNullFPConstant(Elt);
5517}
5518
5519// Build a vector of constants.
5520// Use an UNDEF node for an element when its value is -1 and IsMask is set.
5521// Split 64-bit constants in 32-bit mode.
5522static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
5523                              const SDLoc &dl, bool IsMask = false) {
5524
5525  SmallVector<SDValue, 32>  Ops;
5526  bool Split = false;
5527
5528  MVT ConstVecVT = VT;
5529  unsigned NumElts = VT.getVectorNumElements();
5530  bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
5531  if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
5532    ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
5533    Split = true;
5534  }
5535
5536  MVT EltVT = ConstVecVT.getVectorElementType();
5537  for (unsigned i = 0; i < NumElts; ++i) {
5538    bool IsUndef = Values[i] < 0 && IsMask;
5539    SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
5540      DAG.getConstant(Values[i], dl, EltVT);
5541    Ops.push_back(OpNode);
5542    if (Split)
5543      Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
5544                    DAG.getConstant(0, dl, EltVT));
5545  }
5546  SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
5547  if (Split)
5548    ConstsNode = DAG.getBitcast(VT, ConstsNode);
5549  return ConstsNode;
5550}
5551
5552static SDValue getConstVector(ArrayRef<APInt> Bits, APInt &Undefs,
5553                              MVT VT, SelectionDAG &DAG, const SDLoc &dl) {
5554  assert(Bits.size() == Undefs.getBitWidth() &&
5555         "Unequal constant and undef arrays");
5556  SmallVector<SDValue, 32> Ops;
5557  bool Split = false;
5558
5559  MVT ConstVecVT = VT;
5560  unsigned NumElts = VT.getVectorNumElements();
5561  bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
5562  if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
5563    ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
5564    Split = true;
5565  }
5566
5567  MVT EltVT = ConstVecVT.getVectorElementType();
5568  for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
5569    if (Undefs[i]) {
5570      Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT));
5571      continue;
5572    }
5573    const APInt &V = Bits[i];
5574    assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes");
5575    if (Split) {
5576      Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT));
5577      Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
5578    } else if (EltVT == MVT::f32) {
5579      APFloat FV(APFloat::IEEEsingle(), V);
5580      Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
5581    } else if (EltVT == MVT::f64) {
5582      APFloat FV(APFloat::IEEEdouble(), V);
5583      Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
5584    } else {
5585      Ops.push_back(DAG.getConstant(V, dl, EltVT));
5586    }
5587  }
5588
5589  SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
5590  return DAG.getBitcast(VT, ConstsNode);
5591}
5592
5593/// Returns a vector of specified type with all zero elements.
5594static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
5595                             SelectionDAG &DAG, const SDLoc &dl) {
5596  assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
5597          VT.getVectorElementType() == MVT::i1) &&
5598         "Unexpected vector type");
5599
5600  // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
5601  // type. This ensures they get CSE'd. But if the integer type is not
5602  // available, use a floating-point +0.0 instead.
5603  SDValue Vec;
5604  if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
5605    Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
5606  } else if (VT.isFloatingPoint()) {
5607    Vec = DAG.getConstantFP(+0.0, dl, VT);
5608  } else if (VT.getVectorElementType() == MVT::i1) {
5609    assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
5610           "Unexpected vector type");
5611    Vec = DAG.getConstant(0, dl, VT);
5612  } else {
5613    unsigned Num32BitElts = VT.getSizeInBits() / 32;
5614    Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
5615  }
5616  return DAG.getBitcast(VT, Vec);
5617}
5618
5619static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
5620                                const SDLoc &dl, unsigned vectorWidth) {
5621  EVT VT = Vec.getValueType();
5622  EVT ElVT = VT.getVectorElementType();
5623  unsigned Factor = VT.getSizeInBits()/vectorWidth;
5624  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
5625                                  VT.getVectorNumElements()/Factor);
5626
5627  // Extract the relevant vectorWidth bits.  Generate an EXTRACT_SUBVECTOR
5628  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
5629  assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
5630
5631  // This is the index of the first element of the vectorWidth-bit chunk
5632  // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
5633  IdxVal &= ~(ElemsPerChunk - 1);
5634
5635  // If the input is a buildvector just emit a smaller one.
5636  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
5637    return DAG.getBuildVector(ResultVT, dl,
5638                              Vec->ops().slice(IdxVal, ElemsPerChunk));
5639
5640  SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
5641  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
5642}
5643
5644/// Generate a DAG to grab 128 bits from a vector > 128 bits.  This
5645/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
5646/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
5647/// instructions or a simple subregister reference. Idx is an index in the
5648/// 128 bits we want.  It need not be aligned to a 128-bit boundary.  That makes
5649/// lowering EXTRACT_VECTOR_ELT operations easier.
5650static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
5651                                   SelectionDAG &DAG, const SDLoc &dl) {
5652  assert((Vec.getValueType().is256BitVector() ||
5653          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
5654  return extractSubVector(Vec, IdxVal, DAG, dl, 128);
5655}
5656
5657/// Generate a DAG to grab 256-bits from a 512-bit vector.
5658static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
5659                                   SelectionDAG &DAG, const SDLoc &dl) {
5660  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
5661  return extractSubVector(Vec, IdxVal, DAG, dl, 256);
5662}
5663
5664static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5665                               SelectionDAG &DAG, const SDLoc &dl,
5666                               unsigned vectorWidth) {
5667  assert((vectorWidth == 128 || vectorWidth == 256) &&
5668         "Unsupported vector width");
5669  // Inserting UNDEF is a no-op; just return Result.
5670  if (Vec.isUndef())
5671    return Result;
5672  EVT VT = Vec.getValueType();
5673  EVT ElVT = VT.getVectorElementType();
5674  EVT ResultVT = Result.getValueType();
5675
5676  // Insert the relevant vectorWidth bits.
5677  unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
5678  assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
5679
5680  // This is the index of the first element of the vectorWidth-bit chunk
5681  // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
5682  IdxVal &= ~(ElemsPerChunk - 1);
5683
5684  SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
5685  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
5686}
5687
5688/// Generate a DAG to put 128 bits into a vector > 128 bits.  This
5689/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
5690/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
5691/// simple superregister reference.  Idx is an index in the 128 bits
5692/// we want.  It need not be aligned to a 128-bit boundary.  That makes
5693/// lowering INSERT_VECTOR_ELT operations easier.
5694static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5695                                  SelectionDAG &DAG, const SDLoc &dl) {
5696  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
5697  return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
5698}
5699
5700/// Widen a vector to a larger size with the same scalar type, with the new
5701/// elements either zero or undef.
5702static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
5703                              const X86Subtarget &Subtarget, SelectionDAG &DAG,
5704                              const SDLoc &dl) {
5705  assert(Vec.getValueSizeInBits() < VT.getSizeInBits() &&
5706         Vec.getValueType().getScalarType() == VT.getScalarType() &&
5707         "Unsupported vector widening type");
5708  SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
5709                                : DAG.getUNDEF(VT);
5710  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, Vec,
5711                     DAG.getIntPtrConstant(0, dl));
5712}
5713
5714/// Widen a vector to a larger size with the same scalar type, with the new
5715/// elements either zero or undef.
5716static SDValue widenSubVector(SDValue Vec, bool ZeroNewElements,
5717                              const X86Subtarget &Subtarget, SelectionDAG &DAG,
5718                              const SDLoc &dl, unsigned WideSizeInBits) {
5719  assert(Vec.getValueSizeInBits() < WideSizeInBits &&
5720         (WideSizeInBits % Vec.getScalarValueSizeInBits()) == 0 &&
5721         "Unsupported vector widening type");
5722  unsigned WideNumElts = WideSizeInBits / Vec.getScalarValueSizeInBits();
5723  MVT SVT = Vec.getSimpleValueType().getScalarType();
5724  MVT VT = MVT::getVectorVT(SVT, WideNumElts);
5725  return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
5726}
5727
5728// Helper function to collect subvector ops that are concatenated together,
5729// either by ISD::CONCAT_VECTORS or an ISD::INSERT_SUBVECTOR series.
5730// The subvectors in Ops are guaranteed to be the same type.
5731static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops) {
5732  assert(Ops.empty() && "Expected an empty ops vector");
5733
5734  if (N->getOpcode() == ISD::CONCAT_VECTORS) {
5735    Ops.append(N->op_begin(), N->op_end());
5736    return true;
5737  }
5738
5739  if (N->getOpcode() == ISD::INSERT_SUBVECTOR &&
5740      isa<ConstantSDNode>(N->getOperand(2))) {
5741    SDValue Src = N->getOperand(0);
5742    SDValue Sub = N->getOperand(1);
5743    const APInt &Idx = N->getConstantOperandAPInt(2);
5744    EVT VT = Src.getValueType();
5745    EVT SubVT = Sub.getValueType();
5746
5747    // TODO - Handle more general insert_subvector chains.
5748    if (VT.getSizeInBits() == (SubVT.getSizeInBits() * 2) &&
5749        Idx == (VT.getVectorNumElements() / 2) &&
5750        Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
5751        Src.getOperand(1).getValueType() == SubVT &&
5752        isNullConstant(Src.getOperand(2))) {
5753      Ops.push_back(Src.getOperand(1));
5754      Ops.push_back(Sub);
5755      return true;
5756    }
5757  }
5758
5759  return false;
5760}
5761
5762// Helper for splitting operands of an operation to the legal target size and
5763// applying a function on each part.
5764// Useful for operations that are available on SSE2 in 128-bit, on AVX2 in
5765// 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
5766// deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
5767// The argument Builder is a function that will be applied on each split part:
5768// SDValue Builder(SelectionDAG&G, SDLoc, ArrayRef<SDValue>)
5769template <typename F>
5770SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
5771                         const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
5772                         F Builder, bool CheckBWI = true) {
5773  assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2");
5774  unsigned NumSubs = 1;
5775  if ((CheckBWI && Subtarget.useBWIRegs()) ||
5776      (!CheckBWI && Subtarget.useAVX512Regs())) {
5777    if (VT.getSizeInBits() > 512) {
5778      NumSubs = VT.getSizeInBits() / 512;
5779      assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size");
5780    }
5781  } else if (Subtarget.hasAVX2()) {
5782    if (VT.getSizeInBits() > 256) {
5783      NumSubs = VT.getSizeInBits() / 256;
5784      assert((VT.getSizeInBits() % 256) == 0 && "Illegal vector size");
5785    }
5786  } else {
5787    if (VT.getSizeInBits() > 128) {
5788      NumSubs = VT.getSizeInBits() / 128;
5789      assert((VT.getSizeInBits() % 128) == 0 && "Illegal vector size");
5790    }
5791  }
5792
5793  if (NumSubs == 1)
5794    return Builder(DAG, DL, Ops);
5795
5796  SmallVector<SDValue, 4> Subs;
5797  for (unsigned i = 0; i != NumSubs; ++i) {
5798    SmallVector<SDValue, 2> SubOps;
5799    for (SDValue Op : Ops) {
5800      EVT OpVT = Op.getValueType();
5801      unsigned NumSubElts = OpVT.getVectorNumElements() / NumSubs;
5802      unsigned SizeSub = OpVT.getSizeInBits() / NumSubs;
5803      SubOps.push_back(extractSubVector(Op, i * NumSubElts, DAG, DL, SizeSub));
5804    }
5805    Subs.push_back(Builder(DAG, DL, SubOps));
5806  }
5807  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
5808}
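// Hypothetical usage sketch (operand names chosen for illustration): an
// unsigned average over v64i8 can be built as
//
//   SDValue Res = SplitOpsAndApply(
//       DAG, Subtarget, DL, MVT::v64i8, {LHS, RHS},
//       [](SelectionDAG &DAG, const SDLoc &DL, ArrayRef<SDValue> Ops) {
//         return DAG.getNode(X86ISD::AVG, DL, Ops[0].getValueType(), Ops);
//       });
//
// which emits two 256-bit X86ISD::AVG nodes and concatenates them on an
// AVX2-only target, but a single 512-bit node when BWI registers are usable.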
5809
5810/// Insert i1-subvector to i1-vector.
5811static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
5812                                const X86Subtarget &Subtarget) {
5813
5814  SDLoc dl(Op);
5815  SDValue Vec = Op.getOperand(0);
5816  SDValue SubVec = Op.getOperand(1);
5817  SDValue Idx = Op.getOperand(2);
5818
5819  if (!isa<ConstantSDNode>(Idx))
5820    return SDValue();
5821
5822  // Inserting undef is a nop. We can just return the original vector.
5823  if (SubVec.isUndef())
5824    return Vec;
5825
5826  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
5827  if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
5828    return Op;
5829
5830  MVT OpVT = Op.getSimpleValueType();
5831  unsigned NumElems = OpVT.getVectorNumElements();
5832
5833  SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
5834
5835  // Extend to natively supported kshift.
5836  MVT WideOpVT = OpVT;
5837  if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
5838    WideOpVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
5839
5840  // Inserting into the lsbs of a zero vector is legal. ISel will insert shifts
5841  // if necessary.
5842  if (IdxVal == 0 && ISD::isBuildVectorAllZeros(Vec.getNode())) {
5843    // May need to promote to a legal type.
5844    Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5845                     DAG.getConstant(0, dl, WideOpVT),
5846                     SubVec, Idx);
5847    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5848  }
5849
5850  MVT SubVecVT = SubVec.getSimpleValueType();
5851  unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
5852
5853  assert(IdxVal + SubVecNumElems <= NumElems &&
5854         IdxVal % SubVecVT.getSizeInBits() == 0 &&
5855         "Unexpected index value in INSERT_SUBVECTOR");
5856
5857  SDValue Undef = DAG.getUNDEF(WideOpVT);
5858
5859  if (IdxVal == 0) {
5860    // Zero lower bits of the Vec
5861    SDValue ShiftBits = DAG.getTargetConstant(SubVecNumElems, dl, MVT::i8);
5862    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec,
5863                      ZeroIdx);
5864    Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
5865    Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
5866    // Merge them together, SubVec should be zero extended.
5867    SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5868                         DAG.getConstant(0, dl, WideOpVT),
5869                         SubVec, ZeroIdx);
5870    Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
5871    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5872  }
5873
5874  SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5875                       Undef, SubVec, ZeroIdx);
5876
5877  if (Vec.isUndef()) {
5878    assert(IdxVal != 0 && "Unexpected index");
5879    SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5880                         DAG.getTargetConstant(IdxVal, dl, MVT::i8));
5881    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
5882  }
5883
5884  if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
5885    assert(IdxVal != 0 && "Unexpected index");
5886    NumElems = WideOpVT.getVectorNumElements();
5887    unsigned ShiftLeft = NumElems - SubVecNumElems;
5888    unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
5889    SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5890                         DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
5891    if (ShiftRight != 0)
5892      SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
5893                           DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
5894    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
5895  }
5896
5897  // Simple case when we put subvector in the upper part
5898  if (IdxVal + SubVecNumElems == NumElems) {
5899    SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5900                         DAG.getTargetConstant(IdxVal, dl, MVT::i8));
5901    if (SubVecNumElems * 2 == NumElems) {
5902      // Special case, use legal zero extending insert_subvector. This allows
5903      // isel to optimize when bits are known zero.
5904      Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
5905      Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5906                        DAG.getConstant(0, dl, WideOpVT),
5907                        Vec, ZeroIdx);
5908    } else {
5909      // Otherwise use explicit shifts to zero the bits.
5910      Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5911                        Undef, Vec, ZeroIdx);
5912      NumElems = WideOpVT.getVectorNumElements();
5913      SDValue ShiftBits = DAG.getTargetConstant(NumElems - IdxVal, dl, MVT::i8);
5914      Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
5915      Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
5916    }
5917    Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
5918    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5919  }
5920
5921  // Inserting into the middle is more complicated.
5922
5923  NumElems = WideOpVT.getVectorNumElements();
5924
5925  // Widen the vector if needed.
5926  Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
5927
5928  unsigned ShiftLeft = NumElems - SubVecNumElems;
5929  unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
5930
5931  // Do an optimization for the most frequently used types.
5932  if (WideOpVT != MVT::v64i1 || Subtarget.is64Bit()) {
5933    APInt Mask0 = APInt::getBitsSet(NumElems, IdxVal, IdxVal + SubVecNumElems);
5934    Mask0.flipAllBits();
5935    SDValue CMask0 = DAG.getConstant(Mask0, dl, MVT::getIntegerVT(NumElems));
5936    SDValue VMask0 = DAG.getNode(ISD::BITCAST, dl, WideOpVT, CMask0);
5937    Vec = DAG.getNode(ISD::AND, dl, WideOpVT, Vec, VMask0);
5938    SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5939                         DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
5940    SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
5941                         DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
5942    Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
5943
5944    // Reduce to original width if needed.
5945    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5946  }
5947
5948  // Clear the upper bits of the subvector and move it to its insert position.
5949  SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5950                       DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
5951  SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
5952                       DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
5953
5954  // Isolate the bits below the insertion point.
5955  unsigned LowShift = NumElems - IdxVal;
5956  SDValue Low = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec,
5957                            DAG.getTargetConstant(LowShift, dl, MVT::i8));
5958  Low = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Low,
5959                    DAG.getTargetConstant(LowShift, dl, MVT::i8));
5960
5961  // Isolate the bits after the last inserted bit.
5962  unsigned HighShift = IdxVal + SubVecNumElems;
5963  SDValue High = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
5964                            DAG.getTargetConstant(HighShift, dl, MVT::i8));
5965  High = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, High,
5966                    DAG.getTargetConstant(HighShift, dl, MVT::i8));
5967
5968  // Now OR all 3 pieces together.
5969  Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Low, High);
5970  SubVec = DAG.getNode(ISD::OR, dl, WideOpVT, SubVec, Vec);
5971
5972  // Reduce to original width if needed.
5973  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
5974}
5975
5976static SDValue concatSubVectors(SDValue V1, SDValue V2, SelectionDAG &DAG,
5977                                const SDLoc &dl) {
5978  assert(V1.getValueType() == V2.getValueType() && "subvector type mismatch");
5979  EVT SubVT = V1.getValueType();
5980  EVT SubSVT = SubVT.getScalarType();
5981  unsigned SubNumElts = SubVT.getVectorNumElements();
5982  unsigned SubVectorWidth = SubVT.getSizeInBits();
5983  EVT VT = EVT::getVectorVT(*DAG.getContext(), SubSVT, 2 * SubNumElts);
5984  SDValue V = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, dl, SubVectorWidth);
5985  return insertSubVector(V, V2, SubNumElts, DAG, dl, SubVectorWidth);
5986}
5987
5988/// Returns a vector of specified type with all bits set.
5989/// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
5990/// Then bitcast to their original type, ensuring they get CSE'd.
5991static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
5992  assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
5993         "Expected a 128/256/512-bit vector type");
5994
5995  APInt Ones = APInt::getAllOnesValue(32);
5996  unsigned NumElts = VT.getSizeInBits() / 32;
5997  SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
5998  return DAG.getBitcast(VT, Vec);
5999}
6000
6001// Convert *_EXTEND to *_EXTEND_VECTOR_INREG opcode.
6002static unsigned getOpcode_EXTEND_VECTOR_INREG(unsigned Opcode) {
6003  switch (Opcode) {
6004  case ISD::ANY_EXTEND:
6005  case ISD::ANY_EXTEND_VECTOR_INREG:
6006    return ISD::ANY_EXTEND_VECTOR_INREG;
6007  case ISD::ZERO_EXTEND:
6008  case ISD::ZERO_EXTEND_VECTOR_INREG:
6009    return ISD::ZERO_EXTEND_VECTOR_INREG;
6010  case ISD::SIGN_EXTEND:
6011  case ISD::SIGN_EXTEND_VECTOR_INREG:
6012    return ISD::SIGN_EXTEND_VECTOR_INREG;
6013  }
6014  llvm_unreachable("Unknown opcode");
6015}
6016
6017static SDValue getExtendInVec(unsigned Opcode, const SDLoc &DL, EVT VT,
6018                              SDValue In, SelectionDAG &DAG) {
6019  EVT InVT = In.getValueType();
6020  assert(VT.isVector() && InVT.isVector() && "Expected vector VTs.");
6021  assert((ISD::ANY_EXTEND == Opcode || ISD::SIGN_EXTEND == Opcode ||
6022          ISD::ZERO_EXTEND == Opcode) &&
6023         "Unknown extension opcode");
6024
6025  // For 256-bit vectors, we only need the lower (128-bit) input half.
6026  // For 512-bit vectors, we only need the lower input half or quarter.
6027  if (InVT.getSizeInBits() > 128) {
6028    assert(VT.getSizeInBits() == InVT.getSizeInBits() &&
6029           "Expected VTs to be the same size!");
6030    unsigned Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
6031    In = extractSubVector(In, 0, DAG, DL,
6032                          std::max(128U, (unsigned)VT.getSizeInBits() / Scale));
6033    InVT = In.getValueType();
6034  }
6035
6036  if (VT.getVectorNumElements() != InVT.getVectorNumElements())
6037    Opcode = getOpcode_EXTEND_VECTOR_INREG(Opcode);
6038
6039  return DAG.getNode(Opcode, DL, VT, In);
6040}
6041
6042// Match (xor X, -1) -> X.
6043// Match extract_subvector(xor X, -1) -> extract_subvector(X).
6044// Match concat_vectors(xor X, -1, xor Y, -1) -> concat_vectors(X, Y).
6045static SDValue IsNOT(SDValue V, SelectionDAG &DAG) {
6046  V = peekThroughBitcasts(V);
6047  if (V.getOpcode() == ISD::XOR &&
6048      ISD::isBuildVectorAllOnes(V.getOperand(1).getNode()))
6049    return V.getOperand(0);
6050  if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6051      (isNullConstant(V.getOperand(1)) || V.getOperand(0).hasOneUse())) {
6052    if (SDValue Not = IsNOT(V.getOperand(0), DAG)) {
6053      Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
6054      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Not), V.getValueType(),
6055                         Not, V.getOperand(1));
6056    }
6057  }
6058  SmallVector<SDValue, 2> CatOps;
6059  if (collectConcatOps(V.getNode(), CatOps)) {
6060    for (SDValue &CatOp : CatOps) {
6061      SDValue NotCat = IsNOT(CatOp, DAG);
6062      if (!NotCat) return SDValue();
6063      CatOp = DAG.getBitcast(CatOp.getValueType(), NotCat);
6064    }
6065    return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(V), V.getValueType(), CatOps);
6066  }
6067  return SDValue();
6068}
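// Illustrative matches: (xor X, all-ones) yields X; an extract_subvector of
// such a xor yields the extract_subvector of X, so a later combine can form
// ANDNP on the narrower type; and a concat_vectors whose operands are all
// inverted yields the concat_vectors of the un-inverted operands.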
6069
6070/// Returns a vector_shuffle node for an unpackl operation.
6071static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
6072                          SDValue V1, SDValue V2) {
6073  SmallVector<int, 8> Mask;
6074  createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
6075  return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
6076}
6077
6078/// Returns a vector_shuffle node for an unpackh operation.
6079static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
6080                          SDValue V1, SDValue V2) {
6081  SmallVector<int, 8> Mask;
6082  createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
6083  return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
6084}
6085
6086/// Return a vector_shuffle of the specified vector and a zero or undef vector.
6087/// This produces a shuffle where the low element of V2 is swizzled into the
6088/// zero/undef vector, landing at element Idx.
6089/// This produces a shuffle mask like 4,1,2,3 (idx=0) or  0,1,2,4 (idx=3).
6090static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
6091                                           bool IsZero,
6092                                           const X86Subtarget &Subtarget,
6093                                           SelectionDAG &DAG) {
6094  MVT VT = V2.getSimpleValueType();
6095  SDValue V1 = IsZero
6096    ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
6097  int NumElems = VT.getVectorNumElements();
6098  SmallVector<int, 16> MaskVec(NumElems);
6099  for (int i = 0; i != NumElems; ++i)
6100    // If this is the insertion idx, put the low elt of V2 here.
6101    MaskVec[i] = (i == Idx) ? NumElems : i;
6102  return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
6103}
6104
6105static const Constant *getTargetConstantFromNode(LoadSDNode *Load) {
6106  if (!Load || !ISD::isNormalLoad(Load))
6107    return nullptr;
6108
6109  SDValue Ptr = Load->getBasePtr();
6110  if (Ptr->getOpcode() == X86ISD::Wrapper ||
6111      Ptr->getOpcode() == X86ISD::WrapperRIP)
6112    Ptr = Ptr->getOperand(0);
6113
6114  auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
6115  if (!CNode || CNode->isMachineConstantPoolEntry() || CNode->getOffset() != 0)
6116    return nullptr;
6117
6118  return CNode->getConstVal();
6119}
6120
6121static const Constant *getTargetConstantFromNode(SDValue Op) {
6122  Op = peekThroughBitcasts(Op);
6123  return getTargetConstantFromNode(dyn_cast<LoadSDNode>(Op));
6124}
6125
6126const Constant *
6127X86TargetLowering::getTargetConstantFromLoad(LoadSDNode *LD) const {
6128  assert(LD && "Unexpected null LoadSDNode");
6129  return getTargetConstantFromNode(LD);
6130}
6131
6132// Extract raw constant bits from constant pools.
6133static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
6134                                          APInt &UndefElts,
6135                                          SmallVectorImpl<APInt> &EltBits,
6136                                          bool AllowWholeUndefs = true,
6137                                          bool AllowPartialUndefs = true) {
6138  assert(EltBits.empty() && "Expected an empty EltBits vector");
6139
6140  Op = peekThroughBitcasts(Op);
6141
6142  EVT VT = Op.getValueType();
6143  unsigned SizeInBits = VT.getSizeInBits();
6144  assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
6145  unsigned NumElts = SizeInBits / EltSizeInBits;
6146
6147  // Bitcast a source array of element bits to the target size.
6148  auto CastBitData = [&](APInt &UndefSrcElts, ArrayRef<APInt> SrcEltBits) {
6149    unsigned NumSrcElts = UndefSrcElts.getBitWidth();
6150    unsigned SrcEltSizeInBits = SrcEltBits[0].getBitWidth();
6151    assert((NumSrcElts * SrcEltSizeInBits) == SizeInBits &&
6152           "Constant bit sizes don't match");
6153
6154    // Don't split if we don't allow undef bits.
6155    bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
6156    if (UndefSrcElts.getBoolValue() && !AllowUndefs)
6157      return false;
6158
6159    // If we're already the right size, don't bother bitcasting.
6160    if (NumSrcElts == NumElts) {
6161      UndefElts = UndefSrcElts;
6162      EltBits.assign(SrcEltBits.begin(), SrcEltBits.end());
6163      return true;
6164    }
6165
6166    // Extract all the undef/constant element data and pack into single bitsets.
6167    APInt UndefBits(SizeInBits, 0);
6168    APInt MaskBits(SizeInBits, 0);
6169
6170    for (unsigned i = 0; i != NumSrcElts; ++i) {
6171      unsigned BitOffset = i * SrcEltSizeInBits;
6172      if (UndefSrcElts[i])
6173        UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
6174      MaskBits.insertBits(SrcEltBits[i], BitOffset);
6175    }
6176
6177    // Split the undef/constant single bitset data into the target elements.
6178    UndefElts = APInt(NumElts, 0);
6179    EltBits.resize(NumElts, APInt(EltSizeInBits, 0));
6180
6181    for (unsigned i = 0; i != NumElts; ++i) {
6182      unsigned BitOffset = i * EltSizeInBits;
6183      APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);
6184
6185      // Only treat an element as UNDEF if all bits are UNDEF.
6186      if (UndefEltBits.isAllOnesValue()) {
6187        if (!AllowWholeUndefs)
6188          return false;
6189        UndefElts.setBit(i);
6190        continue;
6191      }
6192
6193      // If only some bits are UNDEF then treat them as zero (or bail if not
6194      // supported).
6195      if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
6196        return false;
6197
6198      EltBits[i] = MaskBits.extractBits(EltSizeInBits, BitOffset);
6199    }
6200    return true;
6201  };
6202
6203  // Collect constant bits and insert into mask/undef bit masks.
6204  auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
6205                                unsigned UndefBitIndex) {
6206    if (!Cst)
6207      return false;
6208    if (isa<UndefValue>(Cst)) {
6209      Undefs.setBit(UndefBitIndex);
6210      return true;
6211    }
6212    if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
6213      Mask = CInt->getValue();
6214      return true;
6215    }
6216    if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
6217      Mask = CFP->getValueAPF().bitcastToAPInt();
6218      return true;
6219    }
6220    return false;
6221  };
6222
6223  // Handle UNDEFs.
6224  if (Op.isUndef()) {
6225    APInt UndefSrcElts = APInt::getAllOnesValue(NumElts);
6226    SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
6227    return CastBitData(UndefSrcElts, SrcEltBits);
6228  }
6229
6230  // Extract scalar constant bits.
6231  if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
6232    APInt UndefSrcElts = APInt::getNullValue(1);
6233    SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
6234    return CastBitData(UndefSrcElts, SrcEltBits);
6235  }
6236  if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
6237    APInt UndefSrcElts = APInt::getNullValue(1);
6238    APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
6239    SmallVector<APInt, 64> SrcEltBits(1, RawBits);
6240    return CastBitData(UndefSrcElts, SrcEltBits);
6241  }
6242
6243  // Extract constant bits from build vector.
6244  if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
6245    unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
6246    unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6247
6248    APInt UndefSrcElts(NumSrcElts, 0);
6249    SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
6250    for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
6251      const SDValue &Src = Op.getOperand(i);
6252      if (Src.isUndef()) {
6253        UndefSrcElts.setBit(i);
6254        continue;
6255      }
6256      auto *Cst = cast<ConstantSDNode>(Src);
6257      SrcEltBits[i] = Cst->getAPIntValue().zextOrTrunc(SrcEltSizeInBits);
6258    }
6259    return CastBitData(UndefSrcElts, SrcEltBits);
6260  }
6261  if (ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode())) {
6262    unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
6263    unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6264
6265    APInt UndefSrcElts(NumSrcElts, 0);
6266    SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
6267    for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
6268      const SDValue &Src = Op.getOperand(i);
6269      if (Src.isUndef()) {
6270        UndefSrcElts.setBit(i);
6271        continue;
6272      }
6273      auto *Cst = cast<ConstantFPSDNode>(Src);
6274      APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
6275      SrcEltBits[i] = RawBits.zextOrTrunc(SrcEltSizeInBits);
6276    }
6277    return CastBitData(UndefSrcElts, SrcEltBits);
6278  }
6279
6280  // Extract constant bits from constant pool vector.
6281  if (auto *Cst = getTargetConstantFromNode(Op)) {
6282    Type *CstTy = Cst->getType();
6283    unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
6284    if (!CstTy->isVectorTy() || (CstSizeInBits % SizeInBits) != 0)
6285      return false;
6286
6287    unsigned SrcEltSizeInBits = CstTy->getScalarSizeInBits();
6288    unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6289
6290    APInt UndefSrcElts(NumSrcElts, 0);
6291    SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
6292    for (unsigned i = 0; i != NumSrcElts; ++i)
6293      if (!CollectConstantBits(Cst->getAggregateElement(i), SrcEltBits[i],
6294                               UndefSrcElts, i))
6295        return false;
6296
6297    return CastBitData(UndefSrcElts, SrcEltBits);
6298  }
6299
6300  // Extract constant bits from a broadcasted constant pool scalar.
6301  if (Op.getOpcode() == X86ISD::VBROADCAST &&
6302      EltSizeInBits <= VT.getScalarSizeInBits()) {
6303    if (auto *Broadcast = getTargetConstantFromNode(Op.getOperand(0))) {
6304      unsigned SrcEltSizeInBits = Broadcast->getType()->getScalarSizeInBits();
6305      unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6306
6307      APInt UndefSrcElts(NumSrcElts, 0);
6308      SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
6309      if (CollectConstantBits(Broadcast, SrcEltBits[0], UndefSrcElts, 0)) {
6310        if (UndefSrcElts[0])
6311          UndefSrcElts.setBits(0, NumSrcElts);
6312        SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
6313        return CastBitData(UndefSrcElts, SrcEltBits);
6314      }
6315    }
6316  }
6317
6318  if (Op.getOpcode() == X86ISD::VBROADCAST_LOAD &&
6319      EltSizeInBits <= VT.getScalarSizeInBits()) {
6320    auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
6321    if (MemIntr->getMemoryVT().getScalarSizeInBits() != VT.getScalarSizeInBits())
6322      return false;
6323
6324    SDValue Ptr = MemIntr->getBasePtr();
6325    if (Ptr->getOpcode() == X86ISD::Wrapper ||
6326        Ptr->getOpcode() == X86ISD::WrapperRIP)
6327      Ptr = Ptr->getOperand(0);
6328
6329    auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
6330    if (!CNode || CNode->isMachineConstantPoolEntry() ||
6331        CNode->getOffset() != 0)
6332      return false;
6333
6334    if (const Constant *C = CNode->getConstVal()) {
6335      unsigned SrcEltSizeInBits = C->getType()->getScalarSizeInBits();
6336      unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6337
6338      APInt UndefSrcElts(NumSrcElts, 0);
6339      SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
6340      if (CollectConstantBits(C, SrcEltBits[0], UndefSrcElts, 0)) {
6341        if (UndefSrcElts[0])
6342          UndefSrcElts.setBits(0, NumSrcElts);
6343        SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
6344        return CastBitData(UndefSrcElts, SrcEltBits);
6345      }
6346    }
6347  }
6348
6349  // Extract constant bits from a subvector broadcast.
6350  if (Op.getOpcode() == X86ISD::SUBV_BROADCAST) {
6351    SmallVector<APInt, 16> SubEltBits;
6352    if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6353                                      UndefElts, SubEltBits, AllowWholeUndefs,
6354                                      AllowPartialUndefs)) {
6355      UndefElts = APInt::getSplat(NumElts, UndefElts);
6356      while (EltBits.size() < NumElts)
6357        EltBits.append(SubEltBits.begin(), SubEltBits.end());
6358      return true;
6359    }
6360  }
6361
6362  // Extract a rematerialized scalar constant insertion.
6363  if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
6364      Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
6365      isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
6366    unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
6367    unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6368
6369    APInt UndefSrcElts(NumSrcElts, 0);
6370    SmallVector<APInt, 64> SrcEltBits;
6371    auto *CN = cast<ConstantSDNode>(Op.getOperand(0).getOperand(0));
6372    SrcEltBits.push_back(CN->getAPIntValue().zextOrTrunc(SrcEltSizeInBits));
6373    SrcEltBits.append(NumSrcElts - 1, APInt(SrcEltSizeInBits, 0));
6374    return CastBitData(UndefSrcElts, SrcEltBits);
6375  }
6376
6377  // Insert constant bits from a base and sub vector sources.
6378  if (Op.getOpcode() == ISD::INSERT_SUBVECTOR &&
6379      isa<ConstantSDNode>(Op.getOperand(2))) {
6380    // TODO - support insert_subvector through bitcasts.
6381    if (EltSizeInBits != VT.getScalarSizeInBits())
6382      return false;
6383
6384    APInt UndefSubElts;
6385    SmallVector<APInt, 32> EltSubBits;
6386    if (getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
6387                                      UndefSubElts, EltSubBits,
6388                                      AllowWholeUndefs, AllowPartialUndefs) &&
6389        getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6390                                      UndefElts, EltBits, AllowWholeUndefs,
6391                                      AllowPartialUndefs)) {
6392      unsigned BaseIdx = Op.getConstantOperandVal(2);
6393      UndefElts.insertBits(UndefSubElts, BaseIdx);
6394      for (unsigned i = 0, e = EltSubBits.size(); i != e; ++i)
6395        EltBits[BaseIdx + i] = EltSubBits[i];
6396      return true;
6397    }
6398  }
6399
6400  // Extract constant bits from a subvector's source.
6401  if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6402      isa<ConstantSDNode>(Op.getOperand(1))) {
6403    // TODO - support extract_subvector through bitcasts.
6404    if (EltSizeInBits != VT.getScalarSizeInBits())
6405      return false;
6406
6407    if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6408                                      UndefElts, EltBits, AllowWholeUndefs,
6409                                      AllowPartialUndefs)) {
6410      EVT SrcVT = Op.getOperand(0).getValueType();
6411      unsigned NumSrcElts = SrcVT.getVectorNumElements();
6412      unsigned NumSubElts = VT.getVectorNumElements();
6413      unsigned BaseIdx = Op.getConstantOperandVal(1);
6414      UndefElts = UndefElts.extractBits(NumSubElts, BaseIdx);
6415      if ((BaseIdx + NumSubElts) != NumSrcElts)
6416        EltBits.erase(EltBits.begin() + BaseIdx + NumSubElts, EltBits.end());
6417      if (BaseIdx != 0)
6418        EltBits.erase(EltBits.begin(), EltBits.begin() + BaseIdx);
6419      return true;
6420    }
6421  }
6422
6423  // Extract constant bits from shuffle node sources.
6424  if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
6425    // TODO - support shuffle through bitcasts.
6426    if (EltSizeInBits != VT.getScalarSizeInBits())
6427      return false;
6428
6429    ArrayRef<int> Mask = SVN->getMask();
6430    if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
6431        llvm::any_of(Mask, [](int M) { return M < 0; }))
6432      return false;
6433
6434    APInt UndefElts0, UndefElts1;
6435    SmallVector<APInt, 32> EltBits0, EltBits1;
6436    if (isAnyInRange(Mask, 0, NumElts) &&
6437        !getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6438                                       UndefElts0, EltBits0, AllowWholeUndefs,
6439                                       AllowPartialUndefs))
6440      return false;
6441    if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
6442        !getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
6443                                       UndefElts1, EltBits1, AllowWholeUndefs,
6444                                       AllowPartialUndefs))
6445      return false;
6446
6447    UndefElts = APInt::getNullValue(NumElts);
6448    for (int i = 0; i != (int)NumElts; ++i) {
6449      int M = Mask[i];
6450      if (M < 0) {
6451        UndefElts.setBit(i);
6452        EltBits.push_back(APInt::getNullValue(EltSizeInBits));
6453      } else if (M < (int)NumElts) {
6454        if (UndefElts0[M])
6455          UndefElts.setBit(i);
6456        EltBits.push_back(EltBits0[M]);
6457      } else {
6458        if (UndefElts1[M - NumElts])
6459          UndefElts.setBit(i);
6460        EltBits.push_back(EltBits1[M - NumElts]);
6461      }
6462    }
6463    return true;
6464  }
6465
6466  return false;
6467}
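// NOTE (illustrative example, not part of the original file): requesting
// EltSizeInBits == 16 from the constant build vector
// <i32 0x00010002, i32 0xAAAABBBB> repacks the raw 64 bits into
//   EltBits == { 0x0002, 0x0001, 0xBBBB, 0xAAAA }, UndefElts == 0b0000.
// A target element whose bits are all undef is reported via UndefElts (only if
// AllowWholeUndefs); partially undef bits are treated as zero (only if
// AllowPartialUndefs); otherwise the whole extraction fails.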
6468
6469namespace llvm {
6470namespace X86 {
6471bool isConstantSplat(SDValue Op, APInt &SplatVal) {
6472  APInt UndefElts;
6473  SmallVector<APInt, 16> EltBits;
6474  if (getTargetConstantBitsFromNode(Op, Op.getScalarValueSizeInBits(),
6475                                    UndefElts, EltBits, true, false)) {
6476    int SplatIndex = -1;
6477    for (int i = 0, e = EltBits.size(); i != e; ++i) {
6478      if (UndefElts[i])
6479        continue;
6480      if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex]) {
6481        SplatIndex = -1;
6482        break;
6483      }
6484      SplatIndex = i;
6485    }
6486    if (0 <= SplatIndex) {
6487      SplatVal = EltBits[SplatIndex];
6488      return true;
6489    }
6490  }
6491
6492  return false;
6493}
6494} // namespace X86
6495} // namespace llvm
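// NOTE (illustrative example, not part of the original file): the call above
// allows whole-undef elements but not partially-undef ones, so
// <i32 7, i32 undef, i32 7, i32 7> splats to SplatVal == 7 and returns true,
// while <i32 7, i32 3, i32 7, i32 7> returns false.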
6496
6497static bool getTargetShuffleMaskIndices(SDValue MaskNode,
6498                                        unsigned MaskEltSizeInBits,
6499                                        SmallVectorImpl<uint64_t> &RawMask,
6500                                        APInt &UndefElts) {
6501  // Extract the raw target constant bits.
6502  SmallVector<APInt, 64> EltBits;
6503  if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
6504                                     EltBits, /* AllowWholeUndefs */ true,
6505                                     /* AllowPartialUndefs */ false))
6506    return false;
6507
6508  // Insert the extracted elements into the mask.
6509  for (APInt Elt : EltBits)
6510    RawMask.push_back(Elt.getZExtValue());
6511
6512  return true;
6513}
6514
6515/// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
6516/// Note: This ignores saturation, so inputs must be checked first.
6517static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
6518                                  bool Unary) {
6519  assert(Mask.empty() && "Expected an empty shuffle mask vector");
6520  unsigned NumElts = VT.getVectorNumElements();
6521  unsigned NumLanes = VT.getSizeInBits() / 128;
6522  unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
6523  unsigned Offset = Unary ? 0 : NumElts;
6524
6525  for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
6526    for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
6527      Mask.push_back(Elt + (Lane * NumEltsPerLane));
6528    for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
6529      Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
6530  }
6531}
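// NOTE (illustrative example, not part of the original file): for a binary
// pack with result VT == MVT::v16i8 (a single 128-bit lane) the mask is
//   {0,2,4,6,8,10,12,14, 16,18,20,22,24,26,28,30}
// i.e. the even bytes of the two v8i16 inputs viewed as 32 consecutive bytes,
// matching PACKSSWB/PACKUSWB when no saturation occurs.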
6532
6533// Split the demanded elts of a PACKSS/PACKUS node between its operands.
6534static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
6535                                APInt &DemandedLHS, APInt &DemandedRHS) {
6536  int NumLanes = VT.getSizeInBits() / 128;
6537  int NumElts = DemandedElts.getBitWidth();
6538  int NumInnerElts = NumElts / 2;
6539  int NumEltsPerLane = NumElts / NumLanes;
6540  int NumInnerEltsPerLane = NumInnerElts / NumLanes;
6541
6542  DemandedLHS = APInt::getNullValue(NumInnerElts);
6543  DemandedRHS = APInt::getNullValue(NumInnerElts);
6544
6545  // Map DemandedElts to the packed operands.
6546  for (int Lane = 0; Lane != NumLanes; ++Lane) {
6547    for (int Elt = 0; Elt != NumInnerEltsPerLane; ++Elt) {
6548      int OuterIdx = (Lane * NumEltsPerLane) + Elt;
6549      int InnerIdx = (Lane * NumInnerEltsPerLane) + Elt;
6550      if (DemandedElts[OuterIdx])
6551        DemandedLHS.setBit(InnerIdx);
6552      if (DemandedElts[OuterIdx + NumInnerEltsPerLane])
6553        DemandedRHS.setBit(InnerIdx);
6554    }
6555  }
6556}
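// NOTE (illustrative example, not part of the original file): for a v16i8 pack
// of two v8i16 operands, demanding result elements {0, 11} sets bit 0 of
// DemandedLHS and bit 3 of DemandedRHS (11 - 8 == 3), since the low half of
// each 128-bit result lane comes from the LHS and the high half from the RHS.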
6557
6558// Split the demanded elts of a HADD/HSUB node between its operands.
6559static void getHorizDemandedElts(EVT VT, const APInt &DemandedElts,
6560                                 APInt &DemandedLHS, APInt &DemandedRHS) {
6561  int NumLanes = VT.getSizeInBits() / 128;
6562  int NumElts = DemandedElts.getBitWidth();
6563  int NumEltsPerLane = NumElts / NumLanes;
6564  int HalfEltsPerLane = NumEltsPerLane / 2;
6565
6566  DemandedLHS = APInt::getNullValue(NumElts);
6567  DemandedRHS = APInt::getNullValue(NumElts);
6568
6569  // Map DemandedElts to the horizontal operands.
6570  for (int Idx = 0; Idx != NumElts; ++Idx) {
6571    if (!DemandedElts[Idx])
6572      continue;
6573    int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
6574    int LocalIdx = Idx % NumEltsPerLane;
6575    if (LocalIdx < HalfEltsPerLane) {
6576      DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 0);
6577      DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 1);
6578    } else {
6579      LocalIdx -= HalfEltsPerLane;
6580      DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 0);
6581      DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 1);
6582    }
6583  }
6584}
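// NOTE (illustrative example, not part of the original file): for a v4f32
// HADD, result element 1 is LHS[2] + LHS[3] and result element 2 is
// RHS[0] + RHS[1], so demanding {1, 2} yields DemandedLHS == {2, 3} and
// DemandedRHS == {0, 1}.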
6585
6586/// Calculates the shuffle mask corresponding to the target-specific opcode.
6587/// If the mask could be calculated, returns it in \p Mask, returns the shuffle
6588/// operands in \p Ops, and returns true.
6589/// Sets \p IsUnary to true if only one source is used. Note that this will set
6590/// IsUnary for shuffles which use a single input multiple times, and in those
6591/// cases it will adjust the mask to only have indices within that single input.
6592/// It is an error to call this with non-empty Mask/Ops vectors.
6593static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
6594                                 SmallVectorImpl<SDValue> &Ops,
6595                                 SmallVectorImpl<int> &Mask, bool &IsUnary) {
6596  unsigned NumElems = VT.getVectorNumElements();
6597  unsigned MaskEltSize = VT.getScalarSizeInBits();
6598  SmallVector<uint64_t, 32> RawMask;
6599  APInt RawUndefs;
6600  SDValue ImmN;
6601
6602  assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
6603  assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
6604
6605  IsUnary = false;
6606  bool IsFakeUnary = false;
6607  switch (N->getOpcode()) {
6608  case X86ISD::BLENDI:
6609    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6610    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6611    ImmN = N->getOperand(N->getNumOperands() - 1);
6612    DecodeBLENDMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6613    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6614    break;
6615  case X86ISD::SHUFP:
6616    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6617    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6618    ImmN = N->getOperand(N->getNumOperands() - 1);
6619    DecodeSHUFPMask(NumElems, MaskEltSize,
6620                    cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6621    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6622    break;
6623  case X86ISD::INSERTPS:
6624    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6625    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6626    ImmN = N->getOperand(N->getNumOperands() - 1);
6627    DecodeINSERTPSMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6628    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6629    break;
6630  case X86ISD::EXTRQI:
6631    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6632    if (isa<ConstantSDNode>(N->getOperand(1)) &&
6633        isa<ConstantSDNode>(N->getOperand(2))) {
6634      int BitLen = N->getConstantOperandVal(1);
6635      int BitIdx = N->getConstantOperandVal(2);
6636      DecodeEXTRQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
6637      IsUnary = true;
6638    }
6639    break;
6640  case X86ISD::INSERTQI:
6641    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6642    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6643    if (isa<ConstantSDNode>(N->getOperand(2)) &&
6644        isa<ConstantSDNode>(N->getOperand(3))) {
6645      int BitLen = N->getConstantOperandVal(2);
6646      int BitIdx = N->getConstantOperandVal(3);
6647      DecodeINSERTQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
6648      IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6649    }
6650    break;
6651  case X86ISD::UNPCKH:
6652    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6653    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6654    DecodeUNPCKHMask(NumElems, MaskEltSize, Mask);
6655    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6656    break;
6657  case X86ISD::UNPCKL:
6658    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6659    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6660    DecodeUNPCKLMask(NumElems, MaskEltSize, Mask);
6661    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6662    break;
6663  case X86ISD::MOVHLPS:
6664    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6665    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6666    DecodeMOVHLPSMask(NumElems, Mask);
6667    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6668    break;
6669  case X86ISD::MOVLHPS:
6670    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6671    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6672    DecodeMOVLHPSMask(NumElems, Mask);
6673    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6674    break;
6675  case X86ISD::PALIGNR:
6676    assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6677    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6678    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6679    ImmN = N->getOperand(N->getNumOperands() - 1);
6680    DecodePALIGNRMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6681                      Mask);
6682    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6683    Ops.push_back(N->getOperand(1));
6684    Ops.push_back(N->getOperand(0));
6685    break;
6686  case X86ISD::VSHLDQ:
6687    assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6688    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6689    ImmN = N->getOperand(N->getNumOperands() - 1);
6690    DecodePSLLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6691                     Mask);
6692    IsUnary = true;
6693    break;
6694  case X86ISD::VSRLDQ:
6695    assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6696    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6697    ImmN = N->getOperand(N->getNumOperands() - 1);
6698    DecodePSRLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6699                     Mask);
6700    IsUnary = true;
6701    break;
6702  case X86ISD::PSHUFD:
6703  case X86ISD::VPERMILPI:
6704    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6705    ImmN = N->getOperand(N->getNumOperands() - 1);
6706    DecodePSHUFMask(NumElems, MaskEltSize,
6707                    cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6708    IsUnary = true;
6709    break;
6710  case X86ISD::PSHUFHW:
6711    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6712    ImmN = N->getOperand(N->getNumOperands() - 1);
6713    DecodePSHUFHWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6714                      Mask);
6715    IsUnary = true;
6716    break;
6717  case X86ISD::PSHUFLW:
6718    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6719    ImmN = N->getOperand(N->getNumOperands() - 1);
6720    DecodePSHUFLWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6721                      Mask);
6722    IsUnary = true;
6723    break;
6724  case X86ISD::VZEXT_MOVL:
6725    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6726    DecodeZeroMoveLowMask(NumElems, Mask);
6727    IsUnary = true;
6728    break;
6729  case X86ISD::VBROADCAST: {
6730    SDValue N0 = N->getOperand(0);
6731    // See if we're broadcasting from index 0 of an EXTRACT_SUBVECTOR. If so,
6732    // add the pre-extracted value to the Ops vector.
6733    if (N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6734        N0.getOperand(0).getValueType() == VT &&
6735        N0.getConstantOperandVal(1) == 0)
6736      Ops.push_back(N0.getOperand(0));
6737
6738    // We only decode broadcasts of same-sized vectors, unless the broadcast
6739    // came from an extract from the original width. If we found one, we
6740    // pushed it onto the Ops vector above.
6741    if (N0.getValueType() == VT || !Ops.empty()) {
6742      DecodeVectorBroadcast(NumElems, Mask);
6743      IsUnary = true;
6744      break;
6745    }
6746    return false;
6747  }
6748  case X86ISD::VPERMILPV: {
6749    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6750    IsUnary = true;
6751    SDValue MaskNode = N->getOperand(1);
6752    if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6753                                    RawUndefs)) {
6754      DecodeVPERMILPMask(NumElems, MaskEltSize, RawMask, RawUndefs, Mask);
6755      break;
6756    }
6757    return false;
6758  }
6759  case X86ISD::PSHUFB: {
6760    assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6761    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6762    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6763    IsUnary = true;
6764    SDValue MaskNode = N->getOperand(1);
6765    if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
6766      DecodePSHUFBMask(RawMask, RawUndefs, Mask);
6767      break;
6768    }
6769    return false;
6770  }
6771  case X86ISD::VPERMI:
6772    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6773    ImmN = N->getOperand(N->getNumOperands() - 1);
6774    DecodeVPERMMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6775    IsUnary = true;
6776    break;
6777  case X86ISD::MOVSS:
6778  case X86ISD::MOVSD:
6779    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6780    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6781    DecodeScalarMoveMask(NumElems, /* IsLoad */ false, Mask);
6782    break;
6783  case X86ISD::VPERM2X128:
6784    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6785    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6786    ImmN = N->getOperand(N->getNumOperands() - 1);
6787    DecodeVPERM2X128Mask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6788                         Mask);
6789    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6790    break;
6791  case X86ISD::SHUF128:
6792    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6793    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6794    ImmN = N->getOperand(N->getNumOperands() - 1);
6795    decodeVSHUF64x2FamilyMask(NumElems, MaskEltSize,
6796                              cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6797    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6798    break;
6799  case X86ISD::MOVSLDUP:
6800    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6801    DecodeMOVSLDUPMask(NumElems, Mask);
6802    IsUnary = true;
6803    break;
6804  case X86ISD::MOVSHDUP:
6805    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6806    DecodeMOVSHDUPMask(NumElems, Mask);
6807    IsUnary = true;
6808    break;
6809  case X86ISD::MOVDDUP:
6810    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6811    DecodeMOVDDUPMask(NumElems, Mask);
6812    IsUnary = true;
6813    break;
6814  case X86ISD::VPERMIL2: {
6815    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6816    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6817    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6818    SDValue MaskNode = N->getOperand(2);
6819    SDValue CtrlNode = N->getOperand(3);
6820    if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
6821      unsigned CtrlImm = CtrlOp->getZExtValue();
6822      if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6823                                      RawUndefs)) {
6824        DecodeVPERMIL2PMask(NumElems, MaskEltSize, CtrlImm, RawMask, RawUndefs,
6825                            Mask);
6826        break;
6827      }
6828    }
6829    return false;
6830  }
6831  case X86ISD::VPPERM: {
6832    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6833    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6834    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6835    SDValue MaskNode = N->getOperand(2);
6836    if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
6837      DecodeVPPERMMask(RawMask, RawUndefs, Mask);
6838      break;
6839    }
6840    return false;
6841  }
6842  case X86ISD::VPERMV: {
6843    assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6844    IsUnary = true;
6845    // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
6846    Ops.push_back(N->getOperand(1));
6847    SDValue MaskNode = N->getOperand(0);
6848    if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6849                                    RawUndefs)) {
6850      DecodeVPERMVMask(RawMask, RawUndefs, Mask);
6851      break;
6852    }
6853    return false;
6854  }
6855  case X86ISD::VPERMV3: {
6856    assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6857    assert(N->getOperand(2).getValueType() == VT && "Unexpected value type");
6858    IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
6859    // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
6860    Ops.push_back(N->getOperand(0));
6861    Ops.push_back(N->getOperand(2));
6862    SDValue MaskNode = N->getOperand(1);
6863    if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6864                                    RawUndefs)) {
6865      DecodeVPERMV3Mask(RawMask, RawUndefs, Mask);
6866      break;
6867    }
6868    return false;
6869  }
6870  default: llvm_unreachable("unknown target shuffle node");
6871  }
6872
6873  // Empty mask indicates the decode failed.
6874  if (Mask.empty())
6875    return false;
6876
6877  // Check if we're getting a shuffle mask with zero'd elements.
6878  if (!AllowSentinelZero)
6879    if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
6880      return false;
6881
6882  // If we have a fake unary shuffle, the shuffle mask is spread across two
6883  // inputs that are actually the same node. Re-map the mask to always point
6884  // into the first input.
6885  if (IsFakeUnary)
6886    for (int &M : Mask)
6887      if (M >= (int)Mask.size())
6888        M -= Mask.size();
6889
6890  // If we didn't already add operands in the opcode-specific code, default to
6891  // adding 1 or 2 operands starting at 0.
6892  if (Ops.empty()) {
6893    Ops.push_back(N->getOperand(0));
6894    if (!IsUnary || IsFakeUnary)
6895      Ops.push_back(N->getOperand(1));
6896  }
6897
6898  return true;
6899}
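// NOTE (illustrative example, not part of the original file): decoding
// (X86ISD::UNPCKL v4i32 X, X) first produces Mask == {0, 4, 1, 5}; the
// fake-unary fixup above then remaps it to {0, 0, 1, 1} and Ops receives the
// same node X twice, so callers see a single-input shuffle.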
6900
6901/// Compute whether each element of a shuffle is zeroable.
6902///
6903/// A "zeroable" vector shuffle element is one which can be lowered to zero.
6904/// Either it is an undef element in the shuffle mask, the element of the input
6905/// referenced is undef, or the element of the input referenced is known to be
6906/// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
6907/// as many lanes with this technique as possible to simplify the remaining
6908/// shuffle.
6909static void computeZeroableShuffleElements(ArrayRef<int> Mask,
6910                                           SDValue V1, SDValue V2,
6911                                           APInt &KnownUndef, APInt &KnownZero) {
6912  int Size = Mask.size();
6913  KnownUndef = KnownZero = APInt::getNullValue(Size);
6914
6915  V1 = peekThroughBitcasts(V1);
6916  V2 = peekThroughBitcasts(V2);
6917
6918  bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
6919  bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
6920
6921  int VectorSizeInBits = V1.getValueSizeInBits();
6922  int ScalarSizeInBits = VectorSizeInBits / Size;
6923  assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
6924
6925  for (int i = 0; i < Size; ++i) {
6926    int M = Mask[i];
6927    // Handle the easy cases.
6928    if (M < 0) {
6929      KnownUndef.setBit(i);
6930      continue;
6931    }
6932    if ((M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
6933      KnownZero.setBit(i);
6934      continue;
6935    }
6936
6937    // Determine shuffle input and normalize the mask.
6938    SDValue V = M < Size ? V1 : V2;
6939    M %= Size;
6940
6941    // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
6942    if (V.getOpcode() != ISD::BUILD_VECTOR)
6943      continue;
6944
6945    // If the BUILD_VECTOR has fewer elements than the shuffle mask, then the
6946    // bitcasted portion of the (larger) source element must be UNDEF/ZERO.
6947    if ((Size % V.getNumOperands()) == 0) {
6948      int Scale = Size / V->getNumOperands();
6949      SDValue Op = V.getOperand(M / Scale);
6950      if (Op.isUndef())
6951        KnownUndef.setBit(i);
6952      if (X86::isZeroNode(Op))
6953        KnownZero.setBit(i);
6954      else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
6955        APInt Val = Cst->getAPIntValue();
6956        Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
6957        if (Val == 0)
6958          KnownZero.setBit(i);
6959      } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
6960        APInt Val = Cst->getValueAPF().bitcastToAPInt();
6961        Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
6962        if (Val == 0)
6963          KnownZero.setBit(i);
6964      }
6965      continue;
6966    }
6967
6968    // If the BUILD_VECTOR has more elements than the shuffle mask, then all
6969    // the (smaller) source elements must be UNDEF or ZERO.
6970    if ((V.getNumOperands() % Size) == 0) {
6971      int Scale = V->getNumOperands() / Size;
6972      bool AllUndef = true;
6973      bool AllZero = true;
6974      for (int j = 0; j < Scale; ++j) {
6975        SDValue Op = V.getOperand((M * Scale) + j);
6976        AllUndef &= Op.isUndef();
6977        AllZero &= X86::isZeroNode(Op);
6978      }
6979      if (AllUndef)
6980        KnownUndef.setBit(i);
6981      if (AllZero)
6982        KnownZero.setBit(i);
6983      continue;
6984    }
6985  }
6986}
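// NOTE (illustrative example, not part of the original file): with
// Mask == {0, -1, 4, 6}, V1 a BUILD_VECTOR of zeros and
// V2 == BUILD_VECTOR(a, b, undef, 0): element 0 is known zero (V1IsZero),
// element 1 is known undef (negative mask index), element 2 references 'a' and
// stays unknown, and element 3 references V2's undef operand and is known
// undef.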
6987
6988/// Decode a target shuffle mask and inputs and see if any values are
6989/// known to be undef or zero from their inputs.
6990/// Returns true if the target shuffle mask was decoded.
6991/// FIXME: Merge this with computeZeroableShuffleElements?
6992static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
6993                                         SmallVectorImpl<SDValue> &Ops,
6994                                         APInt &KnownUndef, APInt &KnownZero) {
6995  bool IsUnary;
6996  if (!isTargetShuffle(N.getOpcode()))
6997    return false;
6998
6999  MVT VT = N.getSimpleValueType();
7000  if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary))
7001    return false;
7002
7003  int Size = Mask.size();
7004  SDValue V1 = Ops[0];
7005  SDValue V2 = IsUnary ? V1 : Ops[1];
7006  KnownUndef = KnownZero = APInt::getNullValue(Size);
7007
7008  V1 = peekThroughBitcasts(V1);
7009  V2 = peekThroughBitcasts(V2);
7010
7011  assert((VT.getSizeInBits() % Size) == 0 &&
7012         "Illegal split of shuffle value type");
7013  unsigned EltSizeInBits = VT.getSizeInBits() / Size;
7014
7015  // Extract known constant input data.
7016  APInt UndefSrcElts[2];
7017  SmallVector<APInt, 32> SrcEltBits[2];
7018  bool IsSrcConstant[2] = {
7019      getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0],
7020                                    SrcEltBits[0], true, false),
7021      getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1],
7022                                    SrcEltBits[1], true, false)};
7023
7024  for (int i = 0; i < Size; ++i) {
7025    int M = Mask[i];
7026
7027    // Already decoded as SM_SentinelZero / SM_SentinelUndef.
7028    if (M < 0) {
7029      assert(isUndefOrZero(M) && "Unknown shuffle sentinel value!");
7030      if (SM_SentinelUndef == M)
7031        KnownUndef.setBit(i);
7032      if (SM_SentinelZero == M)
7033        KnownZero.setBit(i);
7034      continue;
7035    }
7036
7037    // Determine shuffle input and normalize the mask.
7038    unsigned SrcIdx = M / Size;
7039    SDValue V = M < Size ? V1 : V2;
7040    M %= Size;
7041
7042    // We are referencing an UNDEF input.
7043    if (V.isUndef()) {
7044      KnownUndef.setBit(i);
7045      continue;
7046    }
7047
7048    // SCALAR_TO_VECTOR - only the first element is defined, and the rest UNDEF.
7049    // TODO: We currently only set UNDEF for integer types - floats use the same
7050    // registers as vectors and many of the scalar folded loads rely on the
7051    // SCALAR_TO_VECTOR pattern.
7052    if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
7053        (Size % V.getValueType().getVectorNumElements()) == 0) {
7054      int Scale = Size / V.getValueType().getVectorNumElements();
7055      int Idx = M / Scale;
7056      if (Idx != 0 && !VT.isFloatingPoint())
7057        KnownUndef.setBit(i);
7058      else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
7059        KnownZero.setBit(i);
7060      continue;
7061    }
7062
7063    // Attempt to extract from the source's constant bits.
7064    if (IsSrcConstant[SrcIdx]) {
7065      if (UndefSrcElts[SrcIdx][M])
7066        KnownUndef.setBit(i);
7067      else if (SrcEltBits[SrcIdx][M] == 0)
7068        KnownZero.setBit(i);
7069    }
7070  }
7071
7072  assert(VT.getVectorNumElements() == (unsigned)Size &&
7073         "Different mask size from vector size!");
7074  return true;
7075}
7076
7077// Replace target shuffle mask elements with known undef/zero sentinels.
7078static void resolveTargetShuffleFromZeroables(SmallVectorImpl<int> &Mask,
7079                                              const APInt &KnownUndef,
7080                                              const APInt &KnownZero,
7081                                              bool ResolveKnownZeros = true) {
7082  unsigned NumElts = Mask.size();
7083  assert(KnownUndef.getBitWidth() == NumElts &&
7084         KnownZero.getBitWidth() == NumElts && "Shuffle mask size mismatch");
7085
7086  for (unsigned i = 0; i != NumElts; ++i) {
7087    if (KnownUndef[i])
7088      Mask[i] = SM_SentinelUndef;
7089    else if (ResolveKnownZeros && KnownZero[i])
7090      Mask[i] = SM_SentinelZero;
7091  }
7092}
7093
7094// Extract target shuffle mask sentinel elements to known undef/zero bitmasks.
7095static void resolveZeroablesFromTargetShuffle(const SmallVectorImpl<int> &Mask,
7096                                              APInt &KnownUndef,
7097                                              APInt &KnownZero) {
7098  unsigned NumElts = Mask.size();
7099  KnownUndef = KnownZero = APInt::getNullValue(NumElts);
7100
7101  for (unsigned i = 0; i != NumElts; ++i) {
7102    int M = Mask[i];
7103    if (SM_SentinelUndef == M)
7104      KnownUndef.setBit(i);
7105    if (SM_SentinelZero == M)
7106      KnownZero.setBit(i);
7107  }
7108}
7109
7110// Forward declaration (for getFauxShuffleMask recursive check).
7111// TODO: Use DemandedElts variant.
7112static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
7113                                   SmallVectorImpl<int> &Mask,
7114                                   SelectionDAG &DAG, unsigned Depth,
7115                                   bool ResolveKnownElts);
7116
7117// Attempt to decode ops that could be represented as a shuffle mask.
7118// The decoded shuffle mask may contain a different number of elements than the
7119// destination value type.
7120static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
7121                               SmallVectorImpl<int> &Mask,
7122                               SmallVectorImpl<SDValue> &Ops,
7123                               SelectionDAG &DAG, unsigned Depth,
7124                               bool ResolveKnownElts) {
7125  Mask.clear();
7126  Ops.clear();
7127
7128  MVT VT = N.getSimpleValueType();
7129  unsigned NumElts = VT.getVectorNumElements();
7130  unsigned NumSizeInBits = VT.getSizeInBits();
7131  unsigned NumBitsPerElt = VT.getScalarSizeInBits();
7132  if ((NumBitsPerElt % 8) != 0 || (NumSizeInBits % 8) != 0)
7133    return false;
7134  assert(NumElts == DemandedElts.getBitWidth() && "Unexpected vector size");
7135
7136  unsigned Opcode = N.getOpcode();
7137  switch (Opcode) {
7138  case ISD::VECTOR_SHUFFLE: {
7139    // Don't treat ISD::VECTOR_SHUFFLE as a target shuffle so decode it here.
7140    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(N)->getMask();
7141    if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) {
7142      Mask.append(ShuffleMask.begin(), ShuffleMask.end());
7143      Ops.push_back(N.getOperand(0));
7144      Ops.push_back(N.getOperand(1));
7145      return true;
7146    }
7147    return false;
7148  }
7149  case ISD::AND:
7150  case X86ISD::ANDNP: {
7151    // Attempt to decode as a per-byte mask.
7152    APInt UndefElts;
7153    SmallVector<APInt, 32> EltBits;
7154    SDValue N0 = N.getOperand(0);
7155    SDValue N1 = N.getOperand(1);
7156    bool IsAndN = (X86ISD::ANDNP == Opcode);
7157    uint64_t ZeroMask = IsAndN ? 255 : 0;
7158    if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits))
7159      return false;
7160    for (int i = 0, e = (int)EltBits.size(); i != e; ++i) {
7161      if (UndefElts[i]) {
7162        Mask.push_back(SM_SentinelUndef);
7163        continue;
7164      }
7165      const APInt &ByteBits = EltBits[i];
7166      if (ByteBits != 0 && ByteBits != 255)
7167        return false;
7168      Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i);
7169    }
7170    Ops.push_back(IsAndN ? N1 : N0);
7171    return true;
7172  }
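  // NOTE (illustrative example, not part of the original file): AND'ing a
  // v4i32 value with the constant <-1, 0, -1, 0> as operand 1 decodes to the
  // byte mask {0,1,2,3, Z,Z,Z,Z, 8,9,10,11, Z,Z,Z,Z} (Z == SM_SentinelZero),
  // i.e. a blend of the non-constant operand with zero.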
7173  case ISD::OR: {
7174    // Inspect each operand at the byte level. We can merge these into a blend
7175    // shuffle mask if, for each byte, at least one operand is masked out (zero).
7176    KnownBits Known0 =
7177        DAG.computeKnownBits(N.getOperand(0), DemandedElts, Depth + 1);
7178    KnownBits Known1 =
7179        DAG.computeKnownBits(N.getOperand(1), DemandedElts, Depth + 1);
7180    if (Known0.One.isNullValue() && Known1.One.isNullValue()) {
7181      bool IsByteMask = true;
7182      unsigned NumSizeInBytes = NumSizeInBits / 8;
7183      unsigned NumBytesPerElt = NumBitsPerElt / 8;
7184      APInt ZeroMask = APInt::getNullValue(NumBytesPerElt);
7185      APInt SelectMask = APInt::getNullValue(NumBytesPerElt);
7186      for (unsigned i = 0; i != NumBytesPerElt && IsByteMask; ++i) {
7187        unsigned LHS = Known0.Zero.extractBits(8, i * 8).getZExtValue();
7188        unsigned RHS = Known1.Zero.extractBits(8, i * 8).getZExtValue();
7189        if (LHS == 255 && RHS == 0)
7190          SelectMask.setBit(i);
7191        else if (LHS == 255 && RHS == 255)
7192          ZeroMask.setBit(i);
7193        else if (!(LHS == 0 && RHS == 255))
7194          IsByteMask = false;
7195      }
7196      if (IsByteMask) {
7197        for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt) {
7198          for (unsigned j = 0; j != NumBytesPerElt; ++j) {
7199            unsigned Ofs = (SelectMask[j] ? NumSizeInBytes : 0);
7200            int Idx = (ZeroMask[j] ? (int)SM_SentinelZero : (i + j + Ofs));
7201            Mask.push_back(Idx);
7202          }
7203        }
7204        Ops.push_back(N.getOperand(0));
7205        Ops.push_back(N.getOperand(1));
7206        return true;
7207      }
7208    }
7209
7210    // Handle the OR(SHUFFLE,SHUFFLE) case where, for each element, one source
7211    // is zero and the other provides a valid shuffle index.
7212    SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
7213    SDValue N1 = peekThroughOneUseBitcasts(N.getOperand(1));
7214    if (!N0.getValueType().isVector() || !N1.getValueType().isVector())
7215      return false;
7216    SmallVector<int, 64> SrcMask0, SrcMask1;
7217    SmallVector<SDValue, 2> SrcInputs0, SrcInputs1;
7218    if (!getTargetShuffleInputs(N0, SrcInputs0, SrcMask0, DAG, Depth + 1,
7219                                true) ||
7220        !getTargetShuffleInputs(N1, SrcInputs1, SrcMask1, DAG, Depth + 1,
7221                                true))
7222      return false;
7223    size_t MaskSize = std::max(SrcMask0.size(), SrcMask1.size());
7224    SmallVector<int, 64> Mask0, Mask1;
7225    scaleShuffleMask<int>(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
7226    scaleShuffleMask<int>(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
7227    for (size_t i = 0; i != MaskSize; ++i) {
7228      if (Mask0[i] == SM_SentinelUndef && Mask1[i] == SM_SentinelUndef)
7229        Mask.push_back(SM_SentinelUndef);
7230      else if (Mask0[i] == SM_SentinelZero && Mask1[i] == SM_SentinelZero)
7231        Mask.push_back(SM_SentinelZero);
7232      else if (Mask1[i] == SM_SentinelZero)
7233        Mask.push_back(Mask0[i]);
7234      else if (Mask0[i] == SM_SentinelZero)
7235        Mask.push_back(Mask1[i] + (int)(MaskSize * SrcInputs0.size()));
7236      else
7237        return false;
7238    }
7239    Ops.append(SrcInputs0.begin(), SrcInputs0.end());
7240    Ops.append(SrcInputs1.begin(), SrcInputs1.end());
7241    return true;
7242  }
7243  case ISD::INSERT_SUBVECTOR: {
7244    SDValue Src = N.getOperand(0);
7245    SDValue Sub = N.getOperand(1);
7246    EVT SubVT = Sub.getValueType();
7247    unsigned NumSubElts = SubVT.getVectorNumElements();
7248    if (!isa<ConstantSDNode>(N.getOperand(2)) ||
7249        !N->isOnlyUserOf(Sub.getNode()))
7250      return false;
7251    uint64_t InsertIdx = N.getConstantOperandVal(2);
7252    // Handle INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)).
7253    if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
7254        Sub.getOperand(0).getValueType() == VT &&
7255        isa<ConstantSDNode>(Sub.getOperand(1))) {
7256      uint64_t ExtractIdx = Sub.getConstantOperandVal(1);
7257      for (int i = 0; i != (int)NumElts; ++i)
7258        Mask.push_back(i);
7259      for (int i = 0; i != (int)NumSubElts; ++i)
7260        Mask[InsertIdx + i] = NumElts + ExtractIdx + i;
7261      Ops.push_back(Src);
7262      Ops.push_back(Sub.getOperand(0));
7263      return true;
7264    }
7265    // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(SRC1)).
7266    SmallVector<int, 64> SubMask;
7267    SmallVector<SDValue, 2> SubInputs;
7268    if (!getTargetShuffleInputs(peekThroughOneUseBitcasts(Sub), SubInputs,
7269                                SubMask, DAG, Depth + 1, ResolveKnownElts))
7270      return false;
7271    if (SubMask.size() != NumSubElts) {
7272      assert(((SubMask.size() % NumSubElts) == 0 ||
7273              (NumSubElts % SubMask.size()) == 0) && "Illegal submask scale");
7274      if ((NumSubElts % SubMask.size()) == 0) {
7275        int Scale = NumSubElts / SubMask.size();
7276        SmallVector<int,64> ScaledSubMask;
7277        scaleShuffleMask<int>(Scale, SubMask, ScaledSubMask);
7278        SubMask = ScaledSubMask;
7279      } else {
7280        int Scale = SubMask.size() / NumSubElts;
7281        NumSubElts = SubMask.size();
7282        NumElts *= Scale;
7283        InsertIdx *= Scale;
7284      }
7285    }
7286    Ops.push_back(Src);
7287    for (SDValue &SubInput : SubInputs) {
7288      EVT SubSVT = SubInput.getValueType().getScalarType();
7289      EVT AltVT = EVT::getVectorVT(*DAG.getContext(), SubSVT,
7290                                   NumSizeInBits / SubSVT.getSizeInBits());
7291      Ops.push_back(DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), AltVT,
7292                                DAG.getUNDEF(AltVT), SubInput,
7293                                DAG.getIntPtrConstant(0, SDLoc(N))));
7294    }
7295    for (int i = 0; i != (int)NumElts; ++i)
7296      Mask.push_back(i);
7297    for (int i = 0; i != (int)NumSubElts; ++i) {
7298      int M = SubMask[i];
7299      if (0 <= M) {
7300        int InputIdx = M / NumSubElts;
7301        M = (NumElts * (1 + InputIdx)) + (M % NumSubElts);
7302      }
7303      Mask[i + InsertIdx] = M;
7304    }
7305    return true;
7306  }
7307  case ISD::SCALAR_TO_VECTOR: {
7308    // Match against a scalar_to_vector of an extract from a vector,
7309    // for PEXTRW/PEXTRB we must handle the implicit zext of the scalar.
7310    SDValue N0 = N.getOperand(0);
7311    SDValue SrcExtract;
7312
7313    if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7314         N0.getOperand(0).getValueType() == VT) ||
7315        (N0.getOpcode() == X86ISD::PEXTRW &&
7316         N0.getOperand(0).getValueType() == MVT::v8i16) ||
7317        (N0.getOpcode() == X86ISD::PEXTRB &&
7318         N0.getOperand(0).getValueType() == MVT::v16i8)) {
7319      SrcExtract = N0;
7320    }
7321
7322    if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
7323      return false;
7324
7325    SDValue SrcVec = SrcExtract.getOperand(0);
7326    EVT SrcVT = SrcVec.getValueType();
7327    unsigned NumSrcElts = SrcVT.getVectorNumElements();
7328    unsigned NumZeros = (NumBitsPerElt / SrcVT.getScalarSizeInBits()) - 1;
7329
7330    unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
7331    if (NumSrcElts <= SrcIdx)
7332      return false;
7333
7334    Ops.push_back(SrcVec);
7335    Mask.push_back(SrcIdx);
7336    Mask.append(NumZeros, SM_SentinelZero);
7337    Mask.append(NumSrcElts - Mask.size(), SM_SentinelUndef);
7338    return true;
7339  }
7340  case X86ISD::PINSRB:
7341  case X86ISD::PINSRW: {
7342    SDValue InVec = N.getOperand(0);
7343    SDValue InScl = N.getOperand(1);
7344    SDValue InIndex = N.getOperand(2);
7345    if (!isa<ConstantSDNode>(InIndex) ||
7346        cast<ConstantSDNode>(InIndex)->getAPIntValue().uge(NumElts))
7347      return false;
7348    uint64_t InIdx = N.getConstantOperandVal(2);
7349
7350    // Attempt to recognise a PINSR*(VEC, 0, Idx) shuffle pattern.
7351    if (X86::isZeroNode(InScl)) {
7352      Ops.push_back(InVec);
7353      for (unsigned i = 0; i != NumElts; ++i)
7354        Mask.push_back(i == InIdx ? SM_SentinelZero : (int)i);
7355      return true;
7356    }
7357
7358    // Attempt to recognise a PINSR*(PEXTR*) shuffle pattern.
7359    // TODO: Expand this to support INSERT_VECTOR_ELT/etc.
7360    unsigned ExOp =
7361        (X86ISD::PINSRB == Opcode ? X86ISD::PEXTRB : X86ISD::PEXTRW);
7362    if (InScl.getOpcode() != ExOp)
7363      return false;
7364
7365    SDValue ExVec = InScl.getOperand(0);
7366    SDValue ExIndex = InScl.getOperand(1);
7367    if (!isa<ConstantSDNode>(ExIndex) ||
7368        cast<ConstantSDNode>(ExIndex)->getAPIntValue().uge(NumElts))
7369      return false;
7370    uint64_t ExIdx = InScl.getConstantOperandVal(1);
7371
7372    Ops.push_back(InVec);
7373    Ops.push_back(ExVec);
7374    for (unsigned i = 0; i != NumElts; ++i)
7375      Mask.push_back(i == InIdx ? NumElts + ExIdx : i);
7376    return true;
7377  }
7378  case X86ISD::PACKSS:
7379  case X86ISD::PACKUS: {
7380    SDValue N0 = N.getOperand(0);
7381    SDValue N1 = N.getOperand(1);
7382    assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) &&
7383           N1.getValueType().getVectorNumElements() == (NumElts / 2) &&
7384           "Unexpected input value type");
7385
7386    APInt EltsLHS, EltsRHS;
7387    getPackDemandedElts(VT, DemandedElts, EltsLHS, EltsRHS);
7388
7389    // If we know input saturation won't happen, we can treat this
7390    // as a truncation shuffle.
7391    if (Opcode == X86ISD::PACKSS) {
7392      if ((!N0.isUndef() &&
7393           DAG.ComputeNumSignBits(N0, EltsLHS, Depth + 1) <= NumBitsPerElt) ||
7394          (!N1.isUndef() &&
7395           DAG.ComputeNumSignBits(N1, EltsRHS, Depth + 1) <= NumBitsPerElt))
7396        return false;
7397    } else {
7398      APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
7399      if ((!N0.isUndef() &&
7400           !DAG.MaskedValueIsZero(N0, ZeroMask, EltsLHS, Depth + 1)) ||
7401          (!N1.isUndef() &&
7402           !DAG.MaskedValueIsZero(N1, ZeroMask, EltsRHS, Depth + 1)))
7403        return false;
7404    }
7405
7406    bool IsUnary = (N0 == N1);
7407
7408    Ops.push_back(N0);
7409    if (!IsUnary)
7410      Ops.push_back(N1);
7411
7412    createPackShuffleMask(VT, Mask, IsUnary);
7413    return true;
7414  }
7415  case X86ISD::VSHLI:
7416  case X86ISD::VSRLI: {
7417    uint64_t ShiftVal = N.getConstantOperandVal(1);
7418    // Out of range bit shifts are guaranteed to be zero.
7419    if (NumBitsPerElt <= ShiftVal) {
7420      Mask.append(NumElts, SM_SentinelZero);
7421      return true;
7422    }
7423
7424    // We can only decode 'whole byte' bit shifts as shuffles.
7425    if ((ShiftVal % 8) != 0)
7426      break;
7427
7428    uint64_t ByteShift = ShiftVal / 8;
7429    unsigned NumBytes = NumSizeInBits / 8;
7430    unsigned NumBytesPerElt = NumBitsPerElt / 8;
7431    Ops.push_back(N.getOperand(0));
7432
7433    // Clear mask to all zeros and insert the shifted byte indices.
7434    Mask.append(NumBytes, SM_SentinelZero);
7435
7436    if (X86ISD::VSHLI == Opcode) {
7437      for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
7438        for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
7439          Mask[i + j] = i + j - ByteShift;
7440    } else {
7441      for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
7442        for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
7443          Mask[i + j - ByteShift] = i + j;
7444    }
7445    return true;
7446  }
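  // NOTE (illustrative example, not part of the original file): a v2i64 VSRLI
  // by 32 has ByteShift == 4, so the byte-level mask becomes
  //   {4,5,6,7,Z,Z,Z,Z, 12,13,14,15,Z,Z,Z,Z}   (Z == SM_SentinelZero)
  // which moves the high four bytes of each 64-bit element down and zero-fills
  // the top, matching the logical right shift.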
7447  case X86ISD::VBROADCAST: {
7448    SDValue Src = N.getOperand(0);
7449    MVT SrcVT = Src.getSimpleValueType();
7450    if (!SrcVT.isVector())
7451      return false;
7452
7453    if (NumSizeInBits != SrcVT.getSizeInBits()) {
7454      assert((NumSizeInBits % SrcVT.getSizeInBits()) == 0 &&
7455             "Illegal broadcast type");
7456      SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
7457                               NumSizeInBits / SrcVT.getScalarSizeInBits());
7458      Src = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), SrcVT,
7459                        DAG.getUNDEF(SrcVT), Src,
7460                        DAG.getIntPtrConstant(0, SDLoc(N)));
7461    }
7462
7463    Ops.push_back(Src);
7464    Mask.append(NumElts, 0);
7465    return true;
7466  }
7467  case ISD::ZERO_EXTEND:
7468  case ISD::ANY_EXTEND:
7469  case ISD::ZERO_EXTEND_VECTOR_INREG:
7470  case ISD::ANY_EXTEND_VECTOR_INREG: {
7471    SDValue Src = N.getOperand(0);
7472    EVT SrcVT = Src.getValueType();
7473
7474    // Extended source must be a simple vector.
7475    if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
7476        (SrcVT.getScalarSizeInBits() % 8) != 0)
7477      return false;
7478
7479    unsigned NumSrcBitsPerElt = SrcVT.getScalarSizeInBits();
7480    bool IsAnyExtend =
7481        (ISD::ANY_EXTEND == Opcode || ISD::ANY_EXTEND_VECTOR_INREG == Opcode);
7482    DecodeZeroExtendMask(NumSrcBitsPerElt, NumBitsPerElt, NumElts, IsAnyExtend,
7483                         Mask);
7484
7485    if (NumSizeInBits != SrcVT.getSizeInBits()) {
7486      assert((NumSizeInBits % SrcVT.getSizeInBits()) == 0 &&
7487             "Illegal zero-extension type");
7488      SrcVT = MVT::getVectorVT(SrcVT.getSimpleVT().getScalarType(),
7489                               NumSizeInBits / NumSrcBitsPerElt);
7490      Src = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), SrcVT,
7491                        DAG.getUNDEF(SrcVT), Src,
7492                        DAG.getIntPtrConstant(0, SDLoc(N)));
7493    }
7494
7495    Ops.push_back(Src);
7496    return true;
7497  }
7498  }
7499
7500  return false;
7501}
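
// Illustrative sketch (not part of this revision): the whole-byte VSHLI/VSRLI
// decode above, reproduced in isolation on a plain mask. Shifted-in bytes
// become SM_SentinelZero; the helper name is hypothetical and assumes the
// headers already included in this file.
static void sketchDecodeByteShiftMask(bool IsSHL, unsigned NumBytes,
                                      unsigned NumBytesPerElt,
                                      unsigned ByteShift,
                                      SmallVectorImpl<int> &Mask) {
  Mask.assign(NumBytes, SM_SentinelZero);
  for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
    for (unsigned j = ByteShift; j != NumBytesPerElt; ++j) {
      if (IsSHL)
        Mask[i + j] = i + j - ByteShift; // Bytes move towards the MSB.
      else
        Mask[i + j - ByteShift] = i + j; // Bytes move towards the LSB.
    }
  // e.g. a v2i64 VSRLI by 16 bits (ByteShift == 2) yields, per 8-byte element,
  // {2,3,4,5,6,7,Z,Z} with Z == SM_SentinelZero.
}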
7502
7503/// Removes unused/repeated shuffle source inputs and adjusts the shuffle mask.
7504static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
7505                                              SmallVectorImpl<int> &Mask) {
7506  int MaskWidth = Mask.size();
7507  SmallVector<SDValue, 16> UsedInputs;
7508  for (int i = 0, e = Inputs.size(); i < e; ++i) {
7509    int lo = UsedInputs.size() * MaskWidth;
7510    int hi = lo + MaskWidth;
7511
7512    // Strip UNDEF input usage.
7513    if (Inputs[i].isUndef())
7514      for (int &M : Mask)
7515        if ((lo <= M) && (M < hi))
7516          M = SM_SentinelUndef;
7517
7518    // Check for unused inputs.
7519    if (none_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
7520      for (int &M : Mask)
7521        if (lo <= M)
7522          M -= MaskWidth;
7523      continue;
7524    }
7525
7526    // Check for repeated inputs.
7527    bool IsRepeat = false;
7528    for (int j = 0, ue = UsedInputs.size(); j != ue; ++j) {
7529      if (UsedInputs[j] != Inputs[i])
7530        continue;
7531      for (int &M : Mask)
7532        if (lo <= M)
7533          M = (M < hi) ? ((M - lo) + (j * MaskWidth)) : (M - MaskWidth);
7534      IsRepeat = true;
7535      break;
7536    }
7537    if (IsRepeat)
7538      continue;
7539
7540    UsedInputs.push_back(Inputs[i]);
7541  }
7542  Inputs = UsedInputs;
7543}
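
// Illustrative worked example (not part of this revision): with Inputs == {A, A}
// and the 4-wide mask {0, 5, 2, 7}, the routine above detects the repeated
// input, remaps the second half of the index space onto the first, and returns
// Inputs == {A} with Mask == {0, 1, 2, 3}.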
7544
7545/// Calls getTargetShuffleAndZeroables to resolve a target shuffle mask's inputs
7546/// and then sets the SM_SentinelUndef and SM_SentinelZero values.
7547/// Returns true if the target shuffle mask was decoded.
7548static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
7549                                   SmallVectorImpl<SDValue> &Inputs,
7550                                   SmallVectorImpl<int> &Mask,
7551                                   APInt &KnownUndef, APInt &KnownZero,
7552                                   SelectionDAG &DAG, unsigned Depth,
7553                                   bool ResolveKnownElts) {
7554  EVT VT = Op.getValueType();
7555  if (!VT.isSimple() || !VT.isVector())
7556    return false;
7557
7558  if (getTargetShuffleAndZeroables(Op, Mask, Inputs, KnownUndef, KnownZero)) {
7559    if (ResolveKnownElts)
7560      resolveTargetShuffleFromZeroables(Mask, KnownUndef, KnownZero);
7561    return true;
7562  }
7563  if (getFauxShuffleMask(Op, DemandedElts, Mask, Inputs, DAG, Depth,
7564                         ResolveKnownElts)) {
7565    resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
7566    return true;
7567  }
7568  return false;
7569}
7570
7571static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
7572                                   SmallVectorImpl<int> &Mask,
7573                                   SelectionDAG &DAG, unsigned Depth = 0,
7574                                   bool ResolveKnownElts = true) {
7575  EVT VT = Op.getValueType();
7576  if (!VT.isSimple() || !VT.isVector())
7577    return false;
7578
7579  APInt KnownUndef, KnownZero;
7580  unsigned NumElts = Op.getValueType().getVectorNumElements();
7581  APInt DemandedElts = APInt::getAllOnesValue(NumElts);
7582  return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, KnownUndef,
7583                                KnownZero, DAG, Depth, ResolveKnownElts);
7584}
7585
7586/// Returns the scalar element that will make up the ith
7587/// element of the result of the vector shuffle.
7588static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
7589                                   unsigned Depth) {
7590  if (Depth == 6)
7591    return SDValue();  // Limit search depth.
7592
7593  SDValue V = SDValue(N, 0);
7594  EVT VT = V.getValueType();
7595  unsigned Opcode = V.getOpcode();
7596
7597  // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
7598  if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
7599    int Elt = SV->getMaskElt(Index);
7600
7601    if (Elt < 0)
7602      return DAG.getUNDEF(VT.getVectorElementType());
7603
7604    unsigned NumElems = VT.getVectorNumElements();
7605    SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
7606                                         : SV->getOperand(1);
7607    return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
7608  }
7609
7610  // Recurse into target specific vector shuffles to find scalars.
7611  if (isTargetShuffle(Opcode)) {
7612    MVT ShufVT = V.getSimpleValueType();
7613    MVT ShufSVT = ShufVT.getVectorElementType();
7614    int NumElems = (int)ShufVT.getVectorNumElements();
7615    SmallVector<int, 16> ShuffleMask;
7616    SmallVector<SDValue, 16> ShuffleOps;
7617    bool IsUnary;
7618
7619    if (!getTargetShuffleMask(N, ShufVT, true, ShuffleOps, ShuffleMask, IsUnary))
7620      return SDValue();
7621
7622    int Elt = ShuffleMask[Index];
7623    if (Elt == SM_SentinelZero)
7624      return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(N), ShufSVT)
7625                                 : DAG.getConstantFP(+0.0, SDLoc(N), ShufSVT);
7626    if (Elt == SM_SentinelUndef)
7627      return DAG.getUNDEF(ShufSVT);
7628
7629    assert(0 <= Elt && Elt < (2*NumElems) && "Shuffle index out of range");
7630    SDValue NewV = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
7631    return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
7632                               Depth+1);
7633  }
7634
7635  // Recurse into insert_subvector base/sub vector to find scalars.
7636  if (Opcode == ISD::INSERT_SUBVECTOR &&
7637      isa<ConstantSDNode>(N->getOperand(2))) {
7638    SDValue Vec = N->getOperand(0);
7639    SDValue Sub = N->getOperand(1);
7640    EVT SubVT = Sub.getValueType();
7641    unsigned NumSubElts = SubVT.getVectorNumElements();
7642    uint64_t SubIdx = N->getConstantOperandVal(2);
7643
7644    if (SubIdx <= Index && Index < (SubIdx + NumSubElts))
7645      return getShuffleScalarElt(Sub.getNode(), Index - SubIdx, DAG, Depth + 1);
7646    return getShuffleScalarElt(Vec.getNode(), Index, DAG, Depth + 1);
7647  }
7648
7649  // Recurse into extract_subvector src vector to find scalars.
7650  if (Opcode == ISD::EXTRACT_SUBVECTOR &&
7651      isa<ConstantSDNode>(N->getOperand(1))) {
7652    SDValue Src = N->getOperand(0);
7653    uint64_t SrcIdx = N->getConstantOperandVal(1);
7654    return getShuffleScalarElt(Src.getNode(), Index + SrcIdx, DAG, Depth + 1);
7655  }
7656
7657  // Actual nodes that may contain scalar elements
7658  if (Opcode == ISD::BITCAST) {
7659    V = V.getOperand(0);
7660    EVT SrcVT = V.getValueType();
7661    unsigned NumElems = VT.getVectorNumElements();
7662
7663    if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
7664      return SDValue();
7665  }
7666
7667  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
7668    return (Index == 0) ? V.getOperand(0)
7669                        : DAG.getUNDEF(VT.getVectorElementType());
7670
7671  if (V.getOpcode() == ISD::BUILD_VECTOR)
7672    return V.getOperand(Index);
7673
7674  return SDValue();
7675}
7676
7677// Use PINSRB/PINSRW/PINSRD to create a build vector.
7678static SDValue LowerBuildVectorAsInsert(SDValue Op, unsigned NonZeros,
7679                                        unsigned NumNonZero, unsigned NumZero,
7680                                        SelectionDAG &DAG,
7681                                        const X86Subtarget &Subtarget) {
7682  MVT VT = Op.getSimpleValueType();
7683  unsigned NumElts = VT.getVectorNumElements();
7684  assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) ||
7685          ((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) &&
7686         "Illegal vector insertion");
7687
7688  SDLoc dl(Op);
7689  SDValue V;
7690  bool First = true;
7691
7692  for (unsigned i = 0; i < NumElts; ++i) {
7693    bool IsNonZero = (NonZeros & (1 << i)) != 0;
7694    if (!IsNonZero)
7695      continue;
7696
7697    // If the build vector contains zeros or our first insertion is not the
7698    // first index, then insert into a zero vector to break any register
7699    // dependency; else use SCALAR_TO_VECTOR.
7700    if (First) {
7701      First = false;
7702      if (NumZero || 0 != i)
7703        V = getZeroVector(VT, Subtarget, DAG, dl);
7704      else {
7705        assert(0 == i && "Expected insertion into zero-index");
7706        V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7707        V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
7708        V = DAG.getBitcast(VT, V);
7709        continue;
7710      }
7711    }
7712    V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V, Op.getOperand(i),
7713                    DAG.getIntPtrConstant(i, dl));
7714  }
7715
7716  return V;
7717}
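
// Illustrative worked example (not part of this revision): for a v8i16
// build_vector (0, x, 0, y, 0, 0, 0, 0) the loop above starts from a zero
// vector (NumZero != 0) and emits two INSERT_VECTOR_ELTs, at indices 1 and 3,
// which later select to PINSRW.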
7718
7719/// Custom lower build_vector of v16i8.
7720static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
7721                                     unsigned NumNonZero, unsigned NumZero,
7722                                     SelectionDAG &DAG,
7723                                     const X86Subtarget &Subtarget) {
7724  if (NumNonZero > 8 && !Subtarget.hasSSE41())
7725    return SDValue();
7726
7727  // SSE4.1 - use PINSRB to insert each byte directly.
7728  if (Subtarget.hasSSE41())
7729    return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
7730                                    Subtarget);
7731
7732  SDLoc dl(Op);
7733  SDValue V;
7734
7735  // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
7736  for (unsigned i = 0; i < 16; i += 2) {
7737    bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
7738    bool NextIsNonZero = (NonZeros & (1 << (i + 1))) != 0;
7739    if (!ThisIsNonZero && !NextIsNonZero)
7740      continue;
7741
7742    // FIXME: Investigate combining the first 4 bytes as an i32 instead.
7743    SDValue Elt;
7744    if (ThisIsNonZero) {
7745      if (NumZero || NextIsNonZero)
7746        Elt = DAG.getZExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7747      else
7748        Elt = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7749    }
7750
7751    if (NextIsNonZero) {
7752      SDValue NextElt = Op.getOperand(i + 1);
7753      if (i == 0 && NumZero)
7754        NextElt = DAG.getZExtOrTrunc(NextElt, dl, MVT::i32);
7755      else
7756        NextElt = DAG.getAnyExtOrTrunc(NextElt, dl, MVT::i32);
7757      NextElt = DAG.getNode(ISD::SHL, dl, MVT::i32, NextElt,
7758                            DAG.getConstant(8, dl, MVT::i8));
7759      if (ThisIsNonZero)
7760        Elt = DAG.getNode(ISD::OR, dl, MVT::i32, NextElt, Elt);
7761      else
7762        Elt = NextElt;
7763    }
7764
7765    // If our first insertion is not the first index, then insert into a zero
7766    // vector to break any register dependency; else use SCALAR_TO_VECTOR.
7767    if (!V) {
7768      if (i != 0)
7769        V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
7770      else {
7771        V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Elt);
7772        V = DAG.getBitcast(MVT::v8i16, V);
7773        continue;
7774      }
7775    }
7776    Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Elt);
7777    V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, Elt,
7778                    DAG.getIntPtrConstant(i / 2, dl));
7779  }
7780
7781  return DAG.getBitcast(MVT::v16i8, V);
7782}
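
// Illustrative sketch (not part of this revision): the byte-pair packing used
// by the pre-SSE4.1 path above. Byte i stays in bits 0-7 and byte i+1 lands in
// bits 8-15 of word i/2, matching (NextElt << 8) | Elt before the PINSRW. The
// helper name is hypothetical.
static inline unsigned sketchPackBytePair(unsigned LoByte, unsigned HiByte) {
  return ((HiByte & 0xFF) << 8) | (LoByte & 0xFF); // e.g. (0x12, 0x34) -> 0x3412
}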
7783
7784/// Custom lower build_vector of v8i16.
7785static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
7786                                     unsigned NumNonZero, unsigned NumZero,
7787                                     SelectionDAG &DAG,
7788                                     const X86Subtarget &Subtarget) {
7789  if (NumNonZero > 4 && !Subtarget.hasSSE41())
7790    return SDValue();
7791
7792  // Use PINSRW to insert each element directly.
7793  return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
7794                                  Subtarget);
7795}
7796
7797/// Custom lower build_vector of v4i32 or v4f32.
7798static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
7799                                     const X86Subtarget &Subtarget) {
7800  // If this is a splat of a pair of elements, use MOVDDUP (unless the target
7801  // has XOP; in that case defer lowering to potentially use VPERMIL2PS).
7802  // Because we're creating a less complicated build vector here, we may enable
7803  // further folding of the MOVDDUP via shuffle transforms.
7804  if (Subtarget.hasSSE3() && !Subtarget.hasXOP() &&
7805      Op.getOperand(0) == Op.getOperand(2) &&
7806      Op.getOperand(1) == Op.getOperand(3) &&
7807      Op.getOperand(0) != Op.getOperand(1)) {
7808    SDLoc DL(Op);
7809    MVT VT = Op.getSimpleValueType();
7810    MVT EltVT = VT.getVectorElementType();
7811    // Create a new build vector with the first 2 elements followed by undef
7812    // padding, bitcast to v2f64, duplicate, and bitcast back.
7813    SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
7814                       DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
7815    SDValue NewBV = DAG.getBitcast(MVT::v2f64, DAG.getBuildVector(VT, DL, Ops));
7816    SDValue Dup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, NewBV);
7817    return DAG.getBitcast(VT, Dup);
7818  }
7819
7820  // Find all zeroable elements.
7821  std::bitset<4> Zeroable, Undefs;
7822  for (int i = 0; i < 4; ++i) {
7823    SDValue Elt = Op.getOperand(i);
7824    Undefs[i] = Elt.isUndef();
7825    Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
7826  }
7827  assert(Zeroable.size() - Zeroable.count() > 1 &&
7828         "We expect at least two non-zero elements!");
7829
7830  // We only know how to deal with build_vector nodes where elements are either
7831  // zeroable or extract_vector_elt with constant index.
7832  SDValue FirstNonZero;
7833  unsigned FirstNonZeroIdx;
7834  for (unsigned i = 0; i < 4; ++i) {
7835    if (Zeroable[i])
7836      continue;
7837    SDValue Elt = Op.getOperand(i);
7838    if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7839        !isa<ConstantSDNode>(Elt.getOperand(1)))
7840      return SDValue();
7841    // Make sure that this node is extracting from a 128-bit vector.
7842    MVT VT = Elt.getOperand(0).getSimpleValueType();
7843    if (!VT.is128BitVector())
7844      return SDValue();
7845    if (!FirstNonZero.getNode()) {
7846      FirstNonZero = Elt;
7847      FirstNonZeroIdx = i;
7848    }
7849  }
7850
7851  assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
7852  SDValue V1 = FirstNonZero.getOperand(0);
7853  MVT VT = V1.getSimpleValueType();
7854
7855  // See if this build_vector can be lowered as a blend with zero.
7856  SDValue Elt;
7857  unsigned EltMaskIdx, EltIdx;
7858  int Mask[4];
7859  for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
7860    if (Zeroable[EltIdx]) {
7861      // The zero vector will be on the right hand side.
7862      Mask[EltIdx] = EltIdx+4;
7863      continue;
7864    }
7865
7866    Elt = Op->getOperand(EltIdx);
7867    // By construction, Elt is a EXTRACT_VECTOR_ELT with constant index.
7868    EltMaskIdx = Elt.getConstantOperandVal(1);
7869    if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
7870      break;
7871    Mask[EltIdx] = EltIdx;
7872  }
7873
7874  if (EltIdx == 4) {
7875    // Let the shuffle legalizer deal with blend operations.
7876    SDValue VZeroOrUndef = (Zeroable == Undefs)
7877                               ? DAG.getUNDEF(VT)
7878                               : getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
7879    if (V1.getSimpleValueType() != VT)
7880      V1 = DAG.getBitcast(VT, V1);
7881    return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZeroOrUndef, Mask);
7882  }
7883
7884  // See if we can lower this build_vector to a INSERTPS.
7885  if (!Subtarget.hasSSE41())
7886    return SDValue();
7887
7888  SDValue V2 = Elt.getOperand(0);
7889  if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
7890    V1 = SDValue();
7891
7892  bool CanFold = true;
7893  for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
7894    if (Zeroable[i])
7895      continue;
7896
7897    SDValue Current = Op->getOperand(i);
7898    SDValue SrcVector = Current->getOperand(0);
7899    if (!V1.getNode())
7900      V1 = SrcVector;
7901    CanFold = (SrcVector == V1) && (Current.getConstantOperandAPInt(1) == i);
7902  }
7903
7904  if (!CanFold)
7905    return SDValue();
7906
7907  assert(V1.getNode() && "Expected at least two non-zero elements!");
7908  if (V1.getSimpleValueType() != MVT::v4f32)
7909    V1 = DAG.getBitcast(MVT::v4f32, V1);
7910  if (V2.getSimpleValueType() != MVT::v4f32)
7911    V2 = DAG.getBitcast(MVT::v4f32, V2);
7912
7913  // Ok, we can emit an INSERTPS instruction.
7914  unsigned ZMask = Zeroable.to_ulong();
7915
7916  unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
7917  assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
7918  SDLoc DL(Op);
7919  SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
7920                               DAG.getIntPtrConstant(InsertPSMask, DL, true));
7921  return DAG.getBitcast(VT, Result);
7922}
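
// Illustrative sketch (not part of this revision): how the 8-bit INSERTPS
// immediate above is assembled. Bits [7:6] pick the source lane of V2, bits
// [5:4] the destination lane in V1, and bits [3:0] zero out result lanes. The
// helper name is hypothetical.
static inline unsigned sketchInsertPSImm(unsigned SrcLane, unsigned DstLane,
                                         unsigned ZeroLanes) {
  assert(SrcLane < 4 && DstLane < 4 && ZeroLanes < 16 && "Out of range field");
  return (SrcLane << 6) | (DstLane << 4) | ZeroLanes;
  // e.g. SrcLane == 2, DstLane == 0, ZeroLanes == 0x8 -> 0x88.
}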
7923
7924/// Return a vector logical shift node.
7925static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
7926                         SelectionDAG &DAG, const TargetLowering &TLI,
7927                         const SDLoc &dl) {
7928  assert(VT.is128BitVector() && "Unknown type for VShift");
7929  MVT ShVT = MVT::v16i8;
7930  unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
7931  SrcOp = DAG.getBitcast(ShVT, SrcOp);
7932  assert(NumBits % 8 == 0 && "Only support byte sized shifts");
7933  SDValue ShiftVal = DAG.getTargetConstant(NumBits / 8, dl, MVT::i8);
7934  return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
7935}
7936
7937static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
7938                                      SelectionDAG &DAG) {
7939
7940  // Check if the scalar load can be widened into a vector load. And if
7941  // the address is "base + cst" see if the cst can be "absorbed" into
7942  // the shuffle mask.
7943  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
7944    SDValue Ptr = LD->getBasePtr();
7945    if (!ISD::isNormalLoad(LD) || !LD->isSimple())
7946      return SDValue();
7947    EVT PVT = LD->getValueType(0);
7948    if (PVT != MVT::i32 && PVT != MVT::f32)
7949      return SDValue();
7950
7951    int FI = -1;
7952    int64_t Offset = 0;
7953    if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
7954      FI = FINode->getIndex();
7955      Offset = 0;
7956    } else if (DAG.isBaseWithConstantOffset(Ptr) &&
7957               isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
7958      FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
7959      Offset = Ptr.getConstantOperandVal(1);
7960      Ptr = Ptr.getOperand(0);
7961    } else {
7962      return SDValue();
7963    }
7964
7965    // FIXME: 256-bit vector instructions don't require a strict alignment;
7966    // improve this code to support it better.
7967    unsigned RequiredAlign = VT.getSizeInBits()/8;
7968    SDValue Chain = LD->getChain();
7969    // Make sure the stack object alignment is at least 16 or 32.
7970    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7971    if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
7972      if (MFI.isFixedObjectIndex(FI)) {
7973        // Can't change the alignment. FIXME: It's possible to compute
7974        // the exact stack offset and reference FI + adjust offset instead.
7975        // If someone *really* cares about this, that's the way to implement it.
7976        return SDValue();
7977      } else {
7978        MFI.setObjectAlignment(FI, RequiredAlign);
7979      }
7980    }
7981
7982    // (Offset % 16 or 32) must be a multiple of 4. The address is then
7983    // Ptr + (Offset & ~15).
7984    if (Offset < 0)
7985      return SDValue();
7986    if ((Offset % RequiredAlign) & 3)
7987      return SDValue();
7988    int64_t StartOffset = Offset & ~int64_t(RequiredAlign - 1);
7989    if (StartOffset) {
7990      SDLoc DL(Ptr);
7991      Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
7992                        DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
7993    }
7994
7995    int EltNo = (Offset - StartOffset) >> 2;
7996    unsigned NumElems = VT.getVectorNumElements();
7997
7998    EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
7999    SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
8000                             LD->getPointerInfo().getWithOffset(StartOffset));
8001
8002    SmallVector<int, 8> Mask(NumElems, EltNo);
8003
8004    return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
8005  }
8006
8007  return SDValue();
8008}
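
// Illustrative sketch (not part of this revision): the offset-absorbing
// arithmetic above in isolation. Returns the element index to splat, or -1 if
// the constant offset cannot be folded into the shuffle mask. Names are
// hypothetical.
static inline int sketchSplatLoadEltNo(int64_t Offset, unsigned RequiredAlign,
                                       int64_t &StartOffset) {
  if (Offset < 0 || ((Offset % RequiredAlign) & 3))
    return -1;
  StartOffset = Offset & ~int64_t(RequiredAlign - 1);
  // e.g. Offset == 20, RequiredAlign == 16 -> StartOffset == 16, EltNo == 1.
  return (Offset - StartOffset) >> 2;
}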
8009
8010// Recurse to find a LoadSDNode source and the accumulated ByteOffset.
8011static bool findEltLoadSrc(SDValue Elt, LoadSDNode *&Ld, int64_t &ByteOffset) {
8012  if (ISD::isNON_EXTLoad(Elt.getNode())) {
8013    auto *BaseLd = cast<LoadSDNode>(Elt);
8014    if (!BaseLd->isSimple())
8015      return false;
8016    Ld = BaseLd;
8017    ByteOffset = 0;
8018    return true;
8019  }
8020
8021  switch (Elt.getOpcode()) {
8022  case ISD::BITCAST:
8023  case ISD::TRUNCATE:
8024  case ISD::SCALAR_TO_VECTOR:
8025    return findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset);
8026  case ISD::SRL:
8027    if (isa<ConstantSDNode>(Elt.getOperand(1))) {
8028      uint64_t Idx = Elt.getConstantOperandVal(1);
8029      if ((Idx % 8) == 0 && findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset)) {
8030        ByteOffset += Idx / 8;
8031        return true;
8032      }
8033    }
8034    break;
8035  case ISD::EXTRACT_VECTOR_ELT:
8036    if (isa<ConstantSDNode>(Elt.getOperand(1))) {
8037      SDValue Src = Elt.getOperand(0);
8038      unsigned SrcSizeInBits = Src.getScalarValueSizeInBits();
8039      unsigned DstSizeInBits = Elt.getScalarValueSizeInBits();
8040      if (DstSizeInBits == SrcSizeInBits && (SrcSizeInBits % 8) == 0 &&
8041          findEltLoadSrc(Src, Ld, ByteOffset)) {
8042        uint64_t Idx = Elt.getConstantOperandVal(1);
8043        ByteOffset += Idx * (SrcSizeInBits / 8);
8044        return true;
8045      }
8046    }
8047    break;
8048  }
8049
8050  return false;
8051}
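
// Illustrative worked example (not part of this revision): for an element of
// the form (trunc (srl (load i64 %p), 32)), the recursion above reaches the
// i64 load through TRUNCATE and SRL and reports ByteOffset == 32 / 8 == 4,
// i.e. the element reads the upper half of the 8-byte load.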
8052
8053/// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
8054/// elements can be replaced by a single large load which has the same value as
8055/// a build_vector or insert_subvector whose loaded operands are 'Elts'.
8056///
8057/// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
8058static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
8059                                        const SDLoc &DL, SelectionDAG &DAG,
8060                                        const X86Subtarget &Subtarget,
8061                                        bool isAfterLegalize) {
8062  if ((VT.getScalarSizeInBits() % 8) != 0)
8063    return SDValue();
8064
8065  unsigned NumElems = Elts.size();
8066
8067  int LastLoadedElt = -1;
8068  APInt LoadMask = APInt::getNullValue(NumElems);
8069  APInt ZeroMask = APInt::getNullValue(NumElems);
8070  APInt UndefMask = APInt::getNullValue(NumElems);
8071
8072  SmallVector<LoadSDNode*, 8> Loads(NumElems, nullptr);
8073  SmallVector<int64_t, 8> ByteOffsets(NumElems, 0);
8074
8075  // For each element in the initializer, see if we've found a load, zero or an
8076  // undef.
8077  for (unsigned i = 0; i < NumElems; ++i) {
8078    SDValue Elt = peekThroughBitcasts(Elts[i]);
8079    if (!Elt.getNode())
8080      return SDValue();
8081    if (Elt.isUndef()) {
8082      UndefMask.setBit(i);
8083      continue;
8084    }
8085    if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode())) {
8086      ZeroMask.setBit(i);
8087      continue;
8088    }
8089
8090    // Each loaded element must be the correct fractional portion of the
8091    // requested vector load.
8092    unsigned EltSizeInBits = Elt.getValueSizeInBits();
8093    if ((NumElems * EltSizeInBits) != VT.getSizeInBits())
8094      return SDValue();
8095
8096    if (!findEltLoadSrc(Elt, Loads[i], ByteOffsets[i]) || ByteOffsets[i] < 0)
8097      return SDValue();
8098    unsigned LoadSizeInBits = Loads[i]->getValueSizeInBits(0);
8099    if (((ByteOffsets[i] * 8) + EltSizeInBits) > LoadSizeInBits)
8100      return SDValue();
8101
8102    LoadMask.setBit(i);
8103    LastLoadedElt = i;
8104  }
8105  assert((ZeroMask.countPopulation() + UndefMask.countPopulation() +
8106          LoadMask.countPopulation()) == NumElems &&
8107         "Incomplete element masks");
8108
8109  // Handle Special Cases - all undef or undef/zero.
8110  if (UndefMask.countPopulation() == NumElems)
8111    return DAG.getUNDEF(VT);
8112
8113  // FIXME: Should we return this as a BUILD_VECTOR instead?
8114  if ((ZeroMask.countPopulation() + UndefMask.countPopulation()) == NumElems)
8115    return VT.isInteger() ? DAG.getConstant(0, DL, VT)
8116                          : DAG.getConstantFP(0.0, DL, VT);
8117
8118  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8119  int FirstLoadedElt = LoadMask.countTrailingZeros();
8120  SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
8121  EVT EltBaseVT = EltBase.getValueType();
8122  assert(EltBaseVT.getSizeInBits() == EltBaseVT.getStoreSizeInBits() &&
8123         "Register/Memory size mismatch");
8124  LoadSDNode *LDBase = Loads[FirstLoadedElt];
8125  assert(LDBase && "Did not find base load for merging consecutive loads");
8126  unsigned BaseSizeInBits = EltBaseVT.getStoreSizeInBits();
8127  unsigned BaseSizeInBytes = BaseSizeInBits / 8;
8128  int LoadSizeInBits = (1 + LastLoadedElt - FirstLoadedElt) * BaseSizeInBits;
8129  assert((BaseSizeInBits % 8) == 0 && "Sub-byte element loads detected");
8130
8131  // TODO: Support offsetting the base load.
8132  if (ByteOffsets[FirstLoadedElt] != 0)
8133    return SDValue();
8134
8135  // Check to see if the element's load is consecutive to the base load
8136  // or offset from a previous (already checked) load.
8137  auto CheckConsecutiveLoad = [&](LoadSDNode *Base, int EltIdx) {
8138    LoadSDNode *Ld = Loads[EltIdx];
8139    int64_t ByteOffset = ByteOffsets[EltIdx];
8140    if (ByteOffset && (ByteOffset % BaseSizeInBytes) == 0) {
8141      int64_t BaseIdx = EltIdx - (ByteOffset / BaseSizeInBytes);
8142      return (0 <= BaseIdx && BaseIdx < (int)NumElems && LoadMask[BaseIdx] &&
8143              Loads[BaseIdx] == Ld && ByteOffsets[BaseIdx] == 0);
8144    }
8145    return DAG.areNonVolatileConsecutiveLoads(Ld, Base, BaseSizeInBytes,
8146                                              EltIdx - FirstLoadedElt);
8147  };
8148
8149  // Consecutive loads can contain UNDEFS but not ZERO elements.
8150  // Consecutive loads with UNDEFs and ZEROs require an additional
8151  // shuffle stage to clear the ZERO elements.
8152  bool IsConsecutiveLoad = true;
8153  bool IsConsecutiveLoadWithZeros = true;
8154  for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
8155    if (LoadMask[i]) {
8156      if (!CheckConsecutiveLoad(LDBase, i)) {
8157        IsConsecutiveLoad = false;
8158        IsConsecutiveLoadWithZeros = false;
8159        break;
8160      }
8161    } else if (ZeroMask[i]) {
8162      IsConsecutiveLoad = false;
8163    }
8164  }
8165
8166  auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
8167    auto MMOFlags = LDBase->getMemOperand()->getFlags();
8168    assert(LDBase->isSimple() &&
8169           "Cannot merge volatile or atomic loads.");
8170    SDValue NewLd =
8171        DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
8172                    LDBase->getPointerInfo(), LDBase->getAlignment(), MMOFlags);
8173    for (auto *LD : Loads)
8174      if (LD)
8175        DAG.makeEquivalentMemoryOrdering(LD, NewLd);
8176    return NewLd;
8177  };
8178
8179  // Check if the base load is entirely dereferenceable.
8180  bool IsDereferenceable = LDBase->getPointerInfo().isDereferenceable(
8181      VT.getSizeInBits() / 8, *DAG.getContext(), DAG.getDataLayout());
8182
8183  // LOAD - all consecutive load/undefs (must start/end with a load or be
8184  // entirely dereferenceable). If we have found an entire vector of loads and
8185  // undefs, then return a large load of the entire vector width starting at the
8186  // base pointer. If the vector contains zeros, then attempt to shuffle those
8187  // elements.
8188  if (FirstLoadedElt == 0 &&
8189      (LastLoadedElt == (int)(NumElems - 1) || IsDereferenceable) &&
8190      (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
8191    if (isAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
8192      return SDValue();
8193
8194    // Don't create 256-bit non-temporal aligned loads without AVX2 as these
8195    // will lower to regular temporal loads and use the cache.
8196    if (LDBase->isNonTemporal() && LDBase->getAlignment() >= 32 &&
8197        VT.is256BitVector() && !Subtarget.hasInt256())
8198      return SDValue();
8199
8200    if (NumElems == 1)
8201      return DAG.getBitcast(VT, Elts[FirstLoadedElt]);
8202
8203    if (!ZeroMask)
8204      return CreateLoad(VT, LDBase);
8205
8206    // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
8207    // vector and a zero vector to clear out the zero elements.
8208    if (!isAfterLegalize && VT.isVector()) {
8209      unsigned NumMaskElts = VT.getVectorNumElements();
8210      if ((NumMaskElts % NumElems) == 0) {
8211        unsigned Scale = NumMaskElts / NumElems;
8212        SmallVector<int, 4> ClearMask(NumMaskElts, -1);
8213        for (unsigned i = 0; i < NumElems; ++i) {
8214          if (UndefMask[i])
8215            continue;
8216          int Offset = ZeroMask[i] ? NumMaskElts : 0;
8217          for (unsigned j = 0; j != Scale; ++j)
8218            ClearMask[(i * Scale) + j] = (i * Scale) + j + Offset;
8219        }
8220        SDValue V = CreateLoad(VT, LDBase);
8221        SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
8222                                   : DAG.getConstantFP(0.0, DL, VT);
8223        return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
8224      }
8225    }
8226  }
8227
8228  // If the upper half of a ymm/zmm load is undef then just load the lower half.
8229  if (VT.is256BitVector() || VT.is512BitVector()) {
8230    unsigned HalfNumElems = NumElems / 2;
8231    if (UndefMask.extractBits(HalfNumElems, HalfNumElems).isAllOnesValue()) {
8232      EVT HalfVT =
8233          EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), HalfNumElems);
8234      SDValue HalfLD =
8235          EltsFromConsecutiveLoads(HalfVT, Elts.drop_back(HalfNumElems), DL,
8236                                   DAG, Subtarget, isAfterLegalize);
8237      if (HalfLD)
8238        return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT),
8239                           HalfLD, DAG.getIntPtrConstant(0, DL));
8240    }
8241  }
8242
8243  // VZEXT_LOAD - consecutive 32/64-bit load/undefs followed by zeros/undefs.
8244  if (IsConsecutiveLoad && FirstLoadedElt == 0 &&
8245      (LoadSizeInBits == 32 || LoadSizeInBits == 64) &&
8246      ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
8247    MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSizeInBits)
8248                                      : MVT::getIntegerVT(LoadSizeInBits);
8249    MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSizeInBits);
8250    if (TLI.isTypeLegal(VecVT)) {
8251      SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
8252      SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
8253      SDValue ResNode =
8254          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT,
8255                                  LDBase->getPointerInfo(),
8256                                  LDBase->getAlignment(),
8257                                  MachineMemOperand::MOLoad);
8258      for (auto *LD : Loads)
8259        if (LD)
8260          DAG.makeEquivalentMemoryOrdering(LD, ResNode);
8261      return DAG.getBitcast(VT, ResNode);
8262    }
8263  }
8264
8265  // BROADCAST - match the smallest possible repetition pattern, load that
8266  // scalar/subvector element and then broadcast to the entire vector.
8267  if (ZeroMask.isNullValue() && isPowerOf2_32(NumElems) && Subtarget.hasAVX() &&
8268      (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector())) {
8269    for (unsigned SubElems = 1; SubElems < NumElems; SubElems *= 2) {
8270      unsigned RepeatSize = SubElems * BaseSizeInBits;
8271      unsigned ScalarSize = std::min(RepeatSize, 64u);
8272      if (!Subtarget.hasAVX2() && ScalarSize < 32)
8273        continue;
8274
8275      bool Match = true;
8276      SmallVector<SDValue, 8> RepeatedLoads(SubElems, DAG.getUNDEF(EltBaseVT));
8277      for (unsigned i = 0; i != NumElems && Match; ++i) {
8278        if (!LoadMask[i])
8279          continue;
8280        SDValue Elt = peekThroughBitcasts(Elts[i]);
8281        if (RepeatedLoads[i % SubElems].isUndef())
8282          RepeatedLoads[i % SubElems] = Elt;
8283        else
8284          Match &= (RepeatedLoads[i % SubElems] == Elt);
8285      }
8286
8287      // We must have loads at both ends of the repetition.
8288      Match &= !RepeatedLoads.front().isUndef();
8289      Match &= !RepeatedLoads.back().isUndef();
8290      if (!Match)
8291        continue;
8292
8293      EVT RepeatVT =
8294          VT.isInteger() && (RepeatSize != 64 || TLI.isTypeLegal(MVT::i64))
8295              ? EVT::getIntegerVT(*DAG.getContext(), ScalarSize)
8296              : EVT::getFloatingPointVT(ScalarSize);
8297      if (RepeatSize > ScalarSize)
8298        RepeatVT = EVT::getVectorVT(*DAG.getContext(), RepeatVT,
8299                                    RepeatSize / ScalarSize);
8300      EVT BroadcastVT =
8301          EVT::getVectorVT(*DAG.getContext(), RepeatVT.getScalarType(),
8302                           VT.getSizeInBits() / ScalarSize);
8303      if (TLI.isTypeLegal(BroadcastVT)) {
8304        if (SDValue RepeatLoad = EltsFromConsecutiveLoads(
8305                RepeatVT, RepeatedLoads, DL, DAG, Subtarget, isAfterLegalize)) {
8306          unsigned Opcode = RepeatSize > ScalarSize ? X86ISD::SUBV_BROADCAST
8307                                                    : X86ISD::VBROADCAST;
8308          SDValue Broadcast = DAG.getNode(Opcode, DL, BroadcastVT, RepeatLoad);
8309          return DAG.getBitcast(VT, Broadcast);
8310        }
8311      }
8312    }
8313  }
8314
8315  return SDValue();
8316}
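
// Illustrative sketch (not part of this revision): the index arithmetic behind
// CheckConsecutiveLoad above. An element whose value sits ByteOffset bytes into
// some load only lines up if the element ByteOffset / BaseSizeInBytes slots
// earlier uses the same load at offset 0. Names are hypothetical.
static inline int sketchExpectedBaseIdx(int EltIdx, int64_t ByteOffset,
                                        unsigned BaseSizeInBytes) {
  assert((ByteOffset % BaseSizeInBytes) == 0 && "Not an element-sized offset");
  // e.g. EltIdx == 3, ByteOffset == 8, BaseSizeInBytes == 4 -> BaseIdx == 1.
  return EltIdx - int(ByteOffset / BaseSizeInBytes);
}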
8317
8318// Combine a vector op (shuffles etc.) that is equal to build_vector load1,
8319// load2, load3, load4, <0, 1, 2, 3> into a vector load if the load addresses
8320// are consecutive, non-overlapping, and in the right order.
8321static SDValue combineToConsecutiveLoads(EVT VT, SDNode *N, const SDLoc &DL,
8322                                         SelectionDAG &DAG,
8323                                         const X86Subtarget &Subtarget,
8324                                         bool isAfterLegalize) {
8325  SmallVector<SDValue, 64> Elts;
8326  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
8327    if (SDValue Elt = getShuffleScalarElt(N, i, DAG, 0)) {
8328      Elts.push_back(Elt);
8329      continue;
8330    }
8331    return SDValue();
8332  }
8333  assert(Elts.size() == VT.getVectorNumElements());
8334  return EltsFromConsecutiveLoads(VT, Elts, DL, DAG, Subtarget,
8335                                  isAfterLegalize);
8336}
8337
8338static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
8339                                   unsigned SplatBitSize, LLVMContext &C) {
8340  unsigned ScalarSize = VT.getScalarSizeInBits();
8341  unsigned NumElm = SplatBitSize / ScalarSize;
8342
8343  SmallVector<Constant *, 32> ConstantVec;
8344  for (unsigned i = 0; i < NumElm; i++) {
8345    APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * i);
8346    Constant *Const;
8347    if (VT.isFloatingPoint()) {
8348      if (ScalarSize == 32) {
8349        Const = ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
8350      } else {
8351        assert(ScalarSize == 64 && "Unsupported floating point scalar size");
8352        Const = ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
8353      }
8354    } else
8355      Const = Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val);
8356    ConstantVec.push_back(Const);
8357  }
8358  return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
8359}
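
// Illustrative sketch (not part of this revision): the bit slicing above shown
// on plain integers, for a 64-bit splat value split into 32-bit elements. It
// mirrors SplatValue.extractBits(32, 32 * Index); the helper name is
// hypothetical.
static inline uint32_t sketchSplatChunk32(uint64_t SplatValue, unsigned Index) {
  assert(Index < 2 && "Only two 32-bit chunks in a 64-bit splat");
  // Index 0 returns bits [31:0], Index 1 returns bits [63:32].
  return uint32_t(SplatValue >> (32 * Index));
}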
8360
8361static bool isFoldableUseOfShuffle(SDNode *N) {
8362  for (auto *U : N->uses()) {
8363    unsigned Opc = U->getOpcode();
8364    // VPERMV/VPERMV3 shuffles can never fold their index operands.
8365    if (Opc == X86ISD::VPERMV && U->getOperand(0).getNode() == N)
8366      return false;
8367    if (Opc == X86ISD::VPERMV3 && U->getOperand(1).getNode() == N)
8368      return false;
8369    if (isTargetShuffle(Opc))
8370      return true;
8371    if (Opc == ISD::BITCAST) // Ignore bitcasts
8372      return isFoldableUseOfShuffle(U);
8373    if (N->hasOneUse())
8374      return true;
8375  }
8376  return false;
8377}
8378
8379// Check if the current node of build vector is a zero extended vector.
8380// // If so, return the value extended.
8381// // For example: (0,0,0,a,0,0,0,a,0,0,0,a,0,0,0,a) returns a.
8382// // NumElt - return the number of zero extended identical values.
8383// // EltType - return the type of the value include the zero extend.
8384static SDValue isSplatZeroExtended(const BuildVectorSDNode *Op,
8385                                   unsigned &NumElt, MVT &EltType) {
8386  SDValue ExtValue = Op->getOperand(0);
8387  unsigned NumElts = Op->getNumOperands();
8388  unsigned Delta = NumElts;
8389
8390  for (unsigned i = 1; i < NumElts; i++) {
8391    if (Op->getOperand(i) == ExtValue) {
8392      Delta = i;
8393      break;
8394    }
8395    if (!(Op->getOperand(i).isUndef() || isNullConstant(Op->getOperand(i))))
8396      return SDValue();
8397  }
8398  if (!isPowerOf2_32(Delta) || Delta == 1)
8399    return SDValue();
8400
8401  for (unsigned i = Delta; i < NumElts; i++) {
8402    if (i % Delta == 0) {
8403      if (Op->getOperand(i) != ExtValue)
8404        return SDValue();
8405    } else if (!(isNullConstant(Op->getOperand(i)) ||
8406                 Op->getOperand(i).isUndef()))
8407      return SDValue();
8408  }
8409  unsigned EltSize = Op->getSimpleValueType(0).getScalarSizeInBits();
8410  unsigned ExtVTSize = EltSize * Delta;
8411  EltType = MVT::getIntegerVT(ExtVTSize);
8412  NumElt = NumElts / Delta;
8413  return ExtValue;
8414}
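
// Illustrative worked example (not part of this revision): for a v8i16
// build_vector (x,0,0,0,x,0,0,0) the routine above finds Delta == 4, so it
// returns x with EltType == i64 (16 bits * 4) and NumElt == 2, i.e. the vector
// is a splat of x zero-extended to i64.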
8415
8416/// Attempt to use the vbroadcast instruction to generate a splat value
8417/// from a splat BUILD_VECTOR which uses:
8418///  a. A single scalar load, or a constant.
8419///  b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
8420///
8421/// The VBROADCAST node is returned when a pattern is found,
8422/// or SDValue() otherwise.
8423static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
8424                                           const X86Subtarget &Subtarget,
8425                                           SelectionDAG &DAG) {
8426  // VBROADCAST requires AVX.
8427  // TODO: Splats could be generated for non-AVX CPUs using SSE
8428  // instructions, but there's less potential gain for only 128-bit vectors.
8429  if (!Subtarget.hasAVX())
8430    return SDValue();
8431
8432  MVT VT = BVOp->getSimpleValueType(0);
8433  SDLoc dl(BVOp);
8434
8435  assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
8436         "Unsupported vector type for broadcast.");
8437
8438  BitVector UndefElements;
8439  SDValue Ld = BVOp->getSplatValue(&UndefElements);
8440
8441  // Attempt to use VBROADCASTM
8442  // From this paterrn:
8443  // a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
8444  // b. t1 = (build_vector t0 t0)
8445  //
8446  // Create (VBROADCASTM v2i1 X)
8447  if (Subtarget.hasCDI() && (VT.is512BitVector() || Subtarget.hasVLX())) {
8448    MVT EltType = VT.getScalarType();
8449    unsigned NumElts = VT.getVectorNumElements();
8450    SDValue BOperand;
8451    SDValue ZeroExtended = isSplatZeroExtended(BVOp, NumElts, EltType);
8452    if ((ZeroExtended && ZeroExtended.getOpcode() == ISD::BITCAST) ||
8453        (Ld && Ld.getOpcode() == ISD::ZERO_EXTEND &&
8454         Ld.getOperand(0).getOpcode() == ISD::BITCAST)) {
8455      if (ZeroExtended)
8456        BOperand = ZeroExtended.getOperand(0);
8457      else
8458        BOperand = Ld.getOperand(0).getOperand(0);
8459      MVT MaskVT = BOperand.getSimpleValueType();
8460      if ((EltType == MVT::i64 && MaskVT == MVT::v8i1) || // for broadcastmb2q
8461          (EltType == MVT::i32 && MaskVT == MVT::v16i1)) { // for broadcastmw2d
8462        SDValue Brdcst =
8463            DAG.getNode(X86ISD::VBROADCASTM, dl,
8464                        MVT::getVectorVT(EltType, NumElts), BOperand);
8465        return DAG.getBitcast(VT, Brdcst);
8466      }
8467    }
8468  }
8469
8470  unsigned NumElts = VT.getVectorNumElements();
8471  unsigned NumUndefElts = UndefElements.count();
8472  if (!Ld || (NumElts - NumUndefElts) <= 1) {
8473    APInt SplatValue, Undef;
8474    unsigned SplatBitSize;
8475    bool HasUndef;
8476    // Check if this is a repeated constant pattern suitable for broadcasting.
8477    if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) &&
8478        SplatBitSize > VT.getScalarSizeInBits() &&
8479        SplatBitSize < VT.getSizeInBits()) {
8480      // Avoid replacing with broadcast when it's a use of a shuffle
8481      // instruction to preserve the present custom lowering of shuffles.
8482      if (isFoldableUseOfShuffle(BVOp))
8483        return SDValue();
8484      // Replace BUILD_VECTOR with a broadcast of the repeated constants.
8485      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8486      LLVMContext *Ctx = DAG.getContext();
8487      MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
8488      if (Subtarget.hasAVX()) {
8489        if (SplatBitSize <= 64 && Subtarget.hasAVX2() &&
8490            !(SplatBitSize == 64 && Subtarget.is32Bit())) {
8491          // Splatted value can fit in one INTEGER constant in constant pool.
8492          // Load the constant and broadcast it.
8493          MVT CVT = MVT::getIntegerVT(SplatBitSize);
8494          Type *ScalarTy = Type::getIntNTy(*Ctx, SplatBitSize);
8495          Constant *C = Constant::getIntegerValue(ScalarTy, SplatValue);
8496          SDValue CP = DAG.getConstantPool(C, PVT);
8497          unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
8498
8499          unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
8500          Ld = DAG.getLoad(
8501              CVT, dl, DAG.getEntryNode(), CP,
8502              MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8503              Alignment);
8504          SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
8505                                       MVT::getVectorVT(CVT, Repeat), Ld);
8506          return DAG.getBitcast(VT, Brdcst);
8507        } else if (SplatBitSize == 32 || SplatBitSize == 64) {
8508          // Splatted value can fit in one FLOAT constant in constant pool.
8509          // Load the constant and broadcast it.
8510          // AVX has support for 32 and 64 bit broadcasts for floats only.
8511          // There is no 64-bit integer type on a 32-bit subtarget.
8512          MVT CVT = MVT::getFloatingPointVT(SplatBitSize);
8513          // Lower the splat via APFloat directly, to avoid any conversion.
8514          Constant *C =
8515              SplatBitSize == 32
8516                  ? ConstantFP::get(*Ctx,
8517                                    APFloat(APFloat::IEEEsingle(), SplatValue))
8518                  : ConstantFP::get(*Ctx,
8519                                    APFloat(APFloat::IEEEdouble(), SplatValue));
8520          SDValue CP = DAG.getConstantPool(C, PVT);
8521          unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
8522
8523          unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
8524          Ld = DAG.getLoad(
8525              CVT, dl, DAG.getEntryNode(), CP,
8526              MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8527              Alignment);
8528          SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
8529                                       MVT::getVectorVT(CVT, Repeat), Ld);
8530          return DAG.getBitcast(VT, Brdcst);
8531        } else if (SplatBitSize > 64) {
8532          // Load the vector of constants and broadcast it.
8533          MVT CVT = VT.getScalarType();
8534          Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize,
8535                                             *Ctx);
8536          SDValue VCP = DAG.getConstantPool(VecC, PVT);
8537          unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits();
8538          unsigned Alignment = cast<ConstantPoolSDNode>(VCP)->getAlignment();
8539          Ld = DAG.getLoad(
8540              MVT::getVectorVT(CVT, NumElm), dl, DAG.getEntryNode(), VCP,
8541              MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8542              Alignment);
8543          SDValue Brdcst = DAG.getNode(X86ISD::SUBV_BROADCAST, dl, VT, Ld);
8544          return DAG.getBitcast(VT, Brdcst);
8545        }
8546      }
8547    }
8548
8549    // If we are moving a scalar into a vector (Ld must be set and all elements
8550    // but 1 are undef) and that operation is not obviously supported by
8551    // vmovd/vmovq/vmovss/vmovsd, then keep trying to form a broadcast.
8552    // That's better than general shuffling and may eliminate a load to GPR and
8553    // move from scalar to vector register.
8554    if (!Ld || NumElts - NumUndefElts != 1)
8555      return SDValue();
8556    unsigned ScalarSize = Ld.getValueSizeInBits();
8557    if (!(UndefElements[0] || (ScalarSize != 32 && ScalarSize != 64)))
8558      return SDValue();
8559  }
8560
8561  bool ConstSplatVal =
8562      (Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP);
8563
8564  // Make sure that all of the users of a non-constant load are from the
8565  // BUILD_VECTOR node.
8566  if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
8567    return SDValue();
8568
8569  unsigned ScalarSize = Ld.getValueSizeInBits();
8570  bool IsGE256 = (VT.getSizeInBits() >= 256);
8571
8572  // When optimizing for size, generate up to 5 extra bytes for a broadcast
8573  // instruction to save 8 or more bytes of constant pool data.
8574  // TODO: If multiple splats are generated to load the same constant,
8575  // it may be detrimental to overall size. There needs to be a way to detect
8576  // that condition to know if this is truly a size win.
8577  bool OptForSize = DAG.shouldOptForSize();
8578
8579  // Handle broadcasting a single constant scalar from the constant pool
8580  // into a vector.
8581  // On Sandybridge (no AVX2), it is still better to load a constant vector
8582  // from the constant pool and not to broadcast it from a scalar.
8583  // But override that restriction when optimizing for size.
8584  // TODO: Check if splatting is recommended for other AVX-capable CPUs.
8585  if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
8586    EVT CVT = Ld.getValueType();
8587    assert(!CVT.isVector() && "Must not broadcast a vector type");
8588
8589    // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
8590    // For size optimization, also splat v2f64 and v2i64, and for size opt
8591    // with AVX2, also splat i8 and i16.
8592    // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
8593    if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
8594        (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
8595      const Constant *C = nullptr;
8596      if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
8597        C = CI->getConstantIntValue();
8598      else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
8599        C = CF->getConstantFPValue();
8600
8601      assert(C && "Invalid constant type");
8602
8603      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8604      SDValue CP =
8605          DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
8606      unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
8607      Ld = DAG.getLoad(
8608          CVT, dl, DAG.getEntryNode(), CP,
8609          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8610          Alignment);
8611
8612      return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8613    }
8614  }
8615
8616  bool IsLoad = ISD::isNormalLoad(Ld.getNode());
8617
8618  // Handle AVX2 in-register broadcasts.
8619  if (!IsLoad && Subtarget.hasInt256() &&
8620      (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
8621    return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8622
8623  // The scalar source must be a normal load.
8624  if (!IsLoad)
8625    return SDValue();
8626
8627  if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
8628      (Subtarget.hasVLX() && ScalarSize == 64))
8629    return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8630
8631  // The integer check is needed for the 64-bit into 128-bit case so it doesn't
8632  // match double, since there is no vbroadcastsd xmm.
8633  if (Subtarget.hasInt256() && Ld.getValueType().isInteger()) {
8634    if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
8635      return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8636  }
8637
8638  // Unsupported broadcast.
8639  return SDValue();
8640}
8641
8642/// For an EXTRACT_VECTOR_ELT with a constant index return the real
8643/// underlying vector and index.
8644///
8645/// Modifies \p ExtractedFromVec to the real vector and returns the real
8646/// index.
8647static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
8648                                         SDValue ExtIdx) {
8649  int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
8650  if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
8651    return Idx;
8652
8653  // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
8654  // lowered this:
8655  //   (extract_vector_elt (v8f32 %1), Constant<6>)
8656  // to:
8657  //   (extract_vector_elt (vector_shuffle<2,u,u,u>
8658  //                           (extract_subvector (v8f32 %0), Constant<4>),
8659  //                           undef)
8660  //                       Constant<0>)
8661  // In this case the vector is the extract_subvector expression and the index
8662  // is 2, as specified by the shuffle.
8663  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
8664  SDValue ShuffleVec = SVOp->getOperand(0);
8665  MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
8666  assert(ShuffleVecVT.getVectorElementType() ==
8667         ExtractedFromVec.getSimpleValueType().getVectorElementType());
8668
8669  int ShuffleIdx = SVOp->getMaskElt(Idx);
8670  if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
8671    ExtractedFromVec = ShuffleVec;
8672    return ShuffleIdx;
8673  }
8674  return Idx;
8675}
8676
8677static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
8678  MVT VT = Op.getSimpleValueType();
8679
8680  // Skip if insert_vec_elt is not supported.
8681  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8682  if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
8683    return SDValue();
8684
8685  SDLoc DL(Op);
8686  unsigned NumElems = Op.getNumOperands();
8687
8688  SDValue VecIn1;
8689  SDValue VecIn2;
8690  SmallVector<unsigned, 4> InsertIndices;
8691  SmallVector<int, 8> Mask(NumElems, -1);
8692
8693  for (unsigned i = 0; i != NumElems; ++i) {
8694    unsigned Opc = Op.getOperand(i).getOpcode();
8695
8696    if (Opc == ISD::UNDEF)
8697      continue;
8698
8699    if (Opc != ISD::EXTRACT_VECTOR_ELT) {
8700      // Quit if more than 1 element needs inserting.
8701      if (InsertIndices.size() > 1)
8702        return SDValue();
8703
8704      InsertIndices.push_back(i);
8705      continue;
8706    }
8707
8708    SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
8709    SDValue ExtIdx = Op.getOperand(i).getOperand(1);
8710
8711    // Quit if non-constant index.
8712    if (!isa<ConstantSDNode>(ExtIdx))
8713      return SDValue();
8714    int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
8715
8716    // Quit if extracted from vector of different type.
8717    if (ExtractedFromVec.getValueType() != VT)
8718      return SDValue();
8719
8720    if (!VecIn1.getNode())
8721      VecIn1 = ExtractedFromVec;
8722    else if (VecIn1 != ExtractedFromVec) {
8723      if (!VecIn2.getNode())
8724        VecIn2 = ExtractedFromVec;
8725      else if (VecIn2 != ExtractedFromVec)
8726        // Quit if more than 2 vectors to shuffle
8727        return SDValue();
8728    }
8729
8730    if (ExtractedFromVec == VecIn1)
8731      Mask[i] = Idx;
8732    else if (ExtractedFromVec == VecIn2)
8733      Mask[i] = Idx + NumElems;
8734  }
8735
8736  if (!VecIn1.getNode())
8737    return SDValue();
8738
8739  VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
8740  SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
8741
8742  for (unsigned Idx : InsertIndices)
8743    NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
8744                     DAG.getIntPtrConstant(Idx, DL));
8745
8746  return NV;
8747}
8748
8749static SDValue ConvertI1VectorToInteger(SDValue Op, SelectionDAG &DAG) {
8750  assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
8751         Op.getScalarValueSizeInBits() == 1 &&
8752         "Can not convert non-constant vector");
8753  uint64_t Immediate = 0;
8754  for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
8755    SDValue In = Op.getOperand(idx);
8756    if (!In.isUndef())
8757      Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
8758  }
8759  SDLoc dl(Op);
8760  MVT VT = MVT::getIntegerVT(std::max((int)Op.getValueSizeInBits(), 8));
8761  return DAG.getConstant(Immediate, dl, VT);
8762}
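
// Illustrative sketch (not part of this revision): the same packing on plain
// bools: bit idx of the immediate holds element idx of the i1 vector. The
// helper name is hypothetical.
static inline uint64_t sketchPackI1Bits(ArrayRef<bool> Elts) {
  uint64_t Immediate = 0;
  for (unsigned idx = 0, e = Elts.size(); idx < e; ++idx)
    if (Elts[idx])
      Immediate |= uint64_t(1) << idx;
  return Immediate; // e.g. {1,0,1,1} -> 0b1101 == 13.
}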
8763// Lower BUILD_VECTOR operation for vXi1 (i1 vector) types.
8764static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG,
8765                                     const X86Subtarget &Subtarget) {
8766
8767  MVT VT = Op.getSimpleValueType();
8768  assert((VT.getVectorElementType() == MVT::i1) &&
8769         "Unexpected type in LowerBUILD_VECTORvXi1!");
8770
8771  SDLoc dl(Op);
8772  if (ISD::isBuildVectorAllZeros(Op.getNode()) ||
8773      ISD::isBuildVectorAllOnes(Op.getNode()))
8774    return Op;
8775
8776  uint64_t Immediate = 0;
8777  SmallVector<unsigned, 16> NonConstIdx;
8778  bool IsSplat = true;
8779  bool HasConstElts = false;
8780  int SplatIdx = -1;
8781  for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
8782    SDValue In = Op.getOperand(idx);
8783    if (In.isUndef())
8784      continue;
8785    if (!isa<ConstantSDNode>(In))
8786      NonConstIdx.push_back(idx);
8787    else {
8788      Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
8789      HasConstElts = true;
8790    }
8791    if (SplatIdx < 0)
8792      SplatIdx = idx;
8793    else if (In != Op.getOperand(SplatIdx))
8794      IsSplat = false;
8795  }
8796
8797  // for splat use " (select i1 splat_elt, all-ones, all-zeroes)"
8798  if (IsSplat) {
8799    // The build_vector allows the scalar element to be larger than the vector
8800    // element type. We need to mask it to use as a condition unless we know
8801    // the upper bits are zero.
8802    // FIXME: Use computeKnownBits instead of checking specific opcode?
8803    SDValue Cond = Op.getOperand(SplatIdx);
8804    assert(Cond.getValueType() == MVT::i8 && "Unexpected VT!");
8805    if (Cond.getOpcode() != ISD::SETCC)
8806      Cond = DAG.getNode(ISD::AND, dl, MVT::i8, Cond,
8807                         DAG.getConstant(1, dl, MVT::i8));
8808    return DAG.getSelect(dl, VT, Cond,
8809                         DAG.getConstant(1, dl, VT),
8810                         DAG.getConstant(0, dl, VT));
8811  }
8812
8813  // Materialize any constant elements, then insert the non-constant ones.
8814  SDValue DstVec;
8815  if (HasConstElts) {
8816    if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
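      // i64 is not a legal scalar type on 32-bit targets, so build the 64-bit
      // constant mask from two i32 halves, bitcast each to v32i1, and
      // concatenate them.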
8817      SDValue ImmL = DAG.getConstant(Lo_32(Immediate), dl, MVT::i32);
8818      SDValue ImmH = DAG.getConstant(Hi_32(Immediate), dl, MVT::i32);
8819      ImmL = DAG.getBitcast(MVT::v32i1, ImmL);
8820      ImmH = DAG.getBitcast(MVT::v32i1, ImmH);
8821      DstVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, ImmL, ImmH);
8822    } else {
8823      MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
8824      SDValue Imm = DAG.getConstant(Immediate, dl, ImmVT);
8825      MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
8826      DstVec = DAG.getBitcast(VecVT, Imm);
8827      DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, DstVec,
8828                           DAG.getIntPtrConstant(0, dl));
8829    }
8830  } else
8831    DstVec = DAG.getUNDEF(VT);
8832
8833  for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) {
8834    unsigned InsertIdx = NonConstIdx[i];
8835    DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
8836                         Op.getOperand(InsertIdx),
8837                         DAG.getIntPtrConstant(InsertIdx, dl));
8838  }
8839  return DstVec;
8840}
8841
8842/// This is a helper function of LowerToHorizontalOp().
8843/// This function checks that the build_vector \p N in input implements a
8844/// 128-bit partial horizontal operation on a 256-bit vector, but that operation
8845/// may not match the layout of an x86 256-bit horizontal instruction.
8846/// In other words, if this returns true, then some extraction/insertion will
8847/// be required to produce a valid horizontal instruction.
8848///
8849/// Parameter \p Opcode defines the kind of horizontal operation to match.
8850/// For example, if \p Opcode is equal to ISD::ADD, then this function
8851/// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
8852/// is equal to ISD::SUB, then this function checks if this is a horizontal
8853/// arithmetic sub.
8854///
8855/// This function only analyzes elements of \p N whose indices are
8856/// in range [BaseIdx, LastIdx).
8857///
8858/// TODO: This function was originally used to match both real and fake partial
8859/// horizontal operations, but the index-matching logic is incorrect for that.
8860/// See the corrected implementation in isHopBuildVector(). Can we reduce this
8861/// code because it is only used for partial h-op matching now?
8862static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
8863                                  SelectionDAG &DAG,
8864                                  unsigned BaseIdx, unsigned LastIdx,
8865                                  SDValue &V0, SDValue &V1) {
8866  EVT VT = N->getValueType(0);
8867  assert(VT.is256BitVector() && "Only use for matching partial 256-bit h-ops");
8868  assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
8869  assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
8870         "Invalid Vector in input!");
8871
8872  bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
8873  bool CanFold = true;
8874  unsigned ExpectedVExtractIdx = BaseIdx;
8875  unsigned NumElts = LastIdx - BaseIdx;
8876  V0 = DAG.getUNDEF(VT);
8877  V1 = DAG.getUNDEF(VT);
8878
8879  // Check if N implements a horizontal binop.
8880  for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
8881    SDValue Op = N->getOperand(i + BaseIdx);
8882
8883    // Skip UNDEFs.
8884    if (Op->isUndef()) {
8885      // Update the expected vector extract index.
8886      if (i * 2 == NumElts)
8887        ExpectedVExtractIdx = BaseIdx;
8888      ExpectedVExtractIdx += 2;
8889      continue;
8890    }
8891
8892    CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
8893
8894    if (!CanFold)
8895      break;
8896
8897    SDValue Op0 = Op.getOperand(0);
8898    SDValue Op1 = Op.getOperand(1);
8899
8900    // Try to match the following pattern:
8901    // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
8902    CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
8903        Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
8904        Op0.getOperand(0) == Op1.getOperand(0) &&
8905        isa<ConstantSDNode>(Op0.getOperand(1)) &&
8906        isa<ConstantSDNode>(Op1.getOperand(1)));
8907    if (!CanFold)
8908      break;
8909
8910    unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
8911    unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
8912
8913    if (i * 2 < NumElts) {
8914      if (V0.isUndef()) {
8915        V0 = Op0.getOperand(0);
8916        if (V0.getValueType() != VT)
8917          return false;
8918      }
8919    } else {
8920      if (V1.isUndef()) {
8921        V1 = Op0.getOperand(0);
8922        if (V1.getValueType() != VT)
8923          return false;
8924      }
8925      if (i * 2 == NumElts)
8926        ExpectedVExtractIdx = BaseIdx;
8927    }
8928
8929    SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
8930    if (I0 == ExpectedVExtractIdx)
8931      CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
8932    else if (IsCommutable && I1 == ExpectedVExtractIdx) {
8933      // Try to match the following dag sequence:
8934      // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
8935      CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
8936    } else
8937      CanFold = false;
8938
8939    ExpectedVExtractIdx += 2;
8940  }
8941
8942  return CanFold;
8943}
8944
8945/// Emit a sequence of two 128-bit horizontal add/sub followed by
8946/// a concat_vector.
8947///
8948/// This is a helper function of LowerToHorizontalOp().
8949/// This function expects two 256-bit vectors called V0 and V1.
8950/// At first, each vector is split into two separate 128-bit vectors.
8951/// Then, the resulting 128-bit vectors are used to implement two
8952/// horizontal binary operations.
8953///
8954/// The kind of horizontal binary operation is defined by \p X86Opcode.
8955///
8956/// \p Mode specifies how the 128-bit parts of V0 and V1 are passed in input to
8957/// the two new horizontal binop.
8958/// When Mode is set, the first horizontal binop dag node would take as input
8959/// the lower 128-bit of V0 and the upper 128-bit of V0. The second
8960/// horizontal binop dag node would take as input the lower 128-bit of V1
8961/// and the upper 128-bit of V1.
8962///   Example:
8963///     HADD V0_LO, V0_HI
8964///     HADD V1_LO, V1_HI
8965///
8966/// Otherwise, the first horizontal binop dag node takes as input the lower
8967/// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
8968/// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
8969///   Example:
8970///     HADD V0_LO, V1_LO
8971///     HADD V0_HI, V1_HI
8972///
8973/// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
8974/// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
8975/// the upper 128-bits of the result.
8976static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
8977                                     const SDLoc &DL, SelectionDAG &DAG,
8978                                     unsigned X86Opcode, bool Mode,
8979                                     bool isUndefLO, bool isUndefHI) {
8980  MVT VT = V0.getSimpleValueType();
8981  assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
8982         "Invalid nodes in input!");
8983
8984  unsigned NumElts = VT.getVectorNumElements();
8985  SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
8986  SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
8987  SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
8988  SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
8989  MVT NewVT = V0_LO.getSimpleValueType();
8990
8991  SDValue LO = DAG.getUNDEF(NewVT);
8992  SDValue HI = DAG.getUNDEF(NewVT);
8993
8994  if (Mode) {
8995    // Don't emit a horizontal binop if the result is expected to be UNDEF.
8996    if (!isUndefLO && !V0->isUndef())
8997      LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
8998    if (!isUndefHI && !V1->isUndef())
8999      HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
9000  } else {
9001    // Don't emit a horizontal binop if the result is expected to be UNDEF.
9002    if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
9003      LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
9004
9005    if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
9006      HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
9007  }
9008
9009  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
9010}
9011
9012/// Returns true iff \p BV builds a vector with the result equivalent to
9013/// the result of ADDSUB/SUBADD operation.
9014/// If true is returned then the operands of ADDSUB = Opnd0 +- Opnd1
9015/// (SUBADD = Opnd0 -+ Opnd1) operation are written to the parameters
9016/// \p Opnd0 and \p Opnd1.
9017static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
9018                             const X86Subtarget &Subtarget, SelectionDAG &DAG,
9019                             SDValue &Opnd0, SDValue &Opnd1,
9020                             unsigned &NumExtracts,
9021                             bool &IsSubAdd) {
9022
9023  MVT VT = BV->getSimpleValueType(0);
9024  if (!Subtarget.hasSSE3() || !VT.isFloatingPoint())
9025    return false;
9026
9027  unsigned NumElts = VT.getVectorNumElements();
9028  SDValue InVec0 = DAG.getUNDEF(VT);
9029  SDValue InVec1 = DAG.getUNDEF(VT);
9030
9031  NumExtracts = 0;
9032
9033  // Odd-numbered elements in the input build vector are obtained from
9034  // adding/subtracting two integer/float elements.
9035  // Even-numbered elements in the input build vector are obtained from
9036  // subtracting/adding two integer/float elements.
9037  unsigned Opc[2] = {0, 0};
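  // Opc[0] and Opc[1] record the opcode seen at even and odd element
  // positions respectively; a valid ADDSUB/SUBADD needs both to be set and
  // to differ (checked after the loop).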
9038  for (unsigned i = 0, e = NumElts; i != e; ++i) {
9039    SDValue Op = BV->getOperand(i);
9040
9041    // Skip 'undef' values.
9042    unsigned Opcode = Op.getOpcode();
9043    if (Opcode == ISD::UNDEF)
9044      continue;
9045
9046    // Early exit if we found an unexpected opcode.
9047    if (Opcode != ISD::FADD && Opcode != ISD::FSUB)
9048      return false;
9049
9050    SDValue Op0 = Op.getOperand(0);
9051    SDValue Op1 = Op.getOperand(1);
9052
9053    // Try to match the following pattern:
9054    // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
9055    // Early exit if we cannot match that sequence.
9056    if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9057        Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9058        !isa<ConstantSDNode>(Op0.getOperand(1)) ||
9059        !isa<ConstantSDNode>(Op1.getOperand(1)) ||
9060        Op0.getOperand(1) != Op1.getOperand(1))
9061      return false;
9062
9063    unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
9064    if (I0 != i)
9065      return false;
9066
9067    // We found a valid add/sub node. Make sure it's the same opcode as previous
9068    // elements for this parity.
9069    if (Opc[i % 2] != 0 && Opc[i % 2] != Opcode)
9070      return false;
9071    Opc[i % 2] = Opcode;
9072
9073    // Update InVec0 and InVec1.
9074    if (InVec0.isUndef()) {
9075      InVec0 = Op0.getOperand(0);
9076      if (InVec0.getSimpleValueType() != VT)
9077        return false;
9078    }
9079    if (InVec1.isUndef()) {
9080      InVec1 = Op1.getOperand(0);
9081      if (InVec1.getSimpleValueType() != VT)
9082        return false;
9083    }
9084
9085    // Make sure that the operands of each add/sub node always
9086    // come from the same pair of vectors.
9087    if (InVec0 != Op0.getOperand(0)) {
9088      if (Opcode == ISD::FSUB)
9089        return false;
9090
9091      // FADD is commutable. Try to commute the operands
9092      // and then test again.
9093      std::swap(Op0, Op1);
9094      if (InVec0 != Op0.getOperand(0))
9095        return false;
9096    }
9097
9098    if (InVec1 != Op1.getOperand(0))
9099      return false;
9100
9101    // Increment the number of extractions done.
9102    ++NumExtracts;
9103  }
9104
9105  // Ensure we have found an opcode for both parities and that they are
9106  // different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
9107  // inputs are undef.
9108  if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
9109      InVec0.isUndef() || InVec1.isUndef())
9110    return false;
9111
9112  IsSubAdd = Opc[0] == ISD::FADD;
9113
9114  Opnd0 = InVec0;
9115  Opnd1 = InVec1;
9116  return true;
9117}
9118
9119/// Returns true if it is possible to fold MUL and an idiom that has already been
9120/// recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
9121/// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the
9122/// operands of FMADDSUB/FMSUBADD are written to parameters \p Opnd0, \p Opnd1, \p Opnd2.
9123///
9124/// Prior to calling this function it should be known that there is some
9125/// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
9126/// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
9127/// before replacement of such SDNode with ADDSUB operation. Thus the number
9128/// of \p Opnd0 uses is expected to be equal to 2.
9129/// For example, this function may be called for the following IR:
9130///    %AB = fmul fast <2 x double> %A, %B
9131///    %Sub = fsub fast <2 x double> %AB, %C
9132///    %Add = fadd fast <2 x double> %AB, %C
9133///    %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
9134///                            <2 x i32> <i32 0, i32 3>
9135/// There is a def for %Addsub here, which potentially can be replaced by
9136/// X86ISD::ADDSUB operation:
9137///    %Addsub = X86ISD::ADDSUB %AB, %C
9138/// and such ADDSUB can further be replaced with FMADDSUB:
9139///    %Addsub = FMADDSUB %A, %B, %C.
9140///
9141/// The main reason why this method is called before the replacement of the
9142/// recognized ADDSUB idiom with ADDSUB operation is that such replacement
9143/// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit
9144/// FMADDSUB is.
9145static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
9146                                 SelectionDAG &DAG,
9147                                 SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
9148                                 unsigned ExpectedUses) {
9149  if (Opnd0.getOpcode() != ISD::FMUL ||
9150      !Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA())
9151    return false;
9152
9153  // FIXME: These checks must match the similar ones in
9154  // DAGCombiner::visitFADDForFMACombine. It would be good to have one
9155  // function that would answer if it is Ok to fuse MUL + ADD to FMADD
9156  // or MUL + ADDSUB to FMADDSUB.
9157  const TargetOptions &Options = DAG.getTarget().Options;
9158  bool AllowFusion =
9159      (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
9160  if (!AllowFusion)
9161    return false;
9162
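  // Commit the FMADDSUB/FMSUBADD operands: the two FMUL factors become
  // Opnd0/Opnd1 and the original second ADDSUB operand becomes Opnd2.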
9163  Opnd2 = Opnd1;
9164  Opnd1 = Opnd0.getOperand(1);
9165  Opnd0 = Opnd0.getOperand(0);
9166
9167  return true;
9168}
9169
9170/// Try to fold a build_vector that performs an 'addsub' or 'fmaddsub' or
9171/// 'fsubadd' operation accordingly to X86ISD::ADDSUB or X86ISD::FMADDSUB or
9172/// X86ISD::FMSUBADD node.
9173static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
9174                                       const X86Subtarget &Subtarget,
9175                                       SelectionDAG &DAG) {
9176  SDValue Opnd0, Opnd1;
9177  unsigned NumExtracts;
9178  bool IsSubAdd;
9179  if (!isAddSubOrSubAdd(BV, Subtarget, DAG, Opnd0, Opnd1, NumExtracts,
9180                        IsSubAdd))
9181    return SDValue();
9182
9183  MVT VT = BV->getSimpleValueType(0);
9184  SDLoc DL(BV);
9185
9186  // Try to generate X86ISD::FMADDSUB node here.
9187  SDValue Opnd2;
9188  if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts)) {
9189    unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
9190    return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
9191  }
9192
9193  // We only support ADDSUB.
9194  if (IsSubAdd)
9195    return SDValue();
9196
9197  // Do not generate X86ISD::ADDSUB node for 512-bit types even though
9198  // the ADDSUB idiom has been successfully recognized. There are no known
9199  // X86 targets with 512-bit ADDSUB instructions!
9200  // 512-bit ADDSUB idiom recognition was needed only as part of FMADDSUB idiom
9201  // recognition.
9202  if (VT.is512BitVector())
9203    return SDValue();
9204
9205  return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
9206}
9207
9208static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
9209                             unsigned &HOpcode, SDValue &V0, SDValue &V1) {
9210  // Initialize outputs to known values.
9211  MVT VT = BV->getSimpleValueType(0);
9212  HOpcode = ISD::DELETED_NODE;
9213  V0 = DAG.getUNDEF(VT);
9214  V1 = DAG.getUNDEF(VT);
9215
9216  // x86 256-bit horizontal ops are defined in a non-obvious way. Each 128-bit
9217  // half of the result is calculated independently from the 128-bit halves of
9218  // the inputs, so that makes the index-checking logic below more complicated.
9219  unsigned NumElts = VT.getVectorNumElements();
9220  unsigned GenericOpcode = ISD::DELETED_NODE;
9221  unsigned Num128BitChunks = VT.is256BitVector() ? 2 : 1;
9222  unsigned NumEltsIn128Bits = NumElts / Num128BitChunks;
9223  unsigned NumEltsIn64Bits = NumEltsIn128Bits / 2;
9224  for (unsigned i = 0; i != Num128BitChunks; ++i) {
9225    for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
9226      // Ignore undef elements.
9227      SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
9228      if (Op.isUndef())
9229        continue;
9230
9231      // If there's an opcode mismatch, we're done.
9232      if (HOpcode != ISD::DELETED_NODE && Op.getOpcode() != GenericOpcode)
9233        return false;
9234
9235      // Initialize horizontal opcode.
9236      if (HOpcode == ISD::DELETED_NODE) {
9237        GenericOpcode = Op.getOpcode();
9238        switch (GenericOpcode) {
9239        case ISD::ADD: HOpcode = X86ISD::HADD; break;
9240        case ISD::SUB: HOpcode = X86ISD::HSUB; break;
9241        case ISD::FADD: HOpcode = X86ISD::FHADD; break;
9242        case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
9243        default: return false;
9244        }
9245      }
9246
9247      SDValue Op0 = Op.getOperand(0);
9248      SDValue Op1 = Op.getOperand(1);
9249      if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9250          Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9251          Op0.getOperand(0) != Op1.getOperand(0) ||
9252          !isa<ConstantSDNode>(Op0.getOperand(1)) ||
9253          !isa<ConstantSDNode>(Op1.getOperand(1)) || !Op.hasOneUse())
9254        return false;
9255
9256      // The source vector is chosen based on which 64-bit half of the
9257      // destination vector is being calculated.
9258      if (j < NumEltsIn64Bits) {
9259        if (V0.isUndef())
9260          V0 = Op0.getOperand(0);
9261      } else {
9262        if (V1.isUndef())
9263          V1 = Op0.getOperand(0);
9264      }
9265
9266      SDValue SourceVec = (j < NumEltsIn64Bits) ? V0 : V1;
9267      if (SourceVec != Op0.getOperand(0))
9268        return false;
9269
9270      // op (extract_vector_elt A, I), (extract_vector_elt A, I+1)
9271      unsigned ExtIndex0 = Op0.getConstantOperandVal(1);
9272      unsigned ExtIndex1 = Op1.getConstantOperandVal(1);
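      // Within each 128-bit chunk, destination element j must be produced from
      // the pair of source elements starting at 2 * (j % NumEltsIn64Bits),
      // offset by the chunk base (i * NumEltsIn128Bits).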
9273      unsigned ExpectedIndex = i * NumEltsIn128Bits +
9274                               (j % NumEltsIn64Bits) * 2;
9275      if (ExpectedIndex == ExtIndex0 && ExtIndex1 == ExtIndex0 + 1)
9276        continue;
9277
9278      // If this is not a commutative op, this does not match.
9279      if (GenericOpcode != ISD::ADD && GenericOpcode != ISD::FADD)
9280        return false;
9281
9282      // Addition is commutative, so try swapping the extract indexes.
9283      // op (extract_vector_elt A, I+1), (extract_vector_elt A, I)
9284      if (ExpectedIndex == ExtIndex1 && ExtIndex0 == ExtIndex1 + 1)
9285        continue;
9286
9287      // Extract indexes do not match horizontal requirement.
9288      return false;
9289    }
9290  }
9291  // We matched. Opcode and operands are returned by reference as arguments.
9292  return true;
9293}
9294
9295static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
9296                                    SelectionDAG &DAG, unsigned HOpcode,
9297                                    SDValue V0, SDValue V1) {
9298  // If either input vector is not the same size as the build vector,
9299  // extract/insert the low bits to the correct size.
9300  // This is free (examples: zmm --> xmm, xmm --> ymm).
9301  MVT VT = BV->getSimpleValueType(0);
9302  unsigned Width = VT.getSizeInBits();
9303  if (V0.getValueSizeInBits() > Width)
9304    V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), Width);
9305  else if (V0.getValueSizeInBits() < Width)
9306    V0 = insertSubVector(DAG.getUNDEF(VT), V0, 0, DAG, SDLoc(BV), Width);
9307
9308  if (V1.getValueSizeInBits() > Width)
9309    V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), Width);
9310  else if (V1.getValueSizeInBits() < Width)
9311    V1 = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, SDLoc(BV), Width);
9312
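  // Determine which result elements are actually demanded by clearing the
  // bits that correspond to undef build_vector operands.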
9313  unsigned NumElts = VT.getVectorNumElements();
9314  APInt DemandedElts = APInt::getAllOnesValue(NumElts);
9315  for (unsigned i = 0; i != NumElts; ++i)
9316    if (BV->getOperand(i).isUndef())
9317      DemandedElts.clearBit(i);
9318
9319  // If we don't need the upper xmm, then perform as a xmm hop.
9320  unsigned HalfNumElts = NumElts / 2;
9321  if (VT.is256BitVector() && DemandedElts.lshr(HalfNumElts) == 0) {
9322    MVT HalfVT = VT.getHalfNumVectorElementsVT();
9323    V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), 128);
9324    V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), 128);
9325    SDValue Half = DAG.getNode(HOpcode, SDLoc(BV), HalfVT, V0, V1);
9326    return insertSubVector(DAG.getUNDEF(VT), Half, 0, DAG, SDLoc(BV), 256);
9327  }
9328
9329  return DAG.getNode(HOpcode, SDLoc(BV), VT, V0, V1);
9330}
9331
9332/// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
9333static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
9334                                   const X86Subtarget &Subtarget,
9335                                   SelectionDAG &DAG) {
9336  // We need at least 2 non-undef elements to make this worthwhile by default.
9337  unsigned NumNonUndefs =
9338      count_if(BV->op_values(), [](SDValue V) { return !V.isUndef(); });
9339  if (NumNonUndefs < 2)
9340    return SDValue();
9341
9342  // There are 4 sets of horizontal math operations distinguished by type:
9343  // int/FP at 128-bit/256-bit. Each type was introduced with a different
9344  // subtarget feature. Try to match those "native" patterns first.
9345  MVT VT = BV->getSimpleValueType(0);
9346  if (((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) ||
9347      ((VT == MVT::v8i16 || VT == MVT::v4i32) && Subtarget.hasSSSE3()) ||
9348      ((VT == MVT::v8f32 || VT == MVT::v4f64) && Subtarget.hasAVX()) ||
9349      ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())) {
9350    unsigned HOpcode;
9351    SDValue V0, V1;
9352    if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
9353      return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
9354  }
9355
9356  // Try harder to match 256-bit ops by using extract/concat.
9357  if (!Subtarget.hasAVX() || !VT.is256BitVector())
9358    return SDValue();
9359
9360  // Count the number of UNDEF operands in the build_vector in input.
9361  unsigned NumElts = VT.getVectorNumElements();
9362  unsigned Half = NumElts / 2;
9363  unsigned NumUndefsLO = 0;
9364  unsigned NumUndefsHI = 0;
9365  for (unsigned i = 0, e = Half; i != e; ++i)
9366    if (BV->getOperand(i)->isUndef())
9367      NumUndefsLO++;
9368
9369  for (unsigned i = Half, e = NumElts; i != e; ++i)
9370    if (BV->getOperand(i)->isUndef())
9371      NumUndefsHI++;
9372
9373  SDLoc DL(BV);
9374  SDValue InVec0, InVec1;
9375  if (VT == MVT::v8i32 || VT == MVT::v16i16) {
9376    SDValue InVec2, InVec3;
9377    unsigned X86Opcode;
9378    bool CanFold = true;
9379
9380    if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
9381        isHorizontalBinOpPart(BV, ISD::ADD, DAG, Half, NumElts, InVec2,
9382                              InVec3) &&
9383        ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
9384        ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
9385      X86Opcode = X86ISD::HADD;
9386    else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, Half, InVec0,
9387                                   InVec1) &&
9388             isHorizontalBinOpPart(BV, ISD::SUB, DAG, Half, NumElts, InVec2,
9389                                   InVec3) &&
9390             ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
9391             ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
9392      X86Opcode = X86ISD::HSUB;
9393    else
9394      CanFold = false;
9395
9396    if (CanFold) {
9397      // Do not try to expand this build_vector into a pair of horizontal
9398      // add/sub if we can emit a pair of scalar add/sub.
9399      if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
9400        return SDValue();
9401
9402      // Convert this build_vector into a pair of horizontal binops followed by
9403      // a concat vector. We must adjust the outputs from the partial horizontal
9404      // matching calls above to account for undefined vector halves.
9405      SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
9406      SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
9407      assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
9408      bool isUndefLO = NumUndefsLO == Half;
9409      bool isUndefHI = NumUndefsHI == Half;
9410      return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
9411                                   isUndefHI);
9412    }
9413  }
9414
9415  if (VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
9416      VT == MVT::v16i16) {
9417    unsigned X86Opcode;
9418    if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
9419      X86Opcode = X86ISD::HADD;
9420    else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, NumElts, InVec0,
9421                                   InVec1))
9422      X86Opcode = X86ISD::HSUB;
9423    else if (isHorizontalBinOpPart(BV, ISD::FADD, DAG, 0, NumElts, InVec0,
9424                                   InVec1))
9425      X86Opcode = X86ISD::FHADD;
9426    else if (isHorizontalBinOpPart(BV, ISD::FSUB, DAG, 0, NumElts, InVec0,
9427                                   InVec1))
9428      X86Opcode = X86ISD::FHSUB;
9429    else
9430      return SDValue();
9431
9432    // Don't try to expand this build_vector into a pair of horizontal add/sub
9433    // if we can simply emit a pair of scalar add/sub.
9434    if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
9435      return SDValue();
9436
9437    // Convert this build_vector into two horizontal add/sub followed by
9438    // a concat vector.
9439    bool isUndefLO = NumUndefsLO == Half;
9440    bool isUndefHI = NumUndefsHI == Half;
9441    return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
9442                                 isUndefLO, isUndefHI);
9443  }
9444
9445  return SDValue();
9446}
9447
9448/// If a BUILD_VECTOR's source elements all apply the same bit operation and
9449/// one of their operands is constant, lower to a pair of BUILD_VECTOR and
9450/// just apply the bit to the vectors.
9451/// NOTE: It's not in our interest to start making a general-purpose vectorizer
9452/// from this, but enough scalar bit operations are created by the later
9453/// legalization + scalarization stages to need basic support.
9454static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op,
9455                                       SelectionDAG &DAG) {
9456  SDLoc DL(Op);
9457  MVT VT = Op->getSimpleValueType(0);
9458  unsigned NumElems = VT.getVectorNumElements();
9459  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9460
9461  // Check that all elements have the same opcode.
9462  // TODO: Should we allow UNDEFS and if so how many?
9463  unsigned Opcode = Op->getOperand(0).getOpcode();
9464  for (unsigned i = 1; i < NumElems; ++i)
9465    if (Opcode != Op->getOperand(i).getOpcode())
9466      return SDValue();
9467
9468  // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
9469  bool IsShift = false;
9470  switch (Opcode) {
9471  default:
9472    return SDValue();
9473  case ISD::SHL:
9474  case ISD::SRL:
9475  case ISD::SRA:
9476    IsShift = true;
9477    break;
9478  case ISD::AND:
9479  case ISD::XOR:
9480  case ISD::OR:
9481    // Don't do this if the buildvector is a splat - we'd replace one
9482    // constant with an entire vector.
9483    if (Op->getSplatValue())
9484      return SDValue();
9485    if (!TLI.isOperationLegalOrPromote(Opcode, VT))
9486      return SDValue();
9487    break;
9488  }
9489
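  // Collect the variable (LHS) and constant (RHS) operands of each scalar
  // bit operation so they can be rebuilt as two build_vectors below.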
9490  SmallVector<SDValue, 4> LHSElts, RHSElts;
9491  for (SDValue Elt : Op->ops()) {
9492    SDValue LHS = Elt.getOperand(0);
9493    SDValue RHS = Elt.getOperand(1);
9494
9495    // We expect the canonicalized RHS operand to be the constant.
9496    if (!isa<ConstantSDNode>(RHS))
9497      return SDValue();
9498
9499    // Extend shift amounts.
9500    if (RHS.getValueSizeInBits() != VT.getScalarSizeInBits()) {
9501      if (!IsShift)
9502        return SDValue();
9503      RHS = DAG.getZExtOrTrunc(RHS, DL, VT.getScalarType());
9504    }
9505
9506    LHSElts.push_back(LHS);
9507    RHSElts.push_back(RHS);
9508  }
9509
9510  // Limit to shifts by uniform immediates.
9511  // TODO: Only accept vXi8/vXi64 special cases?
9512  // TODO: Permit non-uniform XOP/AVX2/MULLO cases?
9513  if (IsShift && any_of(RHSElts, [&](SDValue V) { return RHSElts[0] != V; }))
9514    return SDValue();
9515
9516  SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
9517  SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
9518  return DAG.getNode(Opcode, DL, VT, LHS, RHS);
9519}
9520
9521/// Create a vector constant without a load. SSE/AVX provide the bare minimum
9522/// functionality to do this, so it's all zeros, all ones, or some derivation
9523/// that is cheap to calculate.
9524static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
9525                                         const X86Subtarget &Subtarget) {
9526  SDLoc DL(Op);
9527  MVT VT = Op.getSimpleValueType();
9528
9529  // Vectors containing all zeros can be matched by pxor and xorps.
9530  if (ISD::isBuildVectorAllZeros(Op.getNode()))
9531    return Op;
9532
9533  // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
9534  // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
9535  // vpcmpeqd on 256-bit vectors.
9536  if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
9537    if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
9538      return Op;
9539
9540    return getOnesVector(VT, DAG, DL);
9541  }
9542
9543  return SDValue();
9544}
9545
9546/// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute
9547/// from a vector of source values and a vector of extraction indices.
9548/// The vectors might be manipulated to match the type of the permute op.
9549static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec,
9550                                     SDLoc &DL, SelectionDAG &DAG,
9551                                     const X86Subtarget &Subtarget) {
9552  MVT ShuffleVT = VT;
9553  EVT IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
9554  unsigned NumElts = VT.getVectorNumElements();
9555  unsigned SizeInBits = VT.getSizeInBits();
9556
9557  // Adjust IndicesVec to match VT size.
9558  assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts &&
9559         "Illegal variable permute mask size");
9560  if (IndicesVec.getValueType().getVectorNumElements() > NumElts)
9561    IndicesVec = extractSubVector(IndicesVec, 0, DAG, SDLoc(IndicesVec),
9562                                  NumElts * VT.getScalarSizeInBits());
9563  IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);
9564
9565  // Handle a SrcVec whose type doesn't match VT.
9566  if (SrcVec.getValueSizeInBits() != SizeInBits) {
9567    if ((SrcVec.getValueSizeInBits() % SizeInBits) == 0) {
9568      // Handle larger SrcVec by treating it as a larger permute.
9569      unsigned Scale = SrcVec.getValueSizeInBits() / SizeInBits;
9570      VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts);
9571      IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
9572      IndicesVec = widenSubVector(IndicesVT.getSimpleVT(), IndicesVec, false,
9573                                  Subtarget, DAG, SDLoc(IndicesVec));
9574      return extractSubVector(
9575          createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget), 0,
9576          DAG, DL, SizeInBits);
9577    } else if (SrcVec.getValueSizeInBits() < SizeInBits) {
9578      // Widen smaller SrcVec to match VT.
9579      SrcVec = widenSubVector(VT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec));
9580    } else
9581      return SDValue();
9582  }
9583
9584  auto ScaleIndices = [&DAG](SDValue Idx, uint64_t Scale) {
9585    assert(isPowerOf2_64(Scale) && "Illegal variable permute shuffle scale");
9586    EVT SrcVT = Idx.getValueType();
9587    unsigned NumDstBits = SrcVT.getScalarSizeInBits() / Scale;
9588    uint64_t IndexScale = 0;
9589    uint64_t IndexOffset = 0;
9590
9591    // If we're scaling a smaller permute op, then we need to repeat the
9592    // indices, scaling and offsetting them as well.
9593    // e.g. v4i32 -> v16i8 (Scale = 4)
9594    // IndexScale = v4i32 Splat(4 << 24 | 4 << 16 | 4 << 8 | 4)
9595    // IndexOffset = v4i32 Splat(3 << 24 | 2 << 16 | 1 << 8 | 0)
9596    for (uint64_t i = 0; i != Scale; ++i) {
9597      IndexScale |= Scale << (i * NumDstBits);
9598      IndexOffset |= i << (i * NumDstBits);
9599    }
9600
9601    Idx = DAG.getNode(ISD::MUL, SDLoc(Idx), SrcVT, Idx,
9602                      DAG.getConstant(IndexScale, SDLoc(Idx), SrcVT));
9603    Idx = DAG.getNode(ISD::ADD, SDLoc(Idx), SrcVT, Idx,
9604                      DAG.getConstant(IndexOffset, SDLoc(Idx), SrcVT));
9605    return Idx;
9606  };
9607
9608  unsigned Opcode = 0;
9609  switch (VT.SimpleTy) {
9610  default:
9611    break;
9612  case MVT::v16i8:
9613    if (Subtarget.hasSSSE3())
9614      Opcode = X86ISD::PSHUFB;
9615    break;
9616  case MVT::v8i16:
9617    if (Subtarget.hasVLX() && Subtarget.hasBWI())
9618      Opcode = X86ISD::VPERMV;
9619    else if (Subtarget.hasSSSE3()) {
9620      Opcode = X86ISD::PSHUFB;
9621      ShuffleVT = MVT::v16i8;
9622    }
9623    break;
9624  case MVT::v4f32:
9625  case MVT::v4i32:
9626    if (Subtarget.hasAVX()) {
9627      Opcode = X86ISD::VPERMILPV;
9628      ShuffleVT = MVT::v4f32;
9629    } else if (Subtarget.hasSSSE3()) {
9630      Opcode = X86ISD::PSHUFB;
9631      ShuffleVT = MVT::v16i8;
9632    }
9633    break;
9634  case MVT::v2f64:
9635  case MVT::v2i64:
9636    if (Subtarget.hasAVX()) {
9637      // VPERMILPD selects using bit#1 of the index vector, so scale IndicesVec.
9638      IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
9639      Opcode = X86ISD::VPERMILPV;
9640      ShuffleVT = MVT::v2f64;
9641    } else if (Subtarget.hasSSE41()) {
9642      // SSE41 can compare v2i64 - select between indices 0 and 1.
9643      return DAG.getSelectCC(
9644          DL, IndicesVec,
9645          getZeroVector(IndicesVT.getSimpleVT(), Subtarget, DAG, DL),
9646          DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {0, 0}),
9647          DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {1, 1}),
9648          ISD::CondCode::SETEQ);
9649    }
9650    break;
9651  case MVT::v32i8:
9652    if (Subtarget.hasVLX() && Subtarget.hasVBMI())
9653      Opcode = X86ISD::VPERMV;
9654    else if (Subtarget.hasXOP()) {
9655      SDValue LoSrc = extract128BitVector(SrcVec, 0, DAG, DL);
9656      SDValue HiSrc = extract128BitVector(SrcVec, 16, DAG, DL);
9657      SDValue LoIdx = extract128BitVector(IndicesVec, 0, DAG, DL);
9658      SDValue HiIdx = extract128BitVector(IndicesVec, 16, DAG, DL);
9659      return DAG.getNode(
9660          ISD::CONCAT_VECTORS, DL, VT,
9661          DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx),
9662          DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx));
9663    } else if (Subtarget.hasAVX()) {
9664      SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL);
9665      SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL);
9666      SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo);
9667      SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi);
9668      auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
9669                              ArrayRef<SDValue> Ops) {
9670        // Permute Lo and Hi and then select based on index range.
9671        // This works as PSHUFB uses bits[3:0] to permute elements and we don't
9672        // care about bit[7] as it's just an index vector.
9673        SDValue Idx = Ops[2];
9674        EVT VT = Idx.getValueType();
9675        return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT),
9676                               DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx),
9677                               DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx),
9678                               ISD::CondCode::SETGT);
9679      };
9680      SDValue Ops[] = {LoLo, HiHi, IndicesVec};
9681      return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops,
9682                              PSHUFBBuilder);
9683    }
9684    break;
9685  case MVT::v16i16:
9686    if (Subtarget.hasVLX() && Subtarget.hasBWI())
9687      Opcode = X86ISD::VPERMV;
9688    else if (Subtarget.hasAVX()) {
9689      // Scale to v32i8 and perform as v32i8.
9690      IndicesVec = ScaleIndices(IndicesVec, 2);
9691      return DAG.getBitcast(
9692          VT, createVariablePermute(
9693                  MVT::v32i8, DAG.getBitcast(MVT::v32i8, SrcVec),
9694                  DAG.getBitcast(MVT::v32i8, IndicesVec), DL, DAG, Subtarget));
9695    }
9696    break;
9697  case MVT::v8f32:
9698  case MVT::v8i32:
9699    if (Subtarget.hasAVX2())
9700      Opcode = X86ISD::VPERMV;
9701    else if (Subtarget.hasAVX()) {
9702      SrcVec = DAG.getBitcast(MVT::v8f32, SrcVec);
9703      SDValue LoLo = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
9704                                          {0, 1, 2, 3, 0, 1, 2, 3});
9705      SDValue HiHi = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
9706                                          {4, 5, 6, 7, 4, 5, 6, 7});
9707      if (Subtarget.hasXOP())
9708        return DAG.getBitcast(
9709            VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v8f32, LoLo, HiHi,
9710                            IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
9711      // Permute Lo and Hi and then select based on index range.
9712      // This works as VPERMILPS only uses index bits[0:1] to permute elements.
9713      SDValue Res = DAG.getSelectCC(
9714          DL, IndicesVec, DAG.getConstant(3, DL, MVT::v8i32),
9715          DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, HiHi, IndicesVec),
9716          DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, LoLo, IndicesVec),
9717          ISD::CondCode::SETGT);
9718      return DAG.getBitcast(VT, Res);
9719    }
9720    break;
9721  case MVT::v4i64:
9722  case MVT::v4f64:
9723    if (Subtarget.hasAVX512()) {
9724      if (!Subtarget.hasVLX()) {
9725        MVT WidenSrcVT = MVT::getVectorVT(VT.getScalarType(), 8);
9726        SrcVec = widenSubVector(WidenSrcVT, SrcVec, false, Subtarget, DAG,
9727                                SDLoc(SrcVec));
9728        IndicesVec = widenSubVector(MVT::v8i64, IndicesVec, false, Subtarget,
9729                                    DAG, SDLoc(IndicesVec));
9730        SDValue Res = createVariablePermute(WidenSrcVT, SrcVec, IndicesVec, DL,
9731                                            DAG, Subtarget);
9732        return extract256BitVector(Res, 0, DAG, DL);
9733      }
9734      Opcode = X86ISD::VPERMV;
9735    } else if (Subtarget.hasAVX()) {
9736      SrcVec = DAG.getBitcast(MVT::v4f64, SrcVec);
9737      SDValue LoLo =
9738          DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {0, 1, 0, 1});
9739      SDValue HiHi =
9740          DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {2, 3, 2, 3});
9741      // VPERMIL2PD selects with bit#1 of the index vector, so scale IndicesVec.
9742      IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
9743      if (Subtarget.hasXOP())
9744        return DAG.getBitcast(
9745            VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v4f64, LoLo, HiHi,
9746                            IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
9747      // Permute Lo and Hi and then select based on index range.
9748      // This works as VPERMILPD only uses index bit[1] to permute elements.
9749      SDValue Res = DAG.getSelectCC(
9750          DL, IndicesVec, DAG.getConstant(2, DL, MVT::v4i64),
9751          DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, HiHi, IndicesVec),
9752          DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, LoLo, IndicesVec),
9753          ISD::CondCode::SETGT);
9754      return DAG.getBitcast(VT, Res);
9755    }
9756    break;
9757  case MVT::v64i8:
9758    if (Subtarget.hasVBMI())
9759      Opcode = X86ISD::VPERMV;
9760    break;
9761  case MVT::v32i16:
9762    if (Subtarget.hasBWI())
9763      Opcode = X86ISD::VPERMV;
9764    break;
9765  case MVT::v16f32:
9766  case MVT::v16i32:
9767  case MVT::v8f64:
9768  case MVT::v8i64:
9769    if (Subtarget.hasAVX512())
9770      Opcode = X86ISD::VPERMV;
9771    break;
9772  }
9773  if (!Opcode)
9774    return SDValue();
9775
9776  assert((VT.getSizeInBits() == ShuffleVT.getSizeInBits()) &&
9777         (VT.getScalarSizeInBits() % ShuffleVT.getScalarSizeInBits()) == 0 &&
9778         "Illegal variable permute shuffle type");
9779
9780  uint64_t Scale = VT.getScalarSizeInBits() / ShuffleVT.getScalarSizeInBits();
9781  if (Scale > 1)
9782    IndicesVec = ScaleIndices(IndicesVec, Scale);
9783
9784  EVT ShuffleIdxVT = EVT(ShuffleVT).changeVectorElementTypeToInteger();
9785  IndicesVec = DAG.getBitcast(ShuffleIdxVT, IndicesVec);
9786
9787  SrcVec = DAG.getBitcast(ShuffleVT, SrcVec);
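  // VPERMV takes the index vector as its first operand; the other shuffle
  // opcodes used here (PSHUFB/VPERMILPV) take the source vector first.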
9788  SDValue Res = Opcode == X86ISD::VPERMV
9789                    ? DAG.getNode(Opcode, DL, ShuffleVT, IndicesVec, SrcVec)
9790                    : DAG.getNode(Opcode, DL, ShuffleVT, SrcVec, IndicesVec);
9791  return DAG.getBitcast(VT, Res);
9792}
9793
9794// Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be
9795// reasoned to be a permutation of a vector by indices in a non-constant vector.
9796// (build_vector (extract_elt V, (extract_elt I, 0)),
9797//               (extract_elt V, (extract_elt I, 1)),
9798//                    ...
9799// ->
9800// (vpermv I, V)
9801//
9802// TODO: Handle undefs
9803// TODO: Utilize pshufb and zero mask blending to support more efficient
9804// construction of vectors with constant-0 elements.
9805static SDValue
9806LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
9807                                   const X86Subtarget &Subtarget) {
9808  SDValue SrcVec, IndicesVec;
9809  // Check for a match of the permute source vector and permute index elements.
9810  // This is done by checking that the i-th build_vector operand is of the form:
9811  // (extract_elt SrcVec, (extract_elt IndicesVec, i)).
9812  for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) {
9813    SDValue Op = V.getOperand(Idx);
9814    if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9815      return SDValue();
9816
9817    // If this is the first extract encountered in V, set the source vector,
9818    // otherwise verify the extract is from the previously defined source
9819    // vector.
9820    if (!SrcVec)
9821      SrcVec = Op.getOperand(0);
9822    else if (SrcVec != Op.getOperand(0))
9823      return SDValue();
9824    SDValue ExtractedIndex = Op->getOperand(1);
9825    // Peek through extends.
9826    if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND ||
9827        ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND)
9828      ExtractedIndex = ExtractedIndex.getOperand(0);
9829    if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9830      return SDValue();
9831
9832    // If this is the first extract from the index vector candidate, set the
9833    // indices vector, otherwise verify the extract is from the previously
9834    // defined indices vector.
9835    if (!IndicesVec)
9836      IndicesVec = ExtractedIndex.getOperand(0);
9837    else if (IndicesVec != ExtractedIndex.getOperand(0))
9838      return SDValue();
9839
9840    auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1));
9841    if (!PermIdx || PermIdx->getAPIntValue() != Idx)
9842      return SDValue();
9843  }
9844
9845  SDLoc DL(V);
9846  MVT VT = V.getSimpleValueType();
9847  return createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
9848}
9849
9850SDValue
9851X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
9852  SDLoc dl(Op);
9853
9854  MVT VT = Op.getSimpleValueType();
9855  MVT EltVT = VT.getVectorElementType();
9856  unsigned NumElems = Op.getNumOperands();
9857
9858  // Lower predicate (vXi1) build_vectors separately.
9859  if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
9860    return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
9861
9862  if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
9863    return VectorConstant;
9864
9865  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
9866  if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
9867    return AddSub;
9868  if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
9869    return HorizontalOp;
9870  if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
9871    return Broadcast;
9872  if (SDValue BitOp = lowerBuildVectorToBitOp(BV, DAG))
9873    return BitOp;
9874
9875  unsigned EVTBits = EltVT.getSizeInBits();
9876
9877  unsigned NumZero  = 0;
9878  unsigned NumNonZero = 0;
9879  uint64_t NonZeros = 0;
9880  bool IsAllConstants = true;
9881  SmallSet<SDValue, 8> Values;
9882  unsigned NumConstants = NumElems;
9883  for (unsigned i = 0; i < NumElems; ++i) {
9884    SDValue Elt = Op.getOperand(i);
9885    if (Elt.isUndef())
9886      continue;
9887    Values.insert(Elt);
9888    if (!isa<ConstantSDNode>(Elt) && !isa<ConstantFPSDNode>(Elt)) {
9889      IsAllConstants = false;
9890      NumConstants--;
9891    }
9892    if (X86::isZeroNode(Elt))
9893      NumZero++;
9894    else {
9895      assert(i < sizeof(NonZeros) * 8); // Make sure the shift is within range.
9896      NonZeros |= ((uint64_t)1 << i);
9897      NumNonZero++;
9898    }
9899  }
9900
9901  // All-undef vector. Return an UNDEF. All zero vectors were handled above.
9902  if (NumNonZero == 0)
9903    return DAG.getUNDEF(VT);
9904
9905  // If we are inserting one variable into a vector of non-zero constants, try
9906  // to avoid loading each constant element as a scalar. Load the constants as a
9907  // vector and then insert the variable scalar element. If insertion is not
9908  // supported, fall back to a shuffle to get the scalar blended with the
9909  // constants. Insertion into a zero vector is handled as a special-case
9910  // somewhere below here.
9911  if (NumConstants == NumElems - 1 && NumNonZero != 1 &&
9912      (isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT) ||
9913       isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))) {
9914    // Create an all-constant vector. The variable element in the old
9915    // build vector is replaced by undef in the constant vector. Save the
9916    // variable scalar element and its index for use in the insertelement.
9917    LLVMContext &Context = *DAG.getContext();
9918    Type *EltType = Op.getValueType().getScalarType().getTypeForEVT(Context);
9919    SmallVector<Constant *, 16> ConstVecOps(NumElems, UndefValue::get(EltType));
9920    SDValue VarElt;
9921    SDValue InsIndex;
9922    for (unsigned i = 0; i != NumElems; ++i) {
9923      SDValue Elt = Op.getOperand(i);
9924      if (auto *C = dyn_cast<ConstantSDNode>(Elt))
9925        ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
9926      else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
9927        ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
9928      else if (!Elt.isUndef()) {
9929        assert(!VarElt.getNode() && !InsIndex.getNode() &&
9930               "Expected one variable element in this vector");
9931        VarElt = Elt;
9932        InsIndex = DAG.getConstant(i, dl, getVectorIdxTy(DAG.getDataLayout()));
9933      }
9934    }
9935    Constant *CV = ConstantVector::get(ConstVecOps);
9936    SDValue DAGConstVec = DAG.getConstantPool(CV, VT);
9937
9938    // The constants we just created may not be legal (eg, floating point). We
9939    // must lower the vector right here because we can not guarantee that we'll
9940    // legalize it before loading it. This is also why we could not just create
9941    // a new build vector here. If the build vector contains illegal constants,
9942    // it could get split back up into a series of insert elements.
9943    // TODO: Improve this by using shorter loads with broadcast/VZEXT_LOAD.
9944    SDValue LegalDAGConstVec = LowerConstantPool(DAGConstVec, DAG);
9945    MachineFunction &MF = DAG.getMachineFunction();
9946    MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
9947    SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
9948    unsigned InsertC = cast<ConstantSDNode>(InsIndex)->getZExtValue();
9949    unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
9950    if (InsertC < NumEltsInLow128Bits)
9951      return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
9952
9953    // There's no good way to insert into the high elements of a >128-bit
9954    // vector, so use shuffles to avoid an extract/insert sequence.
9955    assert(VT.getSizeInBits() > 128 && "Invalid insertion index?");
9956    assert(Subtarget.hasAVX() && "Must have AVX with >16-byte vector");
9957    SmallVector<int, 8> ShuffleMask;
9958    unsigned NumElts = VT.getVectorNumElements();
9959    for (unsigned i = 0; i != NumElts; ++i)
9960      ShuffleMask.push_back(i == InsertC ? NumElts : i);
9961    SDValue S2V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, VarElt);
9962    return DAG.getVectorShuffle(VT, dl, Ld, S2V, ShuffleMask);
9963  }
9964
9965  // Special case for single non-zero, non-undef, element.
9966  if (NumNonZero == 1) {
9967    unsigned Idx = countTrailingZeros(NonZeros);
9968    SDValue Item = Op.getOperand(Idx);
9969
9970    // If we have a constant or non-constant insertion into the low element of
9971    // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
9972    // the rest of the elements.  This will be matched as movd/movq/movss/movsd
9973    // depending on what the source datatype is.
9974    if (Idx == 0) {
9975      if (NumZero == 0)
9976        return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
9977
9978      if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
9979          (EltVT == MVT::i64 && Subtarget.is64Bit())) {
9980        assert((VT.is128BitVector() || VT.is256BitVector() ||
9981                VT.is512BitVector()) &&
9982               "Expected an SSE value type!");
9983        Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
9984        // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
9985        return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
9986      }
9987
9988      // We can't directly insert an i8 or i16 into a vector, so zero extend
9989      // it to i32 first.
9990      if (EltVT == MVT::i16 || EltVT == MVT::i8) {
9991        Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
9992        MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
9993        Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
9994        Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
9995        return DAG.getBitcast(VT, Item);
9996      }
9997    }
9998
9999    // Is it a vector logical left shift?
10000    if (NumElems == 2 && Idx == 1 &&
10001        X86::isZeroNode(Op.getOperand(0)) &&
10002        !X86::isZeroNode(Op.getOperand(1))) {
10003      unsigned NumBits = VT.getSizeInBits();
10004      return getVShift(true, VT,
10005                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
10006                                   VT, Op.getOperand(1)),
10007                       NumBits/2, DAG, *this, dl);
10008    }
10009
10010    if (IsAllConstants) // Otherwise, it's better to do a constpool load.
10011      return SDValue();
10012
10013    // Otherwise, if this is a vector with i32 or f32 elements, and the element
10014    // is a non-constant being inserted into an element other than the low one,
10015    // we can't use a constant pool load.  Instead, use SCALAR_TO_VECTOR (aka
10016    // movd/movss) to move this into the low element, then shuffle it into
10017    // place.
10018    if (EVTBits == 32) {
10019      Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
10020      return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
10021    }
10022  }
10023
10024  // Splat is obviously ok. Let legalizer expand it to a shuffle.
10025  if (Values.size() == 1) {
10026    if (EVTBits == 32) {
10027      // Instead of a shuffle like this:
10028      // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
10029      // Check if it's possible to issue this instead.
10030      // shuffle (vload ptr), undef, <1, 1, 1, 1>
10031      unsigned Idx = countTrailingZeros(NonZeros);
10032      SDValue Item = Op.getOperand(Idx);
10033      if (Op.getNode()->isOnlyUserOf(Item.getNode()))
10034        return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
10035    }
10036    return SDValue();
10037  }
10038
10039  // A vector full of immediates; various special cases are already
10040  // handled, so this is best done with a single constant-pool load.
10041  if (IsAllConstants)
10042    return SDValue();
10043
10044  if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
10045    return V;
10046
10047  // See if we can use a vector load to get all of the elements.
10048  {
10049    SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
10050    if (SDValue LD =
10051            EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
10052      return LD;
10053  }
10054
10055  // If this is a splat of pairs of 32-bit elements, we can use a narrower
10056  // build_vector and broadcast it.
10057  // TODO: We could probably generalize this more.
10058  if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) {
10059    SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
10060                       DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
10061    auto CanSplat = [](SDValue Op, unsigned NumElems, ArrayRef<SDValue> Ops) {
10062      // Make sure all the even/odd operands match.
10063      for (unsigned i = 2; i != NumElems; ++i)
10064        if (Ops[i % 2] != Op.getOperand(i))
10065          return false;
10066      return true;
10067    };
10068    if (CanSplat(Op, NumElems, Ops)) {
10069      MVT WideEltVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
10070      MVT NarrowVT = MVT::getVectorVT(EltVT, 4);
10071      // Create a new build vector and cast to v2i64/v2f64.
10072      SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2),
10073                                     DAG.getBuildVector(NarrowVT, dl, Ops));
10074      // Broadcast from v2i64/v2f64 and cast to final VT.
10075      MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems/2);
10076      return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT,
10077                                            NewBV));
10078    }
10079  }
10080
10081  // For AVX-length vectors, build the individual 128-bit pieces and use
10082  // shuffles to put them in place.
10083  if (VT.getSizeInBits() > 128) {
10084    MVT HVT = MVT::getVectorVT(EltVT, NumElems/2);
10085
10086    // Build both the lower and upper subvector.
10087    SDValue Lower =
10088        DAG.getBuildVector(HVT, dl, Op->ops().slice(0, NumElems / 2));
10089    SDValue Upper = DAG.getBuildVector(
10090        HVT, dl, Op->ops().slice(NumElems / 2, NumElems / 2));
10091
10092    // Recreate the wider vector with the lower and upper part.
10093    return concatSubVectors(Lower, Upper, DAG, dl);
10094  }
10095
10096  // Let legalizer expand 2-wide build_vectors.
10097  if (EVTBits == 64) {
10098    if (NumNonZero == 1) {
10099      // One half is zero or undef.
10100      unsigned Idx = countTrailingZeros(NonZeros);
10101      SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
10102                               Op.getOperand(Idx));
10103      return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
10104    }
10105    return SDValue();
10106  }
10107
10108  // If element VT is < 32 bits, convert it to inserts into a zero vector.
10109  if (EVTBits == 8 && NumElems == 16)
10110    if (SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero,
10111                                          DAG, Subtarget))
10112      return V;
10113
10114  if (EVTBits == 16 && NumElems == 8)
10115    if (SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero,
10116                                          DAG, Subtarget))
10117      return V;
10118
10119  // If the element VT is 32 bits and there are 4 elements, try an INSERTPS.
10120  if (EVTBits == 32 && NumElems == 4)
10121    if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
10122      return V;
10123
10124  // If the element VT is 32 bits, turn it into a number of shuffles.
10125  if (NumElems == 4 && NumZero > 0) {
10126    SmallVector<SDValue, 8> Ops(NumElems);
10127    for (unsigned i = 0; i < 4; ++i) {
10128      bool isZero = !(NonZeros & (1ULL << i));
10129      if (isZero)
10130        Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
10131      else
10132        Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
10133    }
10134
10135    for (unsigned i = 0; i < 2; ++i) {
10136      switch ((NonZeros >> (i*2)) & 0x3) {
10137        default: llvm_unreachable("Unexpected NonZero count");
10138        case 0:
10139          Ops[i] = Ops[i*2];  // Must be a zero vector.
10140          break;
10141        case 1:
10142          Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
10143          break;
10144        case 2:
10145          Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
10146          break;
10147        case 3:
10148          Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
10149          break;
10150      }
10151    }
10152
10153    bool Reverse1 = (NonZeros & 0x3) == 2;
10154    bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
10155    int MaskVec[] = {
10156      Reverse1 ? 1 : 0,
10157      Reverse1 ? 0 : 1,
10158      static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
10159      static_cast<int>(Reverse2 ? NumElems   : NumElems+1)
10160    };
10161    return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
10162  }
10163
10164  assert(Values.size() > 1 && "Expected non-undef and non-splat vector");
10165
10166  // Check for a build vector from mostly shuffle plus few inserting.
10167  if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
10168    return Sh;
10169
10170  // For SSE 4.1, insert the upper elements one at a time (e.g. via insertps).
10171  if (Subtarget.hasSSE41()) {
10172    SDValue Result;
10173    if (!Op.getOperand(0).isUndef())
10174      Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
10175    else
10176      Result = DAG.getUNDEF(VT);
10177
10178    for (unsigned i = 1; i < NumElems; ++i) {
10179      if (Op.getOperand(i).isUndef()) continue;
10180      Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
10181                           Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
10182    }
10183    return Result;
10184  }
10185
10186  // Otherwise, expand into a number of unpckl*; start by extending each of
10187  // our (non-undef) elements to the full vector width with the element in the
10188  // bottom slot of the vector (which generates no code for SSE).
10189  SmallVector<SDValue, 8> Ops(NumElems);
10190  for (unsigned i = 0; i < NumElems; ++i) {
10191    if (!Op.getOperand(i).isUndef())
10192      Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
10193    else
10194      Ops[i] = DAG.getUNDEF(VT);
10195  }
10196
10197  // Next, we iteratively mix elements, e.g. for v4f32:
10198  //   Step 1: unpcklps 0, 1 ==> X: <?, ?, 1, 0>
10199  //         : unpcklps 2, 3 ==> Y: <?, ?, 3, 2>
10200  //   Step 2: unpcklpd X, Y ==>    <3, 2, 1, 0>
10201  for (unsigned Scale = 1; Scale < NumElems; Scale *= 2) {
10202    // Generate scaled UNPCKL shuffle mask.
10203    SmallVector<int, 16> Mask;
10204    for (unsigned i = 0; i != Scale; ++i)
10205      Mask.push_back(i);
10206    for (unsigned i = 0; i != Scale; ++i)
10207      Mask.push_back(NumElems+i);
10208    Mask.append(NumElems - Mask.size(), SM_SentinelUndef);
10209
10210    for (unsigned i = 0, e = NumElems / (2 * Scale); i != e; ++i)
10211      Ops[i] = DAG.getVectorShuffle(VT, dl, Ops[2*i], Ops[(2*i)+1], Mask);
10212  }
10213  return Ops[0];
10214}
10215
10216// 256-bit AVX can use the vinsertf128 instruction
10217// to create 256-bit vectors from two other 128-bit ones.
10218// TODO: Detect subvector broadcast here instead of DAG combine?
10219static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
10220                                      const X86Subtarget &Subtarget) {
10221  SDLoc dl(Op);
10222  MVT ResVT = Op.getSimpleValueType();
10223
10224  assert((ResVT.is256BitVector() ||
10225          ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
10226
10227  unsigned NumOperands = Op.getNumOperands();
10228  unsigned NumZero = 0;
10229  unsigned NumNonZero = 0;
10230  unsigned NonZeros = 0;
10231  for (unsigned i = 0; i != NumOperands; ++i) {
10232    SDValue SubVec = Op.getOperand(i);
10233    if (SubVec.isUndef())
10234      continue;
10235    if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
10236      ++NumZero;
10237    else {
10238      assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
10239      NonZeros |= 1 << i;
10240      ++NumNonZero;
10241    }
10242  }
10243
10244  // If we have more than 2 non-zeros, build each half separately.
10245  if (NumNonZero > 2) {
10246    MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
10247    ArrayRef<SDUse> Ops = Op->ops();
10248    SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10249                             Ops.slice(0, NumOperands/2));
10250    SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10251                             Ops.slice(NumOperands/2));
10252    return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
10253  }
10254
10255  // Otherwise, build it up through insert_subvectors.
10256  SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
10257                        : DAG.getUNDEF(ResVT);
10258
10259  MVT SubVT = Op.getOperand(0).getSimpleValueType();
10260  unsigned NumSubElems = SubVT.getVectorNumElements();
10261  for (unsigned i = 0; i != NumOperands; ++i) {
10262    if ((NonZeros & (1 << i)) == 0)
10263      continue;
10264
10265    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec,
10266                      Op.getOperand(i),
10267                      DAG.getIntPtrConstant(i * NumSubElems, dl));
10268  }
10269
10270  return Vec;
10271}
10272
10273 // Lower a CONCAT_VECTORS of vXi1 (k-register) operands, exploiting all-zero
10274 // and undef operands so that as few mask-register shifts and inserts as
10275 // possible are emitted.
10276// TODO: Merge this with LowerAVXCONCAT_VECTORS?
10277static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
10278                                       const X86Subtarget &Subtarget,
10279                                        SelectionDAG &DAG) {
10280  SDLoc dl(Op);
10281  MVT ResVT = Op.getSimpleValueType();
10282  unsigned NumOperands = Op.getNumOperands();
10283
10284  assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
10285         "Unexpected number of operands in CONCAT_VECTORS");
10286
10287  uint64_t Zeros = 0;
10288  uint64_t NonZeros = 0;
10289  for (unsigned i = 0; i != NumOperands; ++i) {
10290    SDValue SubVec = Op.getOperand(i);
10291    if (SubVec.isUndef())
10292      continue;
10293    assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
10294    if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
10295      Zeros |= (uint64_t)1 << i;
10296    else
10297      NonZeros |= (uint64_t)1 << i;
10298  }
10299
10300  unsigned NumElems = ResVT.getVectorNumElements();
10301
10302   // If we are inserting a non-zero vector and there are zeros in the LSBs and
10303   // undef in the MSBs, we need to emit a KSHIFTL. The generic lowering to
10304   // insert_subvector will give us two kshifts.
10305  if (isPowerOf2_64(NonZeros) && Zeros != 0 && NonZeros > Zeros &&
10306      Log2_64(NonZeros) != NumOperands - 1) {
10307    MVT ShiftVT = ResVT;
10308    if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
10309      ShiftVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
10310    unsigned Idx = Log2_64(NonZeros);
10311    SDValue SubVec = Op.getOperand(Idx);
10312    unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
10313    SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ShiftVT,
10314                         DAG.getUNDEF(ShiftVT), SubVec,
10315                         DAG.getIntPtrConstant(0, dl));
10316    Op = DAG.getNode(X86ISD::KSHIFTL, dl, ShiftVT, SubVec,
10317                     DAG.getTargetConstant(Idx * SubVecNumElts, dl, MVT::i8));
10318    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResVT, Op,
10319                       DAG.getIntPtrConstant(0, dl));
10320  }
10321
10322  // If there are zero or one non-zeros we can handle this very simply.
10323  if (NonZeros == 0 || isPowerOf2_64(NonZeros)) {
10324    SDValue Vec = Zeros ? DAG.getConstant(0, dl, ResVT) : DAG.getUNDEF(ResVT);
10325    if (!NonZeros)
10326      return Vec;
10327    unsigned Idx = Log2_64(NonZeros);
10328    SDValue SubVec = Op.getOperand(Idx);
10329    unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
10330    return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, SubVec,
10331                       DAG.getIntPtrConstant(Idx * SubVecNumElts, dl));
10332  }
10333
10334  if (NumOperands > 2) {
10335    MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
10336    ArrayRef<SDUse> Ops = Op->ops();
10337    SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10338                             Ops.slice(0, NumOperands/2));
10339    SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10340                             Ops.slice(NumOperands/2));
10341    return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
10342  }
10343
10344  assert(countPopulation(NonZeros) == 2 && "Simple cases not handled?");
10345
10346  if (ResVT.getVectorNumElements() >= 16)
10347    return Op; // The operation is legal with KUNPCK
10348
10349  SDValue Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
10350                            DAG.getUNDEF(ResVT), Op.getOperand(0),
10351                            DAG.getIntPtrConstant(0, dl));
10352  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Op.getOperand(1),
10353                     DAG.getIntPtrConstant(NumElems/2, dl));
10354}
10355
10356static SDValue LowerCONCAT_VECTORS(SDValue Op,
10357                                   const X86Subtarget &Subtarget,
10358                                   SelectionDAG &DAG) {
10359  MVT VT = Op.getSimpleValueType();
10360  if (VT.getVectorElementType() == MVT::i1)
10361    return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
10362
10363  assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
10364         (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
10365          Op.getNumOperands() == 4)));
10366
10367  // AVX can use the vinsertf128 instruction to create 256-bit vectors
10368  // from two other 128-bit ones.
10369
10370   // A 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors.
10371  return LowerAVXCONCAT_VECTORS(Op, DAG, Subtarget);
10372}
10373
10374//===----------------------------------------------------------------------===//
10375// Vector shuffle lowering
10376//
10377// This is an experimental code path for lowering vector shuffles on x86. It is
10378// designed to handle arbitrary vector shuffles and blends, gracefully
10379// degrading performance as necessary. It works hard to recognize idiomatic
10380// shuffles and lower them to optimal instruction patterns without leaving
10381// a framework that allows reasonably efficient handling of all vector shuffle
10382// patterns.
10383//===----------------------------------------------------------------------===//
10384
10385/// Tiny helper function to identify a no-op mask.
10386///
10387/// This is a somewhat boring predicate function. It checks whether the mask
10388/// array input, which is assumed to be a single-input shuffle mask of the kind
10389/// used by the X86 shuffle instructions (not a fully general
10390 /// ShuffleVectorSDNode mask), requires any shuffles to occur. Both undef and an
10391/// in-place shuffle are 'no-op's.
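///
/// For illustration (example values only):
///   isNoopShuffleMask({0, -1, 2, 3}) == true   // defined elements stay put
///   isNoopShuffleMask({1, 0, 2, 3})  == false  // element 0 comes from slot 1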
10392static bool isNoopShuffleMask(ArrayRef<int> Mask) {
10393  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10394    assert(Mask[i] >= -1 && "Out of bound mask element!");
10395    if (Mask[i] >= 0 && Mask[i] != i)
10396      return false;
10397  }
10398  return true;
10399}
10400
10401/// Test whether there are elements crossing LaneSizeInBits lanes in this
10402/// shuffle mask.
10403///
10404/// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
10405/// and we routinely test for these.
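///
/// For illustration, with 128-bit lanes and 32-bit scalars (4 elements per
/// lane):
///   isLaneCrossingShuffleMask(128, 32, {4, 5, 6, 7, 0, 1, 2, 3}) == true
///   isLaneCrossingShuffleMask(128, 32, {1, 0, 3, 2, 5, 4, 7, 6}) == false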
10406static bool isLaneCrossingShuffleMask(unsigned LaneSizeInBits,
10407                                      unsigned ScalarSizeInBits,
10408                                      ArrayRef<int> Mask) {
10409  assert(LaneSizeInBits && ScalarSizeInBits &&
10410         (LaneSizeInBits % ScalarSizeInBits) == 0 &&
10411         "Illegal shuffle lane size");
10412  int LaneSize = LaneSizeInBits / ScalarSizeInBits;
10413  int Size = Mask.size();
10414  for (int i = 0; i < Size; ++i)
10415    if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10416      return true;
10417  return false;
10418}
10419
10420/// Test whether there are elements crossing 128-bit lanes in this
10421/// shuffle mask.
10422static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
10423  return isLaneCrossingShuffleMask(128, VT.getScalarSizeInBits(), Mask);
10424}
10425
10426/// Test whether a shuffle mask is equivalent within each sub-lane.
10427///
10428/// This checks a shuffle mask to see if it is performing the same
10429/// lane-relative shuffle in each sub-lane. This trivially implies
10430/// that it is also not lane-crossing. It may however involve a blend from the
10431/// same lane of a second vector.
10432///
10433/// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
10434/// non-trivial to compute in the face of undef lanes. The representation is
10435/// suitable for use with existing 128-bit shuffles as entries from the second
10436/// vector have been remapped to [LaneSize, 2*LaneSize).
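///
/// For illustration, the v8f32 mask {0, 8, 2, 10, 4, 12, 6, 14} repeats the
/// same pattern in both 128-bit lanes and produces the repeated mask
/// {0, 4, 2, 6}, with second-vector entries remapped past LaneSize.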
10437static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
10438                                  ArrayRef<int> Mask,
10439                                  SmallVectorImpl<int> &RepeatedMask) {
10440  auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
10441  RepeatedMask.assign(LaneSize, -1);
10442  int Size = Mask.size();
10443  for (int i = 0; i < Size; ++i) {
10444    assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
10445    if (Mask[i] < 0)
10446      continue;
10447    if ((Mask[i] % Size) / LaneSize != i / LaneSize)
10448      // This entry crosses lanes, so there is no way to model this shuffle.
10449      return false;
10450
10451    // Ok, handle the in-lane shuffles by detecting if and when they repeat.
10452    // Adjust second vector indices to start at LaneSize instead of Size.
10453    int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
10454                                : Mask[i] % LaneSize + LaneSize;
10455    if (RepeatedMask[i % LaneSize] < 0)
10456      // This is the first non-undef entry in this slot of a 128-bit lane.
10457      RepeatedMask[i % LaneSize] = LocalM;
10458    else if (RepeatedMask[i % LaneSize] != LocalM)
10459      // Found a mismatch with the repeated mask.
10460      return false;
10461  }
10462  return true;
10463}
10464
10465/// Test whether a shuffle mask is equivalent within each 128-bit lane.
10466static bool
10467is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
10468                                SmallVectorImpl<int> &RepeatedMask) {
10469  return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
10470}
10471
10472static bool
10473is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {
10474  SmallVector<int, 32> RepeatedMask;
10475  return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
10476}
10477
10478/// Test whether a shuffle mask is equivalent within each 256-bit lane.
10479static bool
10480is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
10481                                SmallVectorImpl<int> &RepeatedMask) {
10482  return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
10483}
10484
10485/// Test whether a target shuffle mask is equivalent within each sub-lane.
10486/// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
10487static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
10488                                        ArrayRef<int> Mask,
10489                                        SmallVectorImpl<int> &RepeatedMask) {
10490  int LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
10491  RepeatedMask.assign(LaneSize, SM_SentinelUndef);
10492  int Size = Mask.size();
10493  for (int i = 0; i < Size; ++i) {
10494    assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
10495    if (Mask[i] == SM_SentinelUndef)
10496      continue;
10497    if (Mask[i] == SM_SentinelZero) {
10498      if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
10499        return false;
10500      RepeatedMask[i % LaneSize] = SM_SentinelZero;
10501      continue;
10502    }
10503    if ((Mask[i] % Size) / LaneSize != i / LaneSize)
10504      // This entry crosses lanes, so there is no way to model this shuffle.
10505      return false;
10506
10507    // Ok, handle the in-lane shuffles by detecting if and when they repeat.
10508    // Adjust second vector indices to start at LaneSize instead of Size.
10509    int LocalM =
10510        Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + LaneSize;
10511    if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
10512      // This is the first non-undef entry in this slot of a 128-bit lane.
10513      RepeatedMask[i % LaneSize] = LocalM;
10514    else if (RepeatedMask[i % LaneSize] != LocalM)
10515      // Found a mismatch with the repeated mask.
10516      return false;
10517  }
10518  return true;
10519}
10520
10521/// Checks whether a shuffle mask is equivalent to an explicit list of
10522/// arguments.
10523///
10524/// This is a fast way to test a shuffle mask against a fixed pattern:
10525///
10526 ///   if (isShuffleEquivalent(V1, V2, Mask, {3, 2, 1, 0})) { ... }
10527 ///
10528 /// It returns true if the mask is exactly as wide as ExpectedMask, and each
10529 /// element of the mask is either -1 (signifying undef) or the value given at
10530 /// the same position in ExpectedMask.
10531static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
10532                                ArrayRef<int> ExpectedMask) {
10533  if (Mask.size() != ExpectedMask.size())
10534    return false;
10535
10536  int Size = Mask.size();
10537
10538  // If the values are build vectors, we can look through them to find
10539  // equivalent inputs that make the shuffles equivalent.
10540  auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
10541  auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
10542
10543  for (int i = 0; i < Size; ++i) {
10544    assert(Mask[i] >= -1 && "Out of bound mask element!");
10545    if (Mask[i] >= 0 && Mask[i] != ExpectedMask[i]) {
10546      auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
10547      auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
10548      if (!MaskBV || !ExpectedBV ||
10549          MaskBV->getOperand(Mask[i] % Size) !=
10550              ExpectedBV->getOperand(ExpectedMask[i] % Size))
10551        return false;
10552    }
10553  }
10554
10555  return true;
10556}
10557
10558/// Checks whether a target shuffle mask is equivalent to an explicit pattern.
10559///
10560/// The masks must be exactly the same width.
10561///
10562/// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
10563/// value in ExpectedMask is always accepted. Otherwise the indices must match.
10564///
10565/// SM_SentinelZero is accepted as a valid negative index but must match in
10566/// both.
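///
/// For illustration (example values only):
///   isTargetShuffleEquivalent({-1, 1}, {0, 1}) == true  // undef always matches
///   isTargetShuffleEquivalent({SM_SentinelZero, 1}, {0, 1}) == false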
10567static bool isTargetShuffleEquivalent(ArrayRef<int> Mask,
10568                                      ArrayRef<int> ExpectedMask,
10569                                      SDValue V1 = SDValue(),
10570                                      SDValue V2 = SDValue()) {
10571  int Size = Mask.size();
10572  if (Size != (int)ExpectedMask.size())
10573    return false;
10574  assert(isUndefOrZeroOrInRange(ExpectedMask, 0, 2 * Size) &&
10575         "Illegal target shuffle mask");
10576
10577  // Check for out-of-range target shuffle mask indices.
10578  if (!isUndefOrZeroOrInRange(Mask, 0, 2 * Size))
10579    return false;
10580
10581  // If the values are build vectors, we can look through them to find
10582  // equivalent inputs that make the shuffles equivalent.
10583  auto *BV1 = dyn_cast_or_null<BuildVectorSDNode>(V1);
10584  auto *BV2 = dyn_cast_or_null<BuildVectorSDNode>(V2);
10585  BV1 = ((BV1 && Size != (int)BV1->getNumOperands()) ? nullptr : BV1);
10586  BV2 = ((BV2 && Size != (int)BV2->getNumOperands()) ? nullptr : BV2);
10587
10588  for (int i = 0; i < Size; ++i) {
10589    if (Mask[i] == SM_SentinelUndef || Mask[i] == ExpectedMask[i])
10590      continue;
10591    if (0 <= Mask[i] && 0 <= ExpectedMask[i]) {
10592      auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
10593      auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
10594      if (MaskBV && ExpectedBV &&
10595          MaskBV->getOperand(Mask[i] % Size) ==
10596              ExpectedBV->getOperand(ExpectedMask[i] % Size))
10597        continue;
10598    }
10599    // TODO - handle SM_Sentinel equivalences.
10600    return false;
10601  }
10602  return true;
10603}
10604
10605// Attempt to create a shuffle mask from a VSELECT condition mask.
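// For illustration, a constant condition <-1, 0, undef, -1> on a 4-element
// VSELECT produces Mask = {0, 5, 6, 3}: non-zero condition elements select
// from the first operand (index i), zero or undef elements from the second
// (index i + Size).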
10606static bool createShuffleMaskFromVSELECT(SmallVectorImpl<int> &Mask,
10607                                         SDValue Cond) {
10608  if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
10609    return false;
10610
10611  unsigned Size = Cond.getValueType().getVectorNumElements();
10612  Mask.resize(Size, SM_SentinelUndef);
10613
10614  for (int i = 0; i != (int)Size; ++i) {
10615    SDValue CondElt = Cond.getOperand(i);
10616    Mask[i] = i;
10617    // Arbitrarily choose from the 2nd operand if the select condition element
10618    // is undef.
10619    // TODO: Can we do better by matching patterns such as even/odd?
10620    if (CondElt.isUndef() || isNullConstant(CondElt))
10621      Mask[i] += Size;
10622  }
10623
10624  return true;
10625}
10626
10627// Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd
10628// instructions.
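// For illustration, the v8 reference masks are {0, 8, 1, 9, 2, 10, 3, 11} for
// vpunpcklwd and {4, 12, 5, 13, 6, 14, 7, 15} for vpunpckhwd.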
10629static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT) {
10630  if (VT != MVT::v8i32 && VT != MVT::v8f32)
10631    return false;
10632
10633  SmallVector<int, 8> Unpcklwd;
10634  createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
10635                          /* Unary = */ false);
10636  SmallVector<int, 8> Unpckhwd;
10637  createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
10638                          /* Unary = */ false);
10639  bool IsUnpackwdMask = (isTargetShuffleEquivalent(Mask, Unpcklwd) ||
10640                         isTargetShuffleEquivalent(Mask, Unpckhwd));
10641  return IsUnpackwdMask;
10642}
10643
10644static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask) {
10645  // Create 128-bit vector type based on mask size.
10646  MVT EltVT = MVT::getIntegerVT(128 / Mask.size());
10647  MVT VT = MVT::getVectorVT(EltVT, Mask.size());
10648
10649  // We can't assume a canonical shuffle mask, so try the commuted version too.
10650  SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
10651  ShuffleVectorSDNode::commuteMask(CommutedMask);
10652
10653  // Match any of unary/binary or low/high.
10654  for (unsigned i = 0; i != 4; ++i) {
10655    SmallVector<int, 16> UnpackMask;
10656    createUnpackShuffleMask(VT, UnpackMask, (i >> 1) % 2, i % 2);
10657    if (isTargetShuffleEquivalent(Mask, UnpackMask) ||
10658        isTargetShuffleEquivalent(CommutedMask, UnpackMask))
10659      return true;
10660  }
10661  return false;
10662}
10663
10664/// Return true if a shuffle mask chooses elements identically in its top and
10665/// bottom halves. For example, any splat mask has the same top and bottom
10666/// halves. If an element is undefined in only one half of the mask, the halves
10667/// are not considered identical.
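/// For illustration, {0, 1, 0, 1} has identical halves, while {0, -1, 0, 1}
/// does not because the undef appears in only one half.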
10668static bool hasIdenticalHalvesShuffleMask(ArrayRef<int> Mask) {
10669  assert(Mask.size() % 2 == 0 && "Expecting even number of elements in mask");
10670  unsigned HalfSize = Mask.size() / 2;
10671  for (unsigned i = 0; i != HalfSize; ++i) {
10672    if (Mask[i] != Mask[i + HalfSize])
10673      return false;
10674  }
10675  return true;
10676}
10677
10678/// Get a 4-lane 8-bit shuffle immediate for a mask.
10679///
10680/// This helper function produces an 8-bit shuffle immediate corresponding to
10681/// the ubiquitous shuffle encoding scheme used in x86 instructions for
10682/// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
10683/// example.
10684///
10685/// NB: We rely heavily on "undef" masks preserving the input lane.
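///
/// For illustration (example values only):
///   getV4X86ShuffleImm({3, 2, 1, 0})   == 0x1B  // full reversal
///   getV4X86ShuffleImm({-1, -1, 2, 3}) == 0xE4  // undefs default to identity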
10686static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
10687  assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
10688  assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
10689  assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
10690  assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
10691  assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
10692
10693  unsigned Imm = 0;
10694  Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
10695  Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
10696  Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
10697  Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
10698  return Imm;
10699}
10700
10701static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
10702                                          SelectionDAG &DAG) {
10703  return DAG.getTargetConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
10704}
10705
10706 // The shuffle result has the form:
10707 //   0*a[0] 0*a[1] ... 0*a[n], n >= 0, where the a[] elements appear in
10708 // ascending order. Each element of Zeroable corresponds to a particular
10709 // element of Mask, as described in the computeZeroableShuffleElements function.
10710 //
10711 // The function looks for a sub-mask whose non-zero elements are in increasing
10712 // order; if such a sub-mask exists, the function returns true.
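//
// For illustration, for v8i32 with an all-zeros second input, the mask
// {0, 8, 1, 9, 2, 10, 3, 11} has its non-zeroable elements (0, 1, 2, 3) in
// increasing order, so the function returns true with IsZeroSideLeft == false;
// this is the pattern that VEXPAND can implement.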
10713static bool isNonZeroElementsInOrder(const APInt &Zeroable,
10714                                     ArrayRef<int> Mask, const EVT &VectorType,
10715                                     bool &IsZeroSideLeft) {
10716  int NextElement = -1;
10717  // Check if the Mask's nonzero elements are in increasing order.
10718  for (int i = 0, e = Mask.size(); i < e; i++) {
10719     // Check that the mask's zero elements are built from only zeros.
10720    assert(Mask[i] >= -1 && "Out of bound mask element!");
10721    if (Mask[i] < 0)
10722      return false;
10723    if (Zeroable[i])
10724      continue;
10725    // Find the lowest non zero element
10726     // Find the lowest non-zero element.
10727      NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
10728      IsZeroSideLeft = NextElement != 0;
10729    }
10730    // Exit if the mask's non zero elements are not in increasing order.
10731     // Exit if the mask's non-zero elements are not in increasing order.
10732      return false;
10733    NextElement++;
10734  }
10735  return true;
10736}
10737
10738/// Try to lower a shuffle with a single PSHUFB of V1 or V2.
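/// For illustration, splatting element 3 of a v8i16 V1 uses the byte mask
/// {6, 7, 6, 7, ...}, while zeroable elements receive the 0x80 "zero" byte.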
10739static SDValue lowerShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
10740                                      ArrayRef<int> Mask, SDValue V1,
10741                                      SDValue V2, const APInt &Zeroable,
10742                                      const X86Subtarget &Subtarget,
10743                                      SelectionDAG &DAG) {
10744  int Size = Mask.size();
10745  int LaneSize = 128 / VT.getScalarSizeInBits();
10746  const int NumBytes = VT.getSizeInBits() / 8;
10747  const int NumEltBytes = VT.getScalarSizeInBits() / 8;
10748
10749  assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
10750         (Subtarget.hasAVX2() && VT.is256BitVector()) ||
10751         (Subtarget.hasBWI() && VT.is512BitVector()));
10752
10753  SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
10754  // Sign bit set in i8 mask means zero element.
10755  SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
10756
10757  SDValue V;
10758  for (int i = 0; i < NumBytes; ++i) {
10759    int M = Mask[i / NumEltBytes];
10760    if (M < 0) {
10761      PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
10762      continue;
10763    }
10764    if (Zeroable[i / NumEltBytes]) {
10765      PSHUFBMask[i] = ZeroMask;
10766      continue;
10767    }
10768
10769    // We can only use a single input of V1 or V2.
10770    SDValue SrcV = (M >= Size ? V2 : V1);
10771    if (V && V != SrcV)
10772      return SDValue();
10773    V = SrcV;
10774    M %= Size;
10775
10776    // PSHUFB can't cross lanes, ensure this doesn't happen.
10777    if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
10778      return SDValue();
10779
10780    M = M % LaneSize;
10781    M = M * NumEltBytes + (i % NumEltBytes);
10782    PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
10783  }
10784  assert(V && "Failed to find a source input");
10785
10786  MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
10787  return DAG.getBitcast(
10788      VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
10789                      DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
10790}
10791
10792static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
10793                           const X86Subtarget &Subtarget, SelectionDAG &DAG,
10794                           const SDLoc &dl);
10795
10796 // X86 has a dedicated shuffle form that can be lowered to VEXPAND.
10797static SDValue lowerShuffleToEXPAND(const SDLoc &DL, MVT VT,
10798                                    const APInt &Zeroable,
10799                                    ArrayRef<int> Mask, SDValue &V1,
10800                                    SDValue &V2, SelectionDAG &DAG,
10801                                    const X86Subtarget &Subtarget) {
10802  bool IsLeftZeroSide = true;
10803  if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
10804                                IsLeftZeroSide))
10805    return SDValue();
10806  unsigned VEXPANDMask = (~Zeroable).getZExtValue();
10807  MVT IntegerType =
10808      MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
10809  SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
10810  unsigned NumElts = VT.getVectorNumElements();
10811  assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
10812         "Unexpected number of vector elements");
10813  SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
10814                              Subtarget, DAG, DL);
10815  SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
10816  SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
10817  return DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector, ZeroVector, VMask);
10818}
10819
10820static bool matchShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
10821                                  unsigned &UnpackOpcode, bool IsUnary,
10822                                  ArrayRef<int> TargetMask, const SDLoc &DL,
10823                                  SelectionDAG &DAG,
10824                                  const X86Subtarget &Subtarget) {
10825  int NumElts = VT.getVectorNumElements();
10826
10827  bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
10828  for (int i = 0; i != NumElts; i += 2) {
10829    int M1 = TargetMask[i + 0];
10830    int M2 = TargetMask[i + 1];
10831    Undef1 &= (SM_SentinelUndef == M1);
10832    Undef2 &= (SM_SentinelUndef == M2);
10833    Zero1 &= isUndefOrZero(M1);
10834    Zero2 &= isUndefOrZero(M2);
10835  }
10836  assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
10837         "Zeroable shuffle detected");
10838
10839  // Attempt to match the target mask against the unpack lo/hi mask patterns.
10840  SmallVector<int, 64> Unpckl, Unpckh;
10841  createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
10842  if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
10843    UnpackOpcode = X86ISD::UNPCKL;
10844    V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
10845    V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
10846    return true;
10847  }
10848
10849  createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
10850  if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
10851    UnpackOpcode = X86ISD::UNPCKH;
10852    V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
10853    V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
10854    return true;
10855  }
10856
10857  // If an unary shuffle, attempt to match as an unpack lo/hi with zero.
10858  if (IsUnary && (Zero1 || Zero2)) {
10859    // Don't bother if we can blend instead.
10860    if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
10861        isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
10862      return false;
10863
10864    bool MatchLo = true, MatchHi = true;
10865    for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
10866      int M = TargetMask[i];
10867
10868      // Ignore if the input is known to be zero or the index is undef.
10869      if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
10870          (M == SM_SentinelUndef))
10871        continue;
10872
10873      MatchLo &= (M == Unpckl[i]);
10874      MatchHi &= (M == Unpckh[i]);
10875    }
10876
10877    if (MatchLo || MatchHi) {
10878      UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
10879      V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
10880      V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
10881      return true;
10882    }
10883  }
10884
10885  // If a binary shuffle, commute and try again.
10886  if (!IsUnary) {
10887    ShuffleVectorSDNode::commuteMask(Unpckl);
10888    if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
10889      UnpackOpcode = X86ISD::UNPCKL;
10890      std::swap(V1, V2);
10891      return true;
10892    }
10893
10894    ShuffleVectorSDNode::commuteMask(Unpckh);
10895    if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
10896      UnpackOpcode = X86ISD::UNPCKH;
10897      std::swap(V1, V2);
10898      return true;
10899    }
10900  }
10901
10902  return false;
10903}
10904
10905// X86 has dedicated unpack instructions that can handle specific blend
10906// operations: UNPCKH and UNPCKL.
10907static SDValue lowerShuffleWithUNPCK(const SDLoc &DL, MVT VT,
10908                                     ArrayRef<int> Mask, SDValue V1, SDValue V2,
10909                                     SelectionDAG &DAG) {
10910  SmallVector<int, 8> Unpckl;
10911  createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
10912  if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
10913    return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
10914
10915  SmallVector<int, 8> Unpckh;
10916  createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
10917  if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
10918    return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
10919
10920  // Commute and try again.
10921  ShuffleVectorSDNode::commuteMask(Unpckl);
10922  if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
10923    return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);
10924
10925  ShuffleVectorSDNode::commuteMask(Unpckh);
10926  if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
10927    return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);
10928
10929  return SDValue();
10930}
10931
10932static bool matchShuffleAsVPMOV(ArrayRef<int> Mask, bool SwappedOps,
10933                                int Delta) {
10934  int Size = (int)Mask.size();
10935  int Split = Size / Delta;
10936  int TruncatedVectorStart = SwappedOps ? Size : 0;
10937
10938  // Match for mask starting with e.g.: <8, 10, 12, 14,... or <0, 2, 4, 6,...
10939  if (!isSequentialOrUndefInRange(Mask, 0, Split, TruncatedVectorStart, Delta))
10940    return false;
10941
10942  // The rest of the mask should not refer to the truncated vector's elements.
10943  if (isAnyInRange(Mask.slice(Split, Size - Split), TruncatedVectorStart,
10944                   TruncatedVectorStart + Size))
10945    return false;
10946
10947  return true;
10948}
10949
10950// Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
10951//
10952// An example is the following:
10953//
10954// t0: ch = EntryToken
10955//           t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
10956//         t25: v4i32 = truncate t2
10957//       t41: v8i16 = bitcast t25
10958//       t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
10959//       Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
10960//     t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
10961//   t18: v2i64 = bitcast t51
10962//
10963// Without avx512vl, this is lowered to:
10964//
10965// vpmovqd %zmm0, %ymm0
10966// vpshufb {{.*#+}} xmm0 =
10967// xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
10968//
10969// But when avx512vl is available, one can just use a single vpmovdw
10970// instruction.
10971static SDValue lowerShuffleWithVPMOV(const SDLoc &DL, ArrayRef<int> Mask,
10972                                     MVT VT, SDValue V1, SDValue V2,
10973                                     SelectionDAG &DAG,
10974                                     const X86Subtarget &Subtarget) {
10975  if (VT != MVT::v16i8 && VT != MVT::v8i16)
10976    return SDValue();
10977
10978  if (Mask.size() != VT.getVectorNumElements())
10979    return SDValue();
10980
10981  bool SwappedOps = false;
10982
10983  if (!ISD::isBuildVectorAllZeros(V2.getNode())) {
10984    if (!ISD::isBuildVectorAllZeros(V1.getNode()))
10985      return SDValue();
10986
10987    std::swap(V1, V2);
10988    SwappedOps = true;
10989  }
10990
10991  // Look for:
10992  //
10993  // bitcast (truncate <8 x i32> %vec to <8 x i16>) to <16 x i8>
10994  // bitcast (truncate <4 x i64> %vec to <4 x i32>) to <8 x i16>
10995  //
10996  // and similar ones.
10997  if (V1.getOpcode() != ISD::BITCAST)
10998    return SDValue();
10999  if (V1.getOperand(0).getOpcode() != ISD::TRUNCATE)
11000    return SDValue();
11001
11002  SDValue Src = V1.getOperand(0).getOperand(0);
11003  MVT SrcVT = Src.getSimpleValueType();
11004
11005   // The vptrunc** instructions truncating 128-bit and 256-bit vectors
11006  // are only available with avx512vl.
11007  if (!SrcVT.is512BitVector() && !Subtarget.hasVLX())
11008    return SDValue();
11009
11010   // Word-to-byte down-conversion is only available with avx512bw. The case with
11011  // 256-bit output doesn't contain a shuffle and is therefore not handled here.
11012  if (SrcVT.getVectorElementType() == MVT::i16 && VT == MVT::v16i8 &&
11013      !Subtarget.hasBWI())
11014    return SDValue();
11015
11016  // The first half/quarter of the mask should refer to every second/fourth
11017  // element of the vector truncated and bitcasted.
11018  if (!matchShuffleAsVPMOV(Mask, SwappedOps, 2) &&
11019      !matchShuffleAsVPMOV(Mask, SwappedOps, 4))
11020    return SDValue();
11021
11022  return DAG.getNode(X86ISD::VTRUNC, DL, VT, Src);
11023}
11024
11025// X86 has dedicated pack instructions that can handle specific truncation
11026// operations: PACKSS and PACKUS.
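// For illustration, a v16i8 result is formed from sources reinterpreted as
// v8i16: PACKUS applies when the upper 8 bits of every 16-bit element are
// known zero, PACKSS when each element has more than 8 sign bits.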
11027static bool matchShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1, SDValue &V2,
11028                                 unsigned &PackOpcode, ArrayRef<int> TargetMask,
11029                                 SelectionDAG &DAG,
11030                                 const X86Subtarget &Subtarget) {
11031  unsigned NumElts = VT.getVectorNumElements();
11032  unsigned BitSize = VT.getScalarSizeInBits();
11033  MVT PackSVT = MVT::getIntegerVT(BitSize * 2);
11034  MVT PackVT = MVT::getVectorVT(PackSVT, NumElts / 2);
11035
11036  auto MatchPACK = [&](SDValue N1, SDValue N2) {
11037    SDValue VV1 = DAG.getBitcast(PackVT, N1);
11038    SDValue VV2 = DAG.getBitcast(PackVT, N2);
11039    if (Subtarget.hasSSE41() || PackSVT == MVT::i16) {
11040      APInt ZeroMask = APInt::getHighBitsSet(BitSize * 2, BitSize);
11041      if ((N1.isUndef() || DAG.MaskedValueIsZero(VV1, ZeroMask)) &&
11042          (N2.isUndef() || DAG.MaskedValueIsZero(VV2, ZeroMask))) {
11043        V1 = VV1;
11044        V2 = VV2;
11045        SrcVT = PackVT;
11046        PackOpcode = X86ISD::PACKUS;
11047        return true;
11048      }
11049    }
11050    if ((N1.isUndef() || DAG.ComputeNumSignBits(VV1) > BitSize) &&
11051        (N2.isUndef() || DAG.ComputeNumSignBits(VV2) > BitSize)) {
11052      V1 = VV1;
11053      V2 = VV2;
11054      SrcVT = PackVT;
11055      PackOpcode = X86ISD::PACKSS;
11056      return true;
11057    }
11058    return false;
11059  };
11060
11061  // Try binary shuffle.
11062  SmallVector<int, 32> BinaryMask;
11063  createPackShuffleMask(VT, BinaryMask, false);
11064  if (isTargetShuffleEquivalent(TargetMask, BinaryMask, V1, V2))
11065    if (MatchPACK(V1, V2))
11066      return true;
11067
11068  // Try unary shuffle.
11069  SmallVector<int, 32> UnaryMask;
11070  createPackShuffleMask(VT, UnaryMask, true);
11071  if (isTargetShuffleEquivalent(TargetMask, UnaryMask, V1))
11072    if (MatchPACK(V1, V1))
11073      return true;
11074
11075  return false;
11076}
11077
11078static SDValue lowerShuffleWithPACK(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
11079                                    SDValue V1, SDValue V2, SelectionDAG &DAG,
11080                                    const X86Subtarget &Subtarget) {
11081  MVT PackVT;
11082  unsigned PackOpcode;
11083  if (matchShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
11084                           Subtarget))
11085    return DAG.getNode(PackOpcode, DL, VT, DAG.getBitcast(PackVT, V1),
11086                       DAG.getBitcast(PackVT, V2));
11087
11088  return SDValue();
11089}
11090
11091/// Try to emit a bitmask instruction for a shuffle.
11092///
11093/// This handles cases where we can model a blend exactly as a bitmask due to
11094/// one of the inputs being zeroable.
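///
/// For illustration, with a zeroable V2 the v4i32 mask {0, 5, 2, 7} becomes
/// 'and V1, <-1, 0, -1, 0>': lanes 1 and 3 are zeroed and the remaining lanes
/// pass V1 through unchanged.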
11095static SDValue lowerShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
11096                                     SDValue V2, ArrayRef<int> Mask,
11097                                     const APInt &Zeroable,
11098                                     const X86Subtarget &Subtarget,
11099                                     SelectionDAG &DAG) {
11100  MVT MaskVT = VT;
11101  MVT EltVT = VT.getVectorElementType();
11102  SDValue Zero, AllOnes;
11103  // Use f64 if i64 isn't legal.
11104  if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
11105    EltVT = MVT::f64;
11106    MaskVT = MVT::getVectorVT(EltVT, Mask.size());
11107  }
11108
11109  MVT LogicVT = VT;
11110  if (EltVT == MVT::f32 || EltVT == MVT::f64) {
11111    Zero = DAG.getConstantFP(0.0, DL, EltVT);
11112    AllOnes = DAG.getConstantFP(
11113        APFloat::getAllOnesValue(EltVT.getSizeInBits(), true), DL, EltVT);
11114    LogicVT =
11115        MVT::getVectorVT(EltVT == MVT::f64 ? MVT::i64 : MVT::i32, Mask.size());
11116  } else {
11117    Zero = DAG.getConstant(0, DL, EltVT);
11118    AllOnes = DAG.getAllOnesConstant(DL, EltVT);
11119  }
11120
11121  SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
11122  SDValue V;
11123  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11124    if (Zeroable[i])
11125      continue;
11126    if (Mask[i] % Size != i)
11127      return SDValue(); // Not a blend.
11128    if (!V)
11129      V = Mask[i] < Size ? V1 : V2;
11130    else if (V != (Mask[i] < Size ? V1 : V2))
11131      return SDValue(); // Can only let one input through the mask.
11132
11133    VMaskOps[i] = AllOnes;
11134  }
11135  if (!V)
11136    return SDValue(); // No non-zeroable elements!
11137
11138  SDValue VMask = DAG.getBuildVector(MaskVT, DL, VMaskOps);
11139  VMask = DAG.getBitcast(LogicVT, VMask);
11140  V = DAG.getBitcast(LogicVT, V);
11141  SDValue And = DAG.getNode(ISD::AND, DL, LogicVT, V, VMask);
11142  return DAG.getBitcast(VT, And);
11143}
11144
11145/// Try to emit a blend instruction for a shuffle using bit math.
11146///
11147/// This is used as a fallback approach when first class blend instructions are
11148/// unavailable. Currently it is only suitable for integer vectors, but could
11149/// be generalized for floating point vectors if desirable.
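///
/// Conceptually the result is (V1 & M) | (V2 & ~M); for illustration, the
/// v4i32 mask {0, 5, 2, 7} uses M = <-1, 0, -1, 0>.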
11150static SDValue lowerShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
11151                                      SDValue V2, ArrayRef<int> Mask,
11152                                      SelectionDAG &DAG) {
11153  assert(VT.isInteger() && "Only supports integer vector types!");
11154  MVT EltVT = VT.getVectorElementType();
11155  SDValue Zero = DAG.getConstant(0, DL, EltVT);
11156  SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
11157  SmallVector<SDValue, 16> MaskOps;
11158  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11159    if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
11160      return SDValue(); // Shuffled input!
11161    MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
11162  }
11163
11164  SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
11165  V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
11166  V2 = DAG.getNode(X86ISD::ANDNP, DL, VT, V1Mask, V2);
11167  return DAG.getNode(ISD::OR, DL, VT, V1, V2);
11168}
11169
11170static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
11171                                    SDValue PreservedSrc,
11172                                    const X86Subtarget &Subtarget,
11173                                    SelectionDAG &DAG);
11174
11175static bool matchShuffleAsBlend(SDValue V1, SDValue V2,
11176                                MutableArrayRef<int> Mask,
11177                                const APInt &Zeroable, bool &ForceV1Zero,
11178                                bool &ForceV2Zero, uint64_t &BlendMask) {
11179  bool V1IsZeroOrUndef =
11180      V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
11181  bool V2IsZeroOrUndef =
11182      V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());
11183
11184  BlendMask = 0;
11185  ForceV1Zero = false, ForceV2Zero = false;
11186  assert(Mask.size() <= 64 && "Shuffle mask too big for blend mask");
11187
11188  // Attempt to generate the binary blend mask. If an input is zero then
11189  // we can use any lane.
11190  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11191    int M = Mask[i];
11192    if (M == SM_SentinelUndef)
11193      continue;
11194    if (M == i)
11195      continue;
11196    if (M == i + Size) {
11197      BlendMask |= 1ull << i;
11198      continue;
11199    }
11200    if (Zeroable[i]) {
11201      if (V1IsZeroOrUndef) {
11202        ForceV1Zero = true;
11203        Mask[i] = i;
11204        continue;
11205      }
11206      if (V2IsZeroOrUndef) {
11207        ForceV2Zero = true;
11208        BlendMask |= 1ull << i;
11209        Mask[i] = i + Size;
11210        continue;
11211      }
11212    }
11213    return false;
11214  }
11215  return true;
11216}
11217
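// Widen a per-element blend mask by repeating each bit Scale times. For
// illustration, scaleVectorShuffleBlendMask(0b0101, /*Size=*/4, /*Scale=*/2)
// yields 0b00110011.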
11218static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size,
11219                                            int Scale) {
11220  uint64_t ScaledMask = 0;
11221  for (int i = 0; i != Size; ++i)
11222    if (BlendMask & (1ull << i))
11223      ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
11224  return ScaledMask;
11225}
11226
11227/// Try to emit a blend instruction for a shuffle.
11228///
11229/// This doesn't do any checks for the availability of instructions for blending
11230/// these values. It relies on the availability of the X86ISD::BLENDI pattern to
11231/// be matched in the backend with the type given. What it does check for is
11232/// that the shuffle mask is a blend, or convertible into a blend with zero.
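///
/// For illustration, the v4i32 mask {0, 5, 2, 7} is a blend that takes
/// elements 1 and 3 from V2, so the resulting immediate BlendMask is 0b1010.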
11233static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
11234                                   SDValue V2, ArrayRef<int> Original,
11235                                   const APInt &Zeroable,
11236                                   const X86Subtarget &Subtarget,
11237                                   SelectionDAG &DAG) {
11238  uint64_t BlendMask = 0;
11239  bool ForceV1Zero = false, ForceV2Zero = false;
11240  SmallVector<int, 64> Mask(Original.begin(), Original.end());
11241  if (!matchShuffleAsBlend(V1, V2, Mask, Zeroable, ForceV1Zero, ForceV2Zero,
11242                           BlendMask))
11243    return SDValue();
11244
11245  // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
11246  if (ForceV1Zero)
11247    V1 = getZeroVector(VT, Subtarget, DAG, DL);
11248  if (ForceV2Zero)
11249    V2 = getZeroVector(VT, Subtarget, DAG, DL);
11250
11251  switch (VT.SimpleTy) {
11252  case MVT::v4i64:
11253  case MVT::v8i32:
11254    assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
11255    LLVM_FALLTHROUGH;
11256  case MVT::v4f64:
11257  case MVT::v8f32:
11258    assert(Subtarget.hasAVX() && "256-bit float blends require AVX!");
11259    LLVM_FALLTHROUGH;
11260  case MVT::v2f64:
11261  case MVT::v2i64:
11262  case MVT::v4f32:
11263  case MVT::v4i32:
11264  case MVT::v8i16:
11265    assert(Subtarget.hasSSE41() && "128-bit blends require SSE41!");
11266    return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
11267                       DAG.getTargetConstant(BlendMask, DL, MVT::i8));
11268  case MVT::v16i16: {
11269    assert(Subtarget.hasAVX2() && "v16i16 blends require AVX2!");
11270    SmallVector<int, 8> RepeatedMask;
11271    if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
11272      // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
11273      assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
11274      BlendMask = 0;
11275      for (int i = 0; i < 8; ++i)
11276        if (RepeatedMask[i] >= 8)
11277          BlendMask |= 1ull << i;
11278      return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11279                         DAG.getTargetConstant(BlendMask, DL, MVT::i8));
11280    }
11281    // Use PBLENDW for lower/upper lanes and then blend lanes.
11282    // TODO - we should allow 2 PBLENDW here and leave shuffle combine to
11283    // merge to VSELECT where useful.
11284    uint64_t LoMask = BlendMask & 0xFF;
11285    uint64_t HiMask = (BlendMask >> 8) & 0xFF;
11286    if (LoMask == 0 || LoMask == 255 || HiMask == 0 || HiMask == 255) {
11287      SDValue Lo = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11288                               DAG.getTargetConstant(LoMask, DL, MVT::i8));
11289      SDValue Hi = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11290                               DAG.getTargetConstant(HiMask, DL, MVT::i8));
11291      return DAG.getVectorShuffle(
11292          MVT::v16i16, DL, Lo, Hi,
11293          {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31});
11294    }
11295    LLVM_FALLTHROUGH;
11296  }
11297  case MVT::v32i8:
11298    assert(Subtarget.hasAVX2() && "256-bit byte-blends require AVX2!");
11299    LLVM_FALLTHROUGH;
11300  case MVT::v16i8: {
11301    assert(Subtarget.hasSSE41() && "128-bit byte-blends require SSE41!");
11302
11303    // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
11304    if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
11305                                               Subtarget, DAG))
11306      return Masked;
11307
11308    if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
11309      MVT IntegerType =
11310          MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
11311      SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
11312      return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
11313    }
11314
11315    // Scale the blend by the number of bytes per element.
11316    int Scale = VT.getScalarSizeInBits() / 8;
11317
11318    // This form of blend is always done on bytes. Compute the byte vector
11319    // type.
11320    MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11321
11322    // x86 allows load folding with blendvb from the 2nd source operand. But
11323    // we are still using LLVM select here (see comment below), so that's V1.
11324    // If V2 can be load-folded and V1 cannot be load-folded, then commute to
11325    // allow that load-folding possibility.
11326    if (!ISD::isNormalLoad(V1.getNode()) && ISD::isNormalLoad(V2.getNode())) {
11327      ShuffleVectorSDNode::commuteMask(Mask);
11328      std::swap(V1, V2);
11329    }
11330
11331    // Compute the VSELECT mask. Note that VSELECT is really confusing in the
11332    // mix of LLVM's code generator and the x86 backend. We tell the code
11333    // generator that boolean values in the elements of an x86 vector register
11334    // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
11335    // mapping a select to operand #1, and 'false' mapping to operand #2. The
11336    // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
11337    // of the element (the remaining are ignored) and 0 in that high bit would
11338    // mean operand #1 while 1 in the high bit would mean operand #2. So while
11339    // the LLVM model for boolean values in vector elements gets the relevant
11340    // bit set, it is set backwards and over constrained relative to x86's
11341    // actual model.
11342    SmallVector<SDValue, 32> VSELECTMask;
11343    for (int i = 0, Size = Mask.size(); i < Size; ++i)
11344      for (int j = 0; j < Scale; ++j)
11345        VSELECTMask.push_back(
11346            Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
11347                        : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
11348                                          MVT::i8));
11349
11350    V1 = DAG.getBitcast(BlendVT, V1);
11351    V2 = DAG.getBitcast(BlendVT, V2);
11352    return DAG.getBitcast(
11353        VT,
11354        DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
11355                      V1, V2));
11356  }
11357  case MVT::v16f32:
11358  case MVT::v8f64:
11359  case MVT::v8i64:
11360  case MVT::v16i32:
11361  case MVT::v32i16:
11362  case MVT::v64i8: {
11363    // Attempt to lower to a bitmask if we can. Only if not optimizing for size.
11364    bool OptForSize = DAG.shouldOptForSize();
11365    if (!OptForSize) {
11366      if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
11367                                                 Subtarget, DAG))
11368        return Masked;
11369    }
11370
11371    // Otherwise load an immediate into a GPR, cast to k-register, and use a
11372    // masked move.
11373    MVT IntegerType =
11374        MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
11375    SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
11376    return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
11377  }
11378  default:
11379    llvm_unreachable("Not a supported integer vector type!");
11380  }
11381}
11382
11383/// Try to lower as a blend of elements from two inputs followed by
11384/// a single-input permutation.
11385///
11386/// This matches the pattern where we can blend elements from two inputs and
11387/// then reduce the shuffle to a single-input permutation.
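///
/// For example, the v4i32 mask <5, 0, 7, 2> can be lowered as the immediate
/// blend <0, 5, 2, 7> followed by the single-input permute <1, 0, 3, 2>.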
11388static SDValue lowerShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
11389                                             SDValue V1, SDValue V2,
11390                                             ArrayRef<int> Mask,
11391                                             SelectionDAG &DAG,
11392                                             bool ImmBlends = false) {
11393  // We build up the blend mask while checking whether a blend is a viable way
11394  // to reduce the shuffle.
11395  SmallVector<int, 32> BlendMask(Mask.size(), -1);
11396  SmallVector<int, 32> PermuteMask(Mask.size(), -1);
11397
11398  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11399    if (Mask[i] < 0)
11400      continue;
11401
11402    assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
11403
11404    if (BlendMask[Mask[i] % Size] < 0)
11405      BlendMask[Mask[i] % Size] = Mask[i];
11406    else if (BlendMask[Mask[i] % Size] != Mask[i])
11407      return SDValue(); // Can't blend in the needed input!
11408
11409    PermuteMask[i] = Mask[i] % Size;
11410  }
11411
11412  // If only immediate blends, then bail if the blend mask can't be widened to
11413  // i16.
11414  unsigned EltSize = VT.getScalarSizeInBits();
11415  if (ImmBlends && EltSize == 8 && !canWidenShuffleElements(BlendMask))
11416    return SDValue();
11417
11418  SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
11419  return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
11420}
11421
11422/// Try to lower as an unpack of elements from two inputs followed by
11423/// a single-input permutation.
11424///
11425/// This matches the pattern where we can unpack elements from two inputs and
11426/// then reduce the shuffle to a single-input (wider) permutation.
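///
/// For example, the v4i32 mask <1, 5, 0, 4> matches UNPCKL(V1, V2), which
/// produces <V1[0], V2[0], V1[1], V2[1]>, followed by the single-input
/// permute <2, 3, 0, 1>.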
11427static SDValue lowerShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
11428                                             SDValue V1, SDValue V2,
11429                                             ArrayRef<int> Mask,
11430                                             SelectionDAG &DAG) {
11431  int NumElts = Mask.size();
11432  int NumLanes = VT.getSizeInBits() / 128;
11433  int NumLaneElts = NumElts / NumLanes;
11434  int NumHalfLaneElts = NumLaneElts / 2;
11435
11436  bool MatchLo = true, MatchHi = true;
11437  SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
11438
11439  // Determine UNPCKL/UNPCKH type and operand order.
11440  for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
11441    for (int Elt = 0; Elt != NumLaneElts; ++Elt) {
11442      int M = Mask[Lane + Elt];
11443      if (M < 0)
11444        continue;
11445
11446      SDValue &Op = Ops[Elt & 1];
11447      if (M < NumElts && (Op.isUndef() || Op == V1))
11448        Op = V1;
11449      else if (NumElts <= M && (Op.isUndef() || Op == V2))
11450        Op = V2;
11451      else
11452        return SDValue();
11453
11454      int Lo = Lane, Mid = Lane + NumHalfLaneElts, Hi = Lane + NumLaneElts;
11455      MatchLo &= isUndefOrInRange(M, Lo, Mid) ||
11456                 isUndefOrInRange(M, NumElts + Lo, NumElts + Mid);
11457      MatchHi &= isUndefOrInRange(M, Mid, Hi) ||
11458                 isUndefOrInRange(M, NumElts + Mid, NumElts + Hi);
11459      if (!MatchLo && !MatchHi)
11460        return SDValue();
11461    }
11462  }
11463  assert((MatchLo ^ MatchHi) && "Failed to match UNPCKLO/UNPCKHI");
11464
11465  // Now check that each pair of elts comes from the same unpack pair
11466  // and set the permute mask based on each pair.
11467  // TODO - Investigate cases where we permute individual elements.
11468  SmallVector<int, 32> PermuteMask(NumElts, -1);
11469  for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
11470    for (int Elt = 0; Elt != NumLaneElts; Elt += 2) {
11471      int M0 = Mask[Lane + Elt + 0];
11472      int M1 = Mask[Lane + Elt + 1];
11473      if (0 <= M0 && 0 <= M1 &&
11474          (M0 % NumHalfLaneElts) != (M1 % NumHalfLaneElts))
11475        return SDValue();
11476      if (0 <= M0)
11477        PermuteMask[Lane + Elt + 0] = Lane + (2 * (M0 % NumHalfLaneElts));
11478      if (0 <= M1)
11479        PermuteMask[Lane + Elt + 1] = Lane + (2 * (M1 % NumHalfLaneElts)) + 1;
11480    }
11481  }
11482
11483  unsigned UnpckOp = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
11484  SDValue Unpck = DAG.getNode(UnpckOp, DL, VT, Ops);
11485  return DAG.getVectorShuffle(VT, DL, Unpck, DAG.getUNDEF(VT), PermuteMask);
11486}
11487
11488/// Helper to form a PALIGNR-based rotate+permute, merging 2 inputs and then
11489/// permuting the elements of the result in place.
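///
/// For example, the v8i16 mask <6, 7, 12, 13, 6, 7, 12, 13> uses only
/// V1[6..7] and V2[4..5]; a PALIGNR that shifts the concatenation V2:V1
/// right by 12 bytes yields <V1[6], V1[7], V2[0..5]>, and the in-place
/// permute <0, 1, 6, 7, 0, 1, 6, 7> then produces the final result.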
11490static SDValue lowerShuffleAsByteRotateAndPermute(
11491    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11492    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11493  if ((VT.is128BitVector() && !Subtarget.hasSSSE3()) ||
11494      (VT.is256BitVector() && !Subtarget.hasAVX2()) ||
11495      (VT.is512BitVector() && !Subtarget.hasBWI()))
11496    return SDValue();
11497
11498  // We don't currently support lane crossing permutes.
11499  if (is128BitLaneCrossingShuffleMask(VT, Mask))
11500    return SDValue();
11501
11502  int Scale = VT.getScalarSizeInBits() / 8;
11503  int NumLanes = VT.getSizeInBits() / 128;
11504  int NumElts = VT.getVectorNumElements();
11505  int NumEltsPerLane = NumElts / NumLanes;
11506
11507  // Determine range of mask elts.
11508  bool Blend1 = true;
11509  bool Blend2 = true;
11510  std::pair<int, int> Range1 = std::make_pair(INT_MAX, INT_MIN);
11511  std::pair<int, int> Range2 = std::make_pair(INT_MAX, INT_MIN);
11512  for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
11513    for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
11514      int M = Mask[Lane + Elt];
11515      if (M < 0)
11516        continue;
11517      if (M < NumElts) {
11518        Blend1 &= (M == (Lane + Elt));
11519        assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
11520        M = M % NumEltsPerLane;
11521        Range1.first = std::min(Range1.first, M);
11522        Range1.second = std::max(Range1.second, M);
11523      } else {
11524        M -= NumElts;
11525        Blend2 &= (M == (Lane + Elt));
11526        assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
11527        M = M % NumEltsPerLane;
11528        Range2.first = std::min(Range2.first, M);
11529        Range2.second = std::max(Range2.second, M);
11530      }
11531    }
11532  }
11533
11534  // Bail if we don't need both elements.
11535  // TODO - it might be worth doing this for unary shuffles if the permute
11536  // can be widened.
11537  if (!(0 <= Range1.first && Range1.second < NumEltsPerLane) ||
11538      !(0 <= Range2.first && Range2.second < NumEltsPerLane))
11539    return SDValue();
11540
11541  if (VT.getSizeInBits() > 128 && (Blend1 || Blend2))
11542    return SDValue();
11543
11544  // Rotate the 2 ops so we can access both ranges, then permute the result.
11545  auto RotateAndPermute = [&](SDValue Lo, SDValue Hi, int RotAmt, int Ofs) {
11546    MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11547    SDValue Rotate = DAG.getBitcast(
11548        VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, DAG.getBitcast(ByteVT, Hi),
11549                        DAG.getBitcast(ByteVT, Lo),
11550                        DAG.getTargetConstant(Scale * RotAmt, DL, MVT::i8)));
11551    SmallVector<int, 64> PermMask(NumElts, SM_SentinelUndef);
11552    for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
11553      for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
11554        int M = Mask[Lane + Elt];
11555        if (M < 0)
11556          continue;
11557        if (M < NumElts)
11558          PermMask[Lane + Elt] = Lane + ((M + Ofs - RotAmt) % NumEltsPerLane);
11559        else
11560          PermMask[Lane + Elt] = Lane + ((M - Ofs - RotAmt) % NumEltsPerLane);
11561      }
11562    }
11563    return DAG.getVectorShuffle(VT, DL, Rotate, DAG.getUNDEF(VT), PermMask);
11564  };
11565
11566  // Check if the ranges are small enough to rotate from either direction.
11567  if (Range2.second < Range1.first)
11568    return RotateAndPermute(V1, V2, Range1.first, 0);
11569  if (Range1.second < Range2.first)
11570    return RotateAndPermute(V2, V1, Range2.first, NumElts);
11571  return SDValue();
11572}
11573
11574/// Generic routine to decompose a shuffle and blend into independent
11575/// blends and permutes.
11576///
11577/// This matches the extremely common pattern for handling combined
11578/// shuffle+blend operations on newer X86 ISAs where we have very fast blend
11579/// operations. It will try to pick the best arrangement of shuffles and
11580/// blends.
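///
/// For example, the v4i32 mask <5, 0, 6, 3> decomposes into the V1 permute
/// <-1, 0, -1, 3>, the V2 permute <1, -1, 2, -1> and the blend <4, 1, 6, 3>.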
11581static SDValue lowerShuffleAsDecomposedShuffleBlend(
11582    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11583    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11584  // Shuffle the input elements into the desired positions in V1 and V2 and
11585  // blend them together.
11586  SmallVector<int, 32> V1Mask(Mask.size(), -1);
11587  SmallVector<int, 32> V2Mask(Mask.size(), -1);
11588  SmallVector<int, 32> BlendMask(Mask.size(), -1);
11589  for (int i = 0, Size = Mask.size(); i < Size; ++i)
11590    if (Mask[i] >= 0 && Mask[i] < Size) {
11591      V1Mask[i] = Mask[i];
11592      BlendMask[i] = i;
11593    } else if (Mask[i] >= Size) {
11594      V2Mask[i] = Mask[i] - Size;
11595      BlendMask[i] = i + Size;
11596    }
11597
11598  // Try to lower with the simpler initial blend/unpack/rotate strategies unless
11599  // one of the input shuffles would be a no-op. We prefer to shuffle inputs as
11600  // the shuffle may be able to fold with a load or other benefit. However, when
11601  // we'll have to do 2x as many shuffles in order to achieve this, a 2-input
11602  // pre-shuffle first is a better strategy.
11603  if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask)) {
11604    // Only prefer immediate blends to unpack/rotate.
11605    if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
11606                                                          DAG, true))
11607      return BlendPerm;
11608    if (SDValue UnpackPerm = lowerShuffleAsUNPCKAndPermute(DL, VT, V1, V2, Mask,
11609                                                           DAG))
11610      return UnpackPerm;
11611    if (SDValue RotatePerm = lowerShuffleAsByteRotateAndPermute(
11612            DL, VT, V1, V2, Mask, Subtarget, DAG))
11613      return RotatePerm;
11614    // Unpack/rotate failed - try again with variable blends.
11615    if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
11616                                                          DAG))
11617      return BlendPerm;
11618  }
11619
11620  V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
11621  V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
11622  return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
11623}
11624
11625/// Try to lower a vector shuffle as a rotation.
11626///
11627/// This is used to support PALIGNR for SSSE3 or VALIGND/Q for AVX512.
11628static int matchShuffleAsRotate(SDValue &V1, SDValue &V2, ArrayRef<int> Mask) {
11629  int NumElts = Mask.size();
11630
11631  // We need to detect various ways of spelling a rotation:
11632  //   [11, 12, 13, 14, 15,  0,  1,  2]
11633  //   [-1, 12, 13, 14, -1, -1,  1, -1]
11634  //   [-1, -1, -1, -1, -1, -1,  1,  2]
11635  //   [ 3,  4,  5,  6,  7,  8,  9, 10]
11636  //   [-1,  4,  5,  6, -1, -1,  9, -1]
11637  //   [-1,  4,  5,  6, -1, -1, -1, -1]
11638  int Rotation = 0;
11639  SDValue Lo, Hi;
11640  for (int i = 0; i < NumElts; ++i) {
11641    int M = Mask[i];
11642    assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
11643           "Unexpected mask index.");
11644    if (M < 0)
11645      continue;
11646
11647    // Determine where a rotated vector would have started.
11648    int StartIdx = i - (M % NumElts);
11649    if (StartIdx == 0)
11650      // The identity rotation isn't interesting, stop.
11651      return -1;
11652
11653    // If we found the tail of a vector the rotation must be the missing
11654    // front. If we found the head of a vector, the rotation must be how much
11655    // of the head is present.
11656    int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
11657
11658    if (Rotation == 0)
11659      Rotation = CandidateRotation;
11660    else if (Rotation != CandidateRotation)
11661      // The rotations don't match, so we can't match this mask.
11662      return -1;
11663
11664    // Compute which value this mask is pointing at.
11665    SDValue MaskV = M < NumElts ? V1 : V2;
11666
11667    // Compute which of the two target values this index should be assigned
11668    // to. This reflects whether the high elements are remaining or the low
11669    // elements are remaining.
11670    SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
11671
11672    // Either set up this value if we've not encountered it before, or check
11673    // that it remains consistent.
11674    if (!TargetV)
11675      TargetV = MaskV;
11676    else if (TargetV != MaskV)
11677      // This may be a rotation, but it pulls from the inputs in some
11678      // unsupported interleaving.
11679      return -1;
11680  }
11681
11682  // Check that we successfully analyzed the mask, and normalize the results.
11683  assert(Rotation != 0 && "Failed to locate a viable rotation!");
11684  assert((Lo || Hi) && "Failed to find a rotated input vector!");
11685  if (!Lo)
11686    Lo = Hi;
11687  else if (!Hi)
11688    Hi = Lo;
11689
11690  V1 = Lo;
11691  V2 = Hi;
11692
11693  return Rotation;
11694}
11695
11696/// Try to lower a vector shuffle as a byte rotation.
11697///
11698/// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
11699/// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
11700/// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
11701/// try to generically lower a vector shuffle through such a pattern. It
11702/// does not check for the profitability of lowering either as PALIGNR or
11703/// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
11704/// This matches shuffle vectors that look like:
11705///
11706///   v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
11707///
11708/// Essentially it concatenates V1 and V2, shifts right by some number of
11709/// elements, and takes the low elements as the result. Note that while this is
11710/// specified as a *right shift* because x86 is little-endian, it is a *left
11711/// rotate* of the vector lanes.
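///
/// For the v8i16 example above, matchShuffleAsRotate finds a rotation of 3
/// elements, which matchShuffleAsByteRotate scales by 16 / 8 == 2 bytes per
/// element into a byte rotation of 6.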
11712static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
11713                                    ArrayRef<int> Mask) {
11714  // Don't accept any shuffles with zero elements.
11715  if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
11716    return -1;
11717
11718  // PALIGNR works on 128-bit lanes.
11719  SmallVector<int, 16> RepeatedMask;
11720  if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
11721    return -1;
11722
11723  int Rotation = matchShuffleAsRotate(V1, V2, RepeatedMask);
11724  if (Rotation <= 0)
11725    return -1;
11726
11727  // PALIGNR rotates bytes, so we need to scale the
11728  // rotation based on how many bytes are in the vector lane.
11729  int NumElts = RepeatedMask.size();
11730  int Scale = 16 / NumElts;
11731  return Rotation * Scale;
11732}
11733
11734static SDValue lowerShuffleAsByteRotate(const SDLoc &DL, MVT VT, SDValue V1,
11735                                        SDValue V2, ArrayRef<int> Mask,
11736                                        const X86Subtarget &Subtarget,
11737                                        SelectionDAG &DAG) {
11738  assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11739
11740  SDValue Lo = V1, Hi = V2;
11741  int ByteRotation = matchShuffleAsByteRotate(VT, Lo, Hi, Mask);
11742  if (ByteRotation <= 0)
11743    return SDValue();
11744
11745  // Cast the inputs to i8 vector of correct length to match PALIGNR or
11746  // PSLLDQ/PSRLDQ.
11747  MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11748  Lo = DAG.getBitcast(ByteVT, Lo);
11749  Hi = DAG.getBitcast(ByteVT, Hi);
11750
11751  // SSSE3 targets can use the palignr instruction.
11752  if (Subtarget.hasSSSE3()) {
11753    assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
11754           "512-bit PALIGNR requires BWI instructions");
11755    return DAG.getBitcast(
11756        VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
11757                        DAG.getTargetConstant(ByteRotation, DL, MVT::i8)));
11758  }
11759
11760  assert(VT.is128BitVector() &&
11761         "Rotate-based lowering only supports 128-bit lowering!");
11762  assert(Mask.size() <= 16 &&
11763         "Can shuffle at most 16 bytes in a 128-bit vector!");
11764  assert(ByteVT == MVT::v16i8 &&
11765         "SSE2 rotate lowering only needed for v16i8!");
11766
11767  // Default SSE2 implementation
11768  int LoByteShift = 16 - ByteRotation;
11769  int HiByteShift = ByteRotation;
11770
11771  SDValue LoShift =
11772      DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
11773                  DAG.getTargetConstant(LoByteShift, DL, MVT::i8));
11774  SDValue HiShift =
11775      DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
11776                  DAG.getTargetConstant(HiByteShift, DL, MVT::i8));
11777  return DAG.getBitcast(VT,
11778                        DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
11779}
11780
11781/// Try to lower a vector shuffle as a dword/qword rotation.
11782///
11783/// AVX512 has VALIGND/VALIGNQ instructions that will do an arbitrary
11784/// rotation of the concatenation of two vectors; this routine will
11785/// try to generically lower a vector shuffle through such a pattern.
11786///
11787/// Essentially it concatenates V1 and V2, shifts right by some number of
11788/// elements, and takes the low elements as the result. Note that while this is
11789/// specified as a *right shift* because x86 is little-endian, it is a *left
11790/// rotate* of the vector lanes.
11791static SDValue lowerShuffleAsRotate(const SDLoc &DL, MVT VT, SDValue V1,
11792                                    SDValue V2, ArrayRef<int> Mask,
11793                                    const X86Subtarget &Subtarget,
11794                                    SelectionDAG &DAG) {
11795  assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
11796         "Only 32-bit and 64-bit elements are supported!");
11797
11798  // 128/256-bit vectors are only supported with VLX.
11799  assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
11800         && "VLX required for 128/256-bit vectors");
11801
11802  SDValue Lo = V1, Hi = V2;
11803  int Rotation = matchShuffleAsRotate(Lo, Hi, Mask);
11804  if (Rotation <= 0)
11805    return SDValue();
11806
11807  return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
11808                     DAG.getTargetConstant(Rotation, DL, MVT::i8));
11809}
11810
11811/// Try to lower a vector shuffle as a byte shift sequence.
11812static SDValue lowerShuffleAsByteShiftMask(const SDLoc &DL, MVT VT, SDValue V1,
11813                                           SDValue V2, ArrayRef<int> Mask,
11814                                           const APInt &Zeroable,
11815                                           const X86Subtarget &Subtarget,
11816                                           SelectionDAG &DAG) {
11817  assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11818  assert(VT.is128BitVector() && "Only 128-bit vectors supported");
11819
11820  // We need a shuffle that has zeros at one/both ends and a sequential
11821  // shuffle from one source within.
11822  unsigned ZeroLo = Zeroable.countTrailingOnes();
11823  unsigned ZeroHi = Zeroable.countLeadingOnes();
11824  if (!ZeroLo && !ZeroHi)
11825    return SDValue();
11826
11827  unsigned NumElts = Mask.size();
11828  unsigned Len = NumElts - (ZeroLo + ZeroHi);
11829  if (!isSequentialOrUndefInRange(Mask, ZeroLo, Len, Mask[ZeroLo]))
11830    return SDValue();
11831
11832  unsigned Scale = VT.getScalarSizeInBits() / 8;
11833  ArrayRef<int> StubMask = Mask.slice(ZeroLo, Len);
11834  if (!isUndefOrInRange(StubMask, 0, NumElts) &&
11835      !isUndefOrInRange(StubMask, NumElts, 2 * NumElts))
11836    return SDValue();
11837
11838  SDValue Res = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
11839  Res = DAG.getBitcast(MVT::v16i8, Res);
11840
11841  // Use VSHLDQ/VSRLDQ ops to zero the ends of a vector and leave an
11842  // inner sequential set of elements, possibly offset:
11843  // 01234567 --> zzzzzz01 --> 1zzzzzzz
11844  // 01234567 --> 4567zzzz --> zzzzz456
11845  // 01234567 --> z0123456 --> 3456zzzz --> zz3456zz
11846  if (ZeroLo == 0) {
11847    unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11848    Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11849                      DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11850    Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11851                      DAG.getTargetConstant(Scale * ZeroHi, DL, MVT::i8));
11852  } else if (ZeroHi == 0) {
11853    unsigned Shift = Mask[ZeroLo] % NumElts;
11854    Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11855                      DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11856    Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11857                      DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
11858  } else if (!Subtarget.hasSSSE3()) {
11859    // If we don't have PSHUFB then it's worth avoiding an AND constant mask
11860    // by performing 3 byte shifts. Shuffle combining can kick in above that.
11861    // TODO: There may be some cases where VSH{LR}DQ+PAND is still better.
11862    unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11863    Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11864                      DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11865    Shift += Mask[ZeroLo] % NumElts;
11866    Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11867                      DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11868    Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11869                      DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
11870  } else
11871    return SDValue();
11872
11873  return DAG.getBitcast(VT, Res);
11874}
11875
11876/// Try to lower a vector shuffle as a bit shift (shifts in zeros).
11877///
11878/// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
11879/// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
11880/// matches elements from one of the input vectors shuffled to the left or
11881/// right with zeroable elements 'shifted in'. It handles both the strictly
11882/// bit-wise element shifts and the byte shift across an entire 128-bit double
11883/// quad word lane.
11884///
11885/// PSLL : (little-endian) left bit shift.
11886/// [ zz, 0, zz,  2 ]
11887/// [ -1, 4, zz, -1 ]
11888/// PSRL : (little-endian) right bit shift.
11889/// [  1, zz,  3, zz]
11890/// [ -1, -1,  7, zz]
11891/// PSLLDQ : (little-endian) left byte shift
11892/// [ zz,  0,  1,  2,  3,  4,  5,  6]
11893/// [ zz, zz, -1, -1,  2,  3,  4, -1]
11894/// [ zz, zz, zz, zz, zz, zz, -1,  1]
11895/// PSRLDQ : (little-endian) right byte shift
11896/// [  5, 6,  7, zz, zz, zz, zz, zz]
11897/// [ -1, 5,  6,  7, zz, zz, zz, zz]
11898/// [  1, 2, -1, -1, -1, -1, zz, zz]
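///
/// For example, the v8i16 mask [ zz, 0, zz, 2, zz, 4, zz, 6 ] matches as a
/// v4i32 VSHLI (PSLLD) by 16 bits.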
11899static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
11900                               unsigned ScalarSizeInBits, ArrayRef<int> Mask,
11901                               int MaskOffset, const APInt &Zeroable,
11902                               const X86Subtarget &Subtarget) {
11903  int Size = Mask.size();
11904  unsigned SizeInBits = Size * ScalarSizeInBits;
11905
11906  auto CheckZeros = [&](int Shift, int Scale, bool Left) {
11907    for (int i = 0; i < Size; i += Scale)
11908      for (int j = 0; j < Shift; ++j)
11909        if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
11910          return false;
11911
11912    return true;
11913  };
11914
11915  auto MatchShift = [&](int Shift, int Scale, bool Left) {
11916    for (int i = 0; i != Size; i += Scale) {
11917      unsigned Pos = Left ? i + Shift : i;
11918      unsigned Low = Left ? i : i + Shift;
11919      unsigned Len = Scale - Shift;
11920      if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
11921        return -1;
11922    }
11923
11924    int ShiftEltBits = ScalarSizeInBits * Scale;
11925    bool ByteShift = ShiftEltBits > 64;
11926    Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
11927                  : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
11928    int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);
11929
11930    // Normalize the scale for byte shifts to still produce an i64 element
11931    // type.
11932    Scale = ByteShift ? Scale / 2 : Scale;
11933
11934    // We need to round trip through the appropriate type for the shift.
11935    MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
11936    ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
11937                        : MVT::getVectorVT(ShiftSVT, Size / Scale);
11938    return (int)ShiftAmt;
11939  };
11940
11941  // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
11942  // keep doubling the size of the integer elements up to that. We can
11943  // then shift the elements of the integer vector by whole multiples of
11944  // their width within the elements of the larger integer vector. Test each
11945  // multiple to see if we can find a match with the moved element indices
11946  // and that the shifted in elements are all zeroable.
11947  unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
11948  for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
11949    for (int Shift = 1; Shift != Scale; ++Shift)
11950      for (bool Left : {true, false})
11951        if (CheckZeros(Shift, Scale, Left)) {
11952          int ShiftAmt = MatchShift(Shift, Scale, Left);
11953          if (0 < ShiftAmt)
11954            return ShiftAmt;
11955        }
11956
11957  // no match
11958  return -1;
11959}
11960
11961static SDValue lowerShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
11962                                   SDValue V2, ArrayRef<int> Mask,
11963                                   const APInt &Zeroable,
11964                                   const X86Subtarget &Subtarget,
11965                                   SelectionDAG &DAG) {
11966  int Size = Mask.size();
11967  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11968
11969  MVT ShiftVT;
11970  SDValue V = V1;
11971  unsigned Opcode;
11972
11973  // Try to match shuffle against V1 shift.
11974  int ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
11975                                     Mask, 0, Zeroable, Subtarget);
11976
11977  // If V1 failed, try to match shuffle against V2 shift.
11978  if (ShiftAmt < 0) {
11979    ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
11980                                   Mask, Size, Zeroable, Subtarget);
11981    V = V2;
11982  }
11983
11984  if (ShiftAmt < 0)
11985    return SDValue();
11986
11987  assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
11988         "Illegal integer vector type");
11989  V = DAG.getBitcast(ShiftVT, V);
11990  V = DAG.getNode(Opcode, DL, ShiftVT, V,
11991                  DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
11992  return DAG.getBitcast(VT, V);
11993}
11994
11995// EXTRQ: Extract Len elements from lower half of source, starting at Idx.
11996// Remainder of lower half result is zero and upper half is all undef.
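// For example, a v8i16 mask [ 2, 3, zz, zz, -1, -1, -1, -1 ] matches with
// BitLen == 32 and BitIdx == 32: extract 32 bits starting at bit 32 of V1.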
11997static bool matchShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
11998                                ArrayRef<int> Mask, uint64_t &BitLen,
11999                                uint64_t &BitIdx, const APInt &Zeroable) {
12000  int Size = Mask.size();
12001  int HalfSize = Size / 2;
12002  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
12003  assert(!Zeroable.isAllOnesValue() && "Fully zeroable shuffle mask");
12004
12005  // Upper half must be undefined.
12006  if (!isUndefUpperHalf(Mask))
12007    return false;
12008
12009  // Determine the extraction length from the part of the
12010  // lower half that isn't zeroable.
12011  int Len = HalfSize;
12012  for (; Len > 0; --Len)
12013    if (!Zeroable[Len - 1])
12014      break;
12015  assert(Len > 0 && "Zeroable shuffle mask");
12016
12017  // Attempt to match first Len sequential elements from the lower half.
12018  SDValue Src;
12019  int Idx = -1;
12020  for (int i = 0; i != Len; ++i) {
12021    int M = Mask[i];
12022    if (M == SM_SentinelUndef)
12023      continue;
12024    SDValue &V = (M < Size ? V1 : V2);
12025    M = M % Size;
12026
12027    // The extracted elements must start at a valid index and all mask
12028    // elements must be in the lower half.
12029    if (i > M || M >= HalfSize)
12030      return false;
12031
12032    if (Idx < 0 || (Src == V && Idx == (M - i))) {
12033      Src = V;
12034      Idx = M - i;
12035      continue;
12036    }
12037    return false;
12038  }
12039
12040  if (!Src || Idx < 0)
12041    return false;
12042
12043  assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
12044  BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
12045  BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
12046  V1 = Src;
12047  return true;
12048}
12049
12050// INSERTQ: Extract lowest Len elements from lower half of second source and
12051// insert over first source, starting at Idx.
12052// { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
12053static bool matchShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
12054                                  ArrayRef<int> Mask, uint64_t &BitLen,
12055                                  uint64_t &BitIdx) {
12056  int Size = Mask.size();
12057  int HalfSize = Size / 2;
12058  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
12059
12060  // Upper half must be undefined.
12061  if (!isUndefUpperHalf(Mask))
12062    return false;
12063
12064  for (int Idx = 0; Idx != HalfSize; ++Idx) {
12065    SDValue Base;
12066
12067    // Attempt to match first source from mask before insertion point.
12068    if (isUndefInRange(Mask, 0, Idx)) {
12069      /* EMPTY */
12070    } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
12071      Base = V1;
12072    } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
12073      Base = V2;
12074    } else {
12075      continue;
12076    }
12077
12078    // Extend the extraction length looking to match both the insertion of
12079    // the second source and the remaining elements of the first.
12080    for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
12081      SDValue Insert;
12082      int Len = Hi - Idx;
12083
12084      // Match insertion.
12085      if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
12086        Insert = V1;
12087      } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
12088        Insert = V2;
12089      } else {
12090        continue;
12091      }
12092
12093      // Match the remaining elements of the lower half.
12094      if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
12095        /* EMPTY */
12096      } else if ((!Base || (Base == V1)) &&
12097                 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
12098        Base = V1;
12099      } else if ((!Base || (Base == V2)) &&
12100                 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
12101                                            Size + Hi)) {
12102        Base = V2;
12103      } else {
12104        continue;
12105      }
12106
12107      BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
12108      BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
12109      V1 = Base;
12110      V2 = Insert;
12111      return true;
12112    }
12113  }
12114
12115  return false;
12116}
12117
12118/// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
12119static SDValue lowerShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
12120                                     SDValue V2, ArrayRef<int> Mask,
12121                                     const APInt &Zeroable, SelectionDAG &DAG) {
12122  uint64_t BitLen, BitIdx;
12123  if (matchShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
12124    return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
12125                       DAG.getTargetConstant(BitLen, DL, MVT::i8),
12126                       DAG.getTargetConstant(BitIdx, DL, MVT::i8));
12127
12128  if (matchShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
12129    return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
12130                       V2 ? V2 : DAG.getUNDEF(VT),
12131                       DAG.getTargetConstant(BitLen, DL, MVT::i8),
12132                       DAG.getTargetConstant(BitIdx, DL, MVT::i8));
12133
12134  return SDValue();
12135}
12136
12137/// Lower a vector shuffle as a zero or any extension.
12138///
12139/// Given a specific number of elements, element bit width, and extension
12140/// stride, produce either a zero or any extension based on the available
12141/// features of the subtarget. The extended elements are consecutive and
12142/// begin and can start from an offsetted element index in the input; to
12143/// avoid excess shuffling the offset must either being in the bottom lane
12144/// or at the start of a higher lane. All extended elements must be from
12145/// the same lane.
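///
/// For example, with Scale == 2 a v16i8 zero-extension mask of the form
/// <0, zz, 1, zz, 2, zz, ...> is lowered with PMOVZXBW on SSE4.1 targets and
/// with a PUNPCKLBW against a zero vector otherwise.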
12146static SDValue lowerShuffleAsSpecificZeroOrAnyExtend(
12147    const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
12148    ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
12149  assert(Scale > 1 && "Need a scale to extend.");
12150  int EltBits = VT.getScalarSizeInBits();
12151  int NumElements = VT.getVectorNumElements();
12152  int NumEltsPerLane = 128 / EltBits;
12153  int OffsetLane = Offset / NumEltsPerLane;
12154  assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
12155         "Only 8, 16, and 32 bit elements can be extended.");
12156  assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
12157  assert(0 <= Offset && "Extension offset must be positive.");
12158  assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
12159         "Extension offset must be in the first lane or start an upper lane.");
12160
12161  // Check that an index is in same lane as the base offset.
12162  auto SafeOffset = [&](int Idx) {
12163    return OffsetLane == (Idx / NumEltsPerLane);
12164  };
12165
12166  // Shift along an input so that the offset base moves to the first element.
12167  auto ShuffleOffset = [&](SDValue V) {
12168    if (!Offset)
12169      return V;
12170
12171    SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
12172    for (int i = 0; i * Scale < NumElements; ++i) {
12173      int SrcIdx = i + Offset;
12174      ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
12175    }
12176    return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
12177  };
12178
12179  // Found a valid a/zext mask! Try various lowering strategies based on the
12180  // input type and available ISA extensions.
12181  if (Subtarget.hasSSE41()) {
12182    // Not worth offsetting 128-bit vectors if scale == 2, a pattern using
12183    // PUNPCK will catch this in a later shuffle match.
12184    if (Offset && Scale == 2 && VT.is128BitVector())
12185      return SDValue();
12186    MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
12187                                 NumElements / Scale);
12188    InputV = ShuffleOffset(InputV);
12189    InputV = getExtendInVec(AnyExt ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND, DL,
12190                            ExtVT, InputV, DAG);
12191    return DAG.getBitcast(VT, InputV);
12192  }
12193
12194  assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");
12195
12196  // For any extends we can cheat for larger element sizes and use shuffle
12197  // instructions that can fold with a load and/or copy.
12198  if (AnyExt && EltBits == 32) {
12199    int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
12200                         -1};
12201    return DAG.getBitcast(
12202        VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
12203                        DAG.getBitcast(MVT::v4i32, InputV),
12204                        getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
12205  }
12206  if (AnyExt && EltBits == 16 && Scale > 2) {
12207    int PSHUFDMask[4] = {Offset / 2, -1,
12208                         SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
12209    InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
12210                         DAG.getBitcast(MVT::v4i32, InputV),
12211                         getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
12212    int PSHUFWMask[4] = {1, -1, -1, -1};
12213    unsigned OddEvenOp = (Offset & 1) ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
12214    return DAG.getBitcast(
12215        VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
12216                        DAG.getBitcast(MVT::v8i16, InputV),
12217                        getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
12218  }
12219
12220  // The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
12221  // to 64-bits.
12222  if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
12223    assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
12224    assert(VT.is128BitVector() && "Unexpected vector width!");
12225
12226    int LoIdx = Offset * EltBits;
12227    SDValue Lo = DAG.getBitcast(
12228        MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
12229                                DAG.getTargetConstant(EltBits, DL, MVT::i8),
12230                                DAG.getTargetConstant(LoIdx, DL, MVT::i8)));
12231
12232    if (isUndefUpperHalf(Mask) || !SafeOffset(Offset + 1))
12233      return DAG.getBitcast(VT, Lo);
12234
12235    int HiIdx = (Offset + 1) * EltBits;
12236    SDValue Hi = DAG.getBitcast(
12237        MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
12238                                DAG.getTargetConstant(EltBits, DL, MVT::i8),
12239                                DAG.getTargetConstant(HiIdx, DL, MVT::i8)));
12240    return DAG.getBitcast(VT,
12241                          DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
12242  }
12243
12244  // If this would require more than 2 unpack instructions to expand, use
12245  // pshufb when available. We can only use more than 2 unpack instructions
12246  // when zero extending i8 elements which also makes it easier to use pshufb.
12247  if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
12248    assert(NumElements == 16 && "Unexpected byte vector width!");
12249    SDValue PSHUFBMask[16];
12250    for (int i = 0; i < 16; ++i) {
12251      int Idx = Offset + (i / Scale);
12252      if ((i % Scale == 0 && SafeOffset(Idx))) {
12253        PSHUFBMask[i] = DAG.getConstant(Idx, DL, MVT::i8);
12254        continue;
12255      }
12256      PSHUFBMask[i] =
12257          AnyExt ? DAG.getUNDEF(MVT::i8) : DAG.getConstant(0x80, DL, MVT::i8);
12258    }
12259    InputV = DAG.getBitcast(MVT::v16i8, InputV);
12260    return DAG.getBitcast(
12261        VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
12262                        DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
12263  }
12264
12265  // If we are extending from an offset, ensure we start on a boundary that
12266  // we can unpack from.
12267  int AlignToUnpack = Offset % (NumElements / Scale);
12268  if (AlignToUnpack) {
12269    SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
12270    for (int i = AlignToUnpack; i < NumElements; ++i)
12271      ShMask[i - AlignToUnpack] = i;
12272    InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
12273    Offset -= AlignToUnpack;
12274  }
12275
12276  // Otherwise emit a sequence of unpacks.
12277  do {
12278    unsigned UnpackLoHi = X86ISD::UNPCKL;
12279    if (Offset >= (NumElements / 2)) {
12280      UnpackLoHi = X86ISD::UNPCKH;
12281      Offset -= (NumElements / 2);
12282    }
12283
12284    MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
12285    SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
12286                         : getZeroVector(InputVT, Subtarget, DAG, DL);
12287    InputV = DAG.getBitcast(InputVT, InputV);
12288    InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
12289    Scale /= 2;
12290    EltBits *= 2;
12291    NumElements /= 2;
12292  } while (Scale > 1);
12293  return DAG.getBitcast(VT, InputV);
12294}
12295
12296/// Try to lower a vector shuffle as a zero extension on any microarch.
12297///
12298/// This routine will try to do everything in its power to cleverly lower
12299/// a shuffle which happens to match the pattern of a zero extend. It doesn't
12300/// check for the profitability of this lowering,  it tries to aggressively
12301/// match this pattern. It will use all of the micro-architectural details it
12302/// can to emit an efficient lowering. It handles both blends with all-zero
12303/// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
12304/// masking out later).
12305///
12306/// The reason we have dedicated lowering for zext-style shuffles is that they
12307/// are both incredibly common and often quite performance sensitive.
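///
/// For example, a v4i32 mask <0, 1, zz, zz> on a 128-bit vector falls through
/// the generic extension scales and is instead matched below as a VZEXT_MOVL
/// (MOVQ), copying the low 64 bits and zeroing the upper 64 bits.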
12308static SDValue lowerShuffleAsZeroOrAnyExtend(
12309    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12310    const APInt &Zeroable, const X86Subtarget &Subtarget,
12311    SelectionDAG &DAG) {
12312  int Bits = VT.getSizeInBits();
12313  int NumLanes = Bits / 128;
12314  int NumElements = VT.getVectorNumElements();
12315  int NumEltsPerLane = NumElements / NumLanes;
12316  assert(VT.getScalarSizeInBits() <= 32 &&
12317         "Exceeds 32-bit integer zero extension limit");
12318  assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
12319
12320  // Define a helper function to check a particular ext-scale and lower to it if
12321  // valid.
12322  auto Lower = [&](int Scale) -> SDValue {
12323    SDValue InputV;
12324    bool AnyExt = true;
12325    int Offset = 0;
12326    int Matches = 0;
12327    for (int i = 0; i < NumElements; ++i) {
12328      int M = Mask[i];
12329      if (M < 0)
12330        continue; // Valid anywhere but doesn't tell us anything.
12331      if (i % Scale != 0) {
12332        // Each of the extended elements need to be zeroable.
12333        if (!Zeroable[i])
12334          return SDValue();
12335
12336        // We no longer are in the anyext case.
12337        AnyExt = false;
12338        continue;
12339      }
12340
12341      // Each of the base elements needs to be consecutive indices into the
12342      // same input vector.
12343      SDValue V = M < NumElements ? V1 : V2;
12344      M = M % NumElements;
12345      if (!InputV) {
12346        InputV = V;
12347        Offset = M - (i / Scale);
12348      } else if (InputV != V)
12349        return SDValue(); // Flip-flopping inputs.
12350
12351      // Offset must start in the lowest 128-bit lane or at the start of an
12352      // upper lane.
12353      // FIXME: Is it ever worth allowing a negative base offset?
12354      if (!((0 <= Offset && Offset < NumEltsPerLane) ||
12355            (Offset % NumEltsPerLane) == 0))
12356        return SDValue();
12357
12358      // If we are offsetting, all referenced entries must come from the same
12359      // lane.
12360      if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
12361        return SDValue();
12362
12363      if ((M % NumElements) != (Offset + (i / Scale)))
12364        return SDValue(); // Non-consecutive strided elements.
12365      Matches++;
12366    }
12367
12368    // If we fail to find an input, we have a zero-shuffle which should always
12369    // have already been handled.
12370    // FIXME: Maybe handle this here in case during blending we end up with one?
12371    if (!InputV)
12372      return SDValue();
12373
12374    // If we are offsetting, don't extend if we only match a single input, we
12375    // can always do better by using a basic PSHUF or PUNPCK.
12376    if (Offset != 0 && Matches < 2)
12377      return SDValue();
12378
12379    return lowerShuffleAsSpecificZeroOrAnyExtend(DL, VT, Scale, Offset, AnyExt,
12380                                                 InputV, Mask, Subtarget, DAG);
12381  };
12382
12383  // The widest scale possible for extending is to a 64-bit integer.
12384  assert(Bits % 64 == 0 &&
12385         "The number of bits in a vector must be divisible by 64 on x86!");
12386  int NumExtElements = Bits / 64;
12387
12388  // Each iteration, try extending the elements half as much, but into twice as
12389  // many elements.
12390  for (; NumExtElements < NumElements; NumExtElements *= 2) {
12391    assert(NumElements % NumExtElements == 0 &&
12392           "The input vector size must be divisible by the extended size.");
12393    if (SDValue V = Lower(NumElements / NumExtElements))
12394      return V;
12395  }
12396
12397  // General extends failed, but 128-bit vectors may be able to use MOVQ.
12398  if (Bits != 128)
12399    return SDValue();
12400
12401  // Returns one of the source operands if the shuffle can be reduced to a
12402  // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
12403  auto CanZExtLowHalf = [&]() {
12404    for (int i = NumElements / 2; i != NumElements; ++i)
12405      if (!Zeroable[i])
12406        return SDValue();
12407    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
12408      return V1;
12409    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
12410      return V2;
12411    return SDValue();
12412  };
12413
12414  if (SDValue V = CanZExtLowHalf()) {
12415    V = DAG.getBitcast(MVT::v2i64, V);
12416    V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
12417    return DAG.getBitcast(VT, V);
12418  }
12419
12420  // No viable ext lowering found.
12421  return SDValue();
12422}
12423
12424/// Try to get a scalar value for a specific element of a vector.
12425///
12426/// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
12427static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
12428                                              SelectionDAG &DAG) {
12429  MVT VT = V.getSimpleValueType();
12430  MVT EltVT = VT.getVectorElementType();
12431  V = peekThroughBitcasts(V);
12432
12433  // If the bitcasts shift the element size, we can't extract an equivalent
12434  // element from it.
12435  MVT NewVT = V.getSimpleValueType();
12436  if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
12437    return SDValue();
12438
12439  if (V.getOpcode() == ISD::BUILD_VECTOR ||
12440      (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
12441    // Ensure the scalar operand is the same size as the destination.
12442    // FIXME: Add support for scalar truncation where possible.
12443    SDValue S = V.getOperand(Idx);
12444    if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
12445      return DAG.getBitcast(EltVT, S);
12446  }
12447
12448  return SDValue();
12449}
12450
12451/// Helper to test for a load that can be folded with x86 shuffles.
12452///
12453/// This is particularly important because the set of instructions varies
12454/// significantly based on whether the operand is a load or not.
12455static bool isShuffleFoldableLoad(SDValue V) {
12456  V = peekThroughBitcasts(V);
12457  return ISD::isNON_EXTLoad(V.getNode());
12458}
12459
12460/// Try to lower insertion of a single element into a zero vector.
12461///
12462/// This is a common pattern for which we have especially efficient lowerings
12463/// across all subtarget feature sets.
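///
/// For example, when V1 is not zeroable, a v4f32 shuffle <4, 1, 2, 3>
/// (inserting element 0 of V2 into the low element of V1) is matched to
/// X86ISD::MOVSS.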
12464static SDValue lowerShuffleAsElementInsertion(
12465    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12466    const APInt &Zeroable, const X86Subtarget &Subtarget,
12467    SelectionDAG &DAG) {
12468  MVT ExtVT = VT;
12469  MVT EltVT = VT.getVectorElementType();
12470
12471  int V2Index =
12472      find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
12473      Mask.begin();
12474  bool IsV1Zeroable = true;
12475  for (int i = 0, Size = Mask.size(); i < Size; ++i)
12476    if (i != V2Index && !Zeroable[i]) {
12477      IsV1Zeroable = false;
12478      break;
12479    }
12480
12481  // Check for a single input from a SCALAR_TO_VECTOR node.
12482  // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
12483  // all the smarts here sunk into that routine. However, the current
12484  // lowering of BUILD_VECTOR makes that nearly impossible until the old
12485  // vector shuffle lowering is dead.
12486  SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
12487                                               DAG);
12488  if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
12489    // We need to zext the scalar if it is smaller than an i32.
12490    V2S = DAG.getBitcast(EltVT, V2S);
12491    if (EltVT == MVT::i8 || EltVT == MVT::i16) {
12492      // Using zext to expand a narrow element won't work for non-zero
12493      // insertions.
12494      if (!IsV1Zeroable)
12495        return SDValue();
12496
12497      // Zero-extend directly to i32.
12498      ExtVT = MVT::getVectorVT(MVT::i32, ExtVT.getSizeInBits() / 32);
12499      V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
12500    }
12501    V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
12502  } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
12503             EltVT == MVT::i16) {
12504    // Either not inserting from the low element of the input or the input
12505    // element size is too small to use VZEXT_MOVL to clear the high bits.
12506    return SDValue();
12507  }
12508
12509  if (!IsV1Zeroable) {
12510    // If V1 can't be treated as a zero vector we have fewer options to lower
12511    // this. We can't support integer vectors or non-zero targets cheaply, and
12512    // the V1 elements can't be permuted in any way.
12513    assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
12514    if (!VT.isFloatingPoint() || V2Index != 0)
12515      return SDValue();
12516    SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
12517    V1Mask[V2Index] = -1;
12518    if (!isNoopShuffleMask(V1Mask))
12519      return SDValue();
12520    if (!VT.is128BitVector())
12521      return SDValue();
12522
12523    // Otherwise, use MOVSD or MOVSS.
12524    assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
12525           "Only two types of floating point element types to handle!");
12526    return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
12527                       ExtVT, V1, V2);
12528  }
12529
12530  // This lowering only works for the low element with floating point vectors.
12531  if (VT.isFloatingPoint() && V2Index != 0)
12532    return SDValue();
12533
12534  V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
12535  if (ExtVT != VT)
12536    V2 = DAG.getBitcast(VT, V2);
12537
12538  if (V2Index != 0) {
12539    // If we have 4 or fewer lanes we can cheaply shuffle the element into
12540    // the desired position. Otherwise it is more efficient to do a vector
12541    // shift left. We know that we can do a vector shift left because all
12542    // the inputs are zero.
12543    if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
12544      SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
12545      V2Shuffle[V2Index] = 0;
12546      V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
12547    } else {
12548      V2 = DAG.getBitcast(MVT::v16i8, V2);
12549      V2 = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
12550                       DAG.getTargetConstant(
12551                           V2Index * EltVT.getSizeInBits() / 8, DL, MVT::i8));
12552      V2 = DAG.getBitcast(VT, V2);
12553    }
12554  }
12555  return V2;
12556}
12557
12558/// Try to lower broadcast of a single - truncated - integer element,
12559/// coming from a scalar_to_vector/build_vector node \p V0 with larger elements.
12560///
12561/// This assumes we have AVX2.
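///
/// For example, broadcasting byte 2 of a v4i32 build_vector as a v16i8
/// broadcast gives Scale == 4, so the i32 scalar is shifted right by 16 bits
/// and truncated to i8 before the VBROADCAST (vpbroadcastb).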
12562static SDValue lowerShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT, SDValue V0,
12563                                            int BroadcastIdx,
12564                                            const X86Subtarget &Subtarget,
12565                                            SelectionDAG &DAG) {
12566  assert(Subtarget.hasAVX2() &&
12567         "We can only lower integer broadcasts with AVX2!");
12568
12569  EVT EltVT = VT.getVectorElementType();
12570  EVT V0VT = V0.getValueType();
12571
12572  assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
12573  assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");
12574
12575  EVT V0EltVT = V0VT.getVectorElementType();
12576  if (!V0EltVT.isInteger())
12577    return SDValue();
12578
12579  const unsigned EltSize = EltVT.getSizeInBits();
12580  const unsigned V0EltSize = V0EltVT.getSizeInBits();
12581
12582  // This is only a truncation if the original element type is larger.
12583  if (V0EltSize <= EltSize)
12584    return SDValue();
12585
12586  assert(((V0EltSize % EltSize) == 0) &&
12587         "Scalar type sizes must all be powers of 2 on x86!");
12588
12589  const unsigned V0Opc = V0.getOpcode();
12590  const unsigned Scale = V0EltSize / EltSize;
12591  const unsigned V0BroadcastIdx = BroadcastIdx / Scale;
12592
12593  if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
12594      V0Opc != ISD::BUILD_VECTOR)
12595    return SDValue();
12596
12597  SDValue Scalar = V0.getOperand(V0BroadcastIdx);
12598
12599  // If we're extracting non-least-significant bits, shift so we can truncate.
12600  // Hopefully, we can fold away the trunc/srl/load into the broadcast.
12601  // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
12602  // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
12603  if (const int OffsetIdx = BroadcastIdx % Scale)
12604    Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
12605                         DAG.getConstant(OffsetIdx * EltSize, DL, MVT::i8));
12606
12607  return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
12608                     DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
12609}
12610
12611/// Test whether this can be lowered with a single SHUFPS instruction.
12612///
12613/// This is used to disable more specialized lowerings when the shufps lowering
12614/// will happen to be efficient.
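///
/// For example, <1, 0, 7, 6> needs only one SHUFPS (low half from the first
/// input, high half from the second), while <0, 4, 2, 6> does not qualify
/// because each half mixes both inputs.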
12615static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
12616  // This routine only handles 128-bit shufps.
12617  assert(Mask.size() == 4 && "Unsupported mask size!");
12618  assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
12619  assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
12620  assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
12621  assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");
12622
12623  // To lower with a single SHUFPS we need to have the low half and high half
12624  // each requiring a single input.
12625  if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
12626    return false;
12627  if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
12628    return false;
12629
12630  return true;
12631}
12632
12633/// If we are extracting two 128-bit halves of a vector and shuffling the
12634/// result, match that to a 256-bit AVX2 vperm* instruction to avoid a
12635/// multi-shuffle lowering.
12636static SDValue lowerShuffleOfExtractsAsVperm(const SDLoc &DL, SDValue N0,
12637                                             SDValue N1, ArrayRef<int> Mask,
12638                                             SelectionDAG &DAG) {
12639  EVT VT = N0.getValueType();
12640  assert((VT.is128BitVector() &&
12641          (VT.getScalarSizeInBits() == 32 || VT.getScalarSizeInBits() == 64)) &&
12642         "VPERM* family of shuffles requires 32-bit or 64-bit elements");
12643
12644  // Check that both sources are extracts of the same source vector.
12645  if (!N0.hasOneUse() || !N1.hasOneUse() ||
12646      N0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
12647      N1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
12648      N0.getOperand(0) != N1.getOperand(0))
12649    return SDValue();
12650
12651  SDValue WideVec = N0.getOperand(0);
12652  EVT WideVT = WideVec.getValueType();
12653  if (!WideVT.is256BitVector() || !isa<ConstantSDNode>(N0.getOperand(1)) ||
12654      !isa<ConstantSDNode>(N1.getOperand(1)))
12655    return SDValue();
12656
12657  // Match extracts of each half of the wide source vector. Commute the shuffle
12658  // if the extract of the low half is N1.
12659  unsigned NumElts = VT.getVectorNumElements();
12660  SmallVector<int, 4> NewMask(Mask.begin(), Mask.end());
12661  const APInt &ExtIndex0 = N0.getConstantOperandAPInt(1);
12662  const APInt &ExtIndex1 = N1.getConstantOperandAPInt(1);
12663  if (ExtIndex1 == 0 && ExtIndex0 == NumElts)
12664    ShuffleVectorSDNode::commuteMask(NewMask);
12665  else if (ExtIndex0 != 0 || ExtIndex1 != NumElts)
12666    return SDValue();
12667
12668  // Final bailout: if the mask is simple, we are better off using an extract
12669  // and a simple narrow shuffle. Prefer extract+unpack(h/l)ps to vpermps
12670  // because that avoids a constant load from memory.
12671  if (NumElts == 4 &&
12672      (isSingleSHUFPSMask(NewMask) || is128BitUnpackShuffleMask(NewMask)))
12673    return SDValue();
12674
12675  // Extend the shuffle mask with undef elements.
12676  NewMask.append(NumElts, -1);
12677
12678  // shuf (extract X, 0), (extract X, 4), M --> extract (shuf X, undef, M'), 0
12679  SDValue Shuf = DAG.getVectorShuffle(WideVT, DL, WideVec, DAG.getUNDEF(WideVT),
12680                                      NewMask);
12681  // This is free: ymm -> xmm.
12682  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuf,
12683                     DAG.getIntPtrConstant(0, DL));
12684}
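
// A sketch of the transform, with an illustrative wide source X (v8f32):
//   N0 = extract_subvector(X, 0), N1 = extract_subvector(X, 4),
//   shuffle(N0, N1, {0, 4, 2, 6})
// is neither a single-SHUFPS nor an unpack mask, so it becomes
//   extract_subvector(shuffle(X, undef, {0, 4, 2, 6, -1, -1, -1, -1}), 0)
// which AVX2 can typically handle with one cross-lane permute (e.g. vpermps).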
12685
12686/// Try to lower broadcast of a single element.
12687///
12688/// For convenience, this code also bundles all of the subtarget feature set
12689/// filtering. While a little annoying to re-dispatch on type here, there isn't
12690/// a convenient way to factor it out.
12691static SDValue lowerShuffleAsBroadcast(const SDLoc &DL, MVT VT, SDValue V1,
12692                                       SDValue V2, ArrayRef<int> Mask,
12693                                       const X86Subtarget &Subtarget,
12694                                       SelectionDAG &DAG) {
12695  if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
12696        (Subtarget.hasAVX() && VT.isFloatingPoint()) ||
12697        (Subtarget.hasAVX2() && VT.isInteger())))
12698    return SDValue();
12699
12700  // With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
12701  // we can only broadcast from a register with AVX2.
12702  unsigned NumElts = Mask.size();
12703  unsigned NumEltBits = VT.getScalarSizeInBits();
12704  unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2())
12705                        ? X86ISD::MOVDDUP
12706                        : X86ISD::VBROADCAST;
12707  bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();
12708
12709  // Check that the mask is a broadcast.
12710  int BroadcastIdx = -1;
12711  for (int i = 0; i != (int)NumElts; ++i) {
12712    SmallVector<int, 8> BroadcastMask(NumElts, i);
12713    if (isShuffleEquivalent(V1, V2, Mask, BroadcastMask)) {
12714      BroadcastIdx = i;
12715      break;
12716    }
12717  }
12718
12719  if (BroadcastIdx < 0)
12720    return SDValue();
12721  assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
12722                                            "a sorted mask where the broadcast "
12723                                            "comes from V1.");
12724
12725  // Go up the chain of (vector) values to find a scalar load that we can
12726  // combine with the broadcast.
12727  int BitOffset = BroadcastIdx * NumEltBits;
12728  SDValue V = V1;
12729  for (;;) {
12730    switch (V.getOpcode()) {
12731    case ISD::BITCAST: {
12732      V = V.getOperand(0);
12733      continue;
12734    }
12735    case ISD::CONCAT_VECTORS: {
12736      int OpBitWidth = V.getOperand(0).getValueSizeInBits();
12737      int OpIdx = BitOffset / OpBitWidth;
12738      V = V.getOperand(OpIdx);
12739      BitOffset %= OpBitWidth;
12740      continue;
12741    }
12742    case ISD::INSERT_SUBVECTOR: {
12743      SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
12744      auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
12745      if (!ConstantIdx)
12746        break;
12747
12748      int EltBitWidth = VOuter.getScalarValueSizeInBits();
12749      int Idx = (int)ConstantIdx->getZExtValue();
12750      int NumSubElts = (int)VInner.getSimpleValueType().getVectorNumElements();
12751      int BeginOffset = Idx * EltBitWidth;
12752      int EndOffset = BeginOffset + NumSubElts * EltBitWidth;
12753      if (BeginOffset <= BitOffset && BitOffset < EndOffset) {
12754        BitOffset -= BeginOffset;
12755        V = VInner;
12756      } else {
12757        V = VOuter;
12758      }
12759      continue;
12760    }
12761    }
12762    break;
12763  }
12764  assert((BitOffset % NumEltBits) == 0 && "Illegal bit-offset");
12765  BroadcastIdx = BitOffset / NumEltBits;
12766
12767  // Do we need to bitcast the source to retrieve the original broadcast index?
12768  bool BitCastSrc = V.getScalarValueSizeInBits() != NumEltBits;
12769
12770  // Check if this is a broadcast of a scalar. We special case lowering
12771  // for scalars so that we can more effectively fold with loads.
12772  // If the original value has a larger element type than the shuffle, the
12773  // broadcast element is in essence truncated. Make that explicit to ease
12774  // folding.
12775  if (BitCastSrc && VT.isInteger())
12776    if (SDValue TruncBroadcast = lowerShuffleAsTruncBroadcast(
12777            DL, VT, V, BroadcastIdx, Subtarget, DAG))
12778      return TruncBroadcast;
12779
12780  MVT BroadcastVT = VT;
12781
12782  // Also check the simpler case, where we can directly reuse the scalar.
12783  if (!BitCastSrc &&
12784      ((V.getOpcode() == ISD::BUILD_VECTOR && V.hasOneUse()) ||
12785       (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0))) {
12786    V = V.getOperand(BroadcastIdx);
12787
12788    // If we can't broadcast from a register, check that the input is a load.
12789    if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
12790      return SDValue();
12791  } else if (MayFoldLoad(V) && cast<LoadSDNode>(V)->isSimple()) {
12792    // 32-bit targets need to load i64 as a f64 and then bitcast the result.
12793    if (!Subtarget.is64Bit() && VT.getScalarType() == MVT::i64) {
12794      BroadcastVT = MVT::getVectorVT(MVT::f64, VT.getVectorNumElements());
12795      Opcode = (BroadcastVT.is128BitVector() && !Subtarget.hasAVX2())
12796                   ? X86ISD::MOVDDUP
12797                   : Opcode;
12798    }
12799
12800    // If we are broadcasting a load that is only used by the shuffle
12801    // then we can reduce the vector load to the broadcasted scalar load.
12802    LoadSDNode *Ld = cast<LoadSDNode>(V);
12803    SDValue BaseAddr = Ld->getOperand(1);
12804    EVT SVT = BroadcastVT.getScalarType();
12805    unsigned Offset = BroadcastIdx * SVT.getStoreSize();
12806    assert((int)(Offset * 8) == BitOffset && "Unexpected bit-offset");
12807    SDValue NewAddr = DAG.getMemBasePlusOffset(BaseAddr, Offset, DL);
12808    V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
12809                    DAG.getMachineFunction().getMachineMemOperand(
12810                        Ld->getMemOperand(), Offset, SVT.getStoreSize()));
12811    DAG.makeEquivalentMemoryOrdering(Ld, V);
12812  } else if (!BroadcastFromReg) {
12813    // We can't broadcast from a vector register.
12814    return SDValue();
12815  } else if (BitOffset != 0) {
12816    // We can only broadcast from the zero-element of a vector register,
12817    // but it can be advantageous to broadcast from the zero-element of a
12818    // subvector.
12819    if (!VT.is256BitVector() && !VT.is512BitVector())
12820      return SDValue();
12821
12822    // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
12823    if (VT == MVT::v4f64 || VT == MVT::v4i64)
12824      return SDValue();
12825
12826    // Only broadcast the zero-element of a 128-bit subvector.
12827    if ((BitOffset % 128) != 0)
12828      return SDValue();
12829
12830    assert((BitOffset % V.getScalarValueSizeInBits()) == 0 &&
12831           "Unexpected bit-offset");
12832    assert((V.getValueSizeInBits() == 256 || V.getValueSizeInBits() == 512) &&
12833           "Unexpected vector size");
12834    unsigned ExtractIdx = BitOffset / V.getScalarValueSizeInBits();
12835    V = extract128BitVector(V, ExtractIdx, DAG, DL);
12836  }
12837
12838  if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector())
12839    V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
12840                    DAG.getBitcast(MVT::f64, V));
12841
12842  // Bitcast back to the same scalar type as BroadcastVT.
12843  if (V.getValueType().getScalarType() != BroadcastVT.getScalarType()) {
12844    assert(NumEltBits == BroadcastVT.getScalarSizeInBits() &&
12845           "Unexpected vector element size");
12846    MVT ExtVT;
12847    if (V.getValueType().isVector()) {
12848      unsigned NumSrcElts = V.getValueSizeInBits() / NumEltBits;
12849      ExtVT = MVT::getVectorVT(BroadcastVT.getScalarType(), NumSrcElts);
12850    } else {
12851      ExtVT = BroadcastVT.getScalarType();
12852    }
12853    V = DAG.getBitcast(ExtVT, V);
12854  }
12855
12856  // 32-bit targets need to load i64 as a f64 and then bitcast the result.
12857  if (!Subtarget.is64Bit() && V.getValueType() == MVT::i64) {
12858    V = DAG.getBitcast(MVT::f64, V);
12859    unsigned NumBroadcastElts = BroadcastVT.getVectorNumElements();
12860    BroadcastVT = MVT::getVectorVT(MVT::f64, NumBroadcastElts);
12861  }
12862
12863  // We only support broadcasting from 128-bit vectors to minimize the
12864  // number of patterns we need to deal with in isel. So extract down to
12865  // 128-bits, removing as many bitcasts as possible.
12866  if (V.getValueSizeInBits() > 128) {
12867    MVT ExtVT = V.getSimpleValueType().getScalarType();
12868    ExtVT = MVT::getVectorVT(ExtVT, 128 / ExtVT.getScalarSizeInBits());
12869    V = extract128BitVector(peekThroughBitcasts(V), 0, DAG, DL);
12870    V = DAG.getBitcast(ExtVT, V);
12871  }
12872
12873  return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
12874}
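
// Illustrative walk of the bit-offset bookkeeping above (values invented for
// the example): broadcasting element 5 of a v8i32 gives BitOffset = 160. If
// V1 is concat_vectors(A, B) with A and B being v4i32, the loop steps into B
// with BitOffset = 32, i.e. BroadcastIdx = 1 within B. If B is a simple,
// single-use load, the code then narrows it to a scalar i32 load at
// BaseAddr + 4 bytes and broadcasts that instead of the whole vector.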
12875
12876// Check for whether we can use INSERTPS to perform the shuffle. We only use
12877// INSERTPS when the V1 elements are already in the correct locations
12878// because otherwise we can just always use two SHUFPS instructions which
12879// are much smaller to encode than a SHUFPS and an INSERTPS. We can also
12880// perform INSERTPS if a single V1 element is out of place and all V2
12881// elements are zeroable.
12882static bool matchShuffleAsInsertPS(SDValue &V1, SDValue &V2,
12883                                   unsigned &InsertPSMask,
12884                                   const APInt &Zeroable,
12885                                   ArrayRef<int> Mask, SelectionDAG &DAG) {
12886  assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
12887  assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
12888  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
12889
12890  // Attempt to match INSERTPS with one element from VA or VB being
12891  // inserted into VA (or undef). If successful, V1, V2 and InsertPSMask
12892  // are updated.
12893  auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
12894                             ArrayRef<int> CandidateMask) {
12895    unsigned ZMask = 0;
12896    int VADstIndex = -1;
12897    int VBDstIndex = -1;
12898    bool VAUsedInPlace = false;
12899
12900    for (int i = 0; i < 4; ++i) {
12901      // Synthesize a zero mask from the zeroable elements (includes undefs).
12902      if (Zeroable[i]) {
12903        ZMask |= 1 << i;
12904        continue;
12905      }
12906
12907      // Flag if we use any VA inputs in place.
12908      if (i == CandidateMask[i]) {
12909        VAUsedInPlace = true;
12910        continue;
12911      }
12912
12913      // We can only insert a single non-zeroable element.
12914      if (VADstIndex >= 0 || VBDstIndex >= 0)
12915        return false;
12916
12917      if (CandidateMask[i] < 4) {
12918        // VA input out of place for insertion.
12919        VADstIndex = i;
12920      } else {
12921        // VB input for insertion.
12922        VBDstIndex = i;
12923      }
12924    }
12925
12926    // Don't bother if we have no (non-zeroable) element for insertion.
12927    if (VADstIndex < 0 && VBDstIndex < 0)
12928      return false;
12929
12930    // Determine element insertion src/dst indices. The src index is from the
12931    // start of the inserted vector, not the start of the concatenated vector.
12932    unsigned VBSrcIndex = 0;
12933    if (VADstIndex >= 0) {
12934      // If we have a VA input out of place, we use VA as the V2 element
12935      // insertion and don't use the original V2 at all.
12936      VBSrcIndex = CandidateMask[VADstIndex];
12937      VBDstIndex = VADstIndex;
12938      VB = VA;
12939    } else {
12940      VBSrcIndex = CandidateMask[VBDstIndex] - 4;
12941    }
12942
12943    // If no V1 inputs are used in place, then the result is created only from
12944    // the zero mask and the V2 insertion - so remove V1 dependency.
12945    if (!VAUsedInPlace)
12946      VA = DAG.getUNDEF(MVT::v4f32);
12947
12948    // Update V1, V2 and InsertPSMask accordingly.
12949    V1 = VA;
12950    V2 = VB;
12951
12952    // Insert the V2 element into the desired position.
12953    InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
12954    assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
12955    return true;
12956  };
12957
12958  if (matchAsInsertPS(V1, V2, Mask))
12959    return true;
12960
12961  // Commute and try again.
12962  SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
12963  ShuffleVectorSDNode::commuteMask(CommutedMask);
12964  if (matchAsInsertPS(V2, V1, CommutedMask))
12965    return true;
12966
12967  return false;
12968}
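
// The immediate built above follows the INSERTPS encoding: bits [7:6] select
// the source element of V2, bits [5:4] select the destination slot, and bits
// [3:0] are the zero mask. For example, inserting V2 element 2 into slot 1
// while zeroing slot 3 yields (2 << 6) | (1 << 4) | 0b1000 = 0x98.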
12969
12970static SDValue lowerShuffleAsInsertPS(const SDLoc &DL, SDValue V1, SDValue V2,
12971                                      ArrayRef<int> Mask, const APInt &Zeroable,
12972                                      SelectionDAG &DAG) {
12973  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12974  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12975
12976  // Attempt to match the insertps pattern.
12977  unsigned InsertPSMask;
12978  if (!matchShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
12979    return SDValue();
12980
12981  // Insert the V2 element into the desired position.
12982  return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
12983                     DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
12984}
12985
12986/// Try to lower a shuffle as a permute of the inputs followed by an
12987/// UNPCK instruction.
12988///
12989/// This specifically targets cases where we end up with alternating between
12990/// the two inputs, and so can permute them into something that feeds a single
12991/// UNPCK instruction. Note that this routine only targets integer vectors
12992/// because for floating point vectors we have a generalized SHUFPS lowering
12993/// strategy that handles everything that doesn't *exactly* match an unpack,
12994/// making this clever lowering unnecessary.
12995static SDValue lowerShuffleAsPermuteAndUnpack(
12996    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12997    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
12998  assert(!VT.isFloatingPoint() &&
12999         "This routine only supports integer vectors.");
13000  assert(VT.is128BitVector() &&
13001         "This routine only works on 128-bit vectors.");
13002  assert(!V2.isUndef() &&
13003         "This routine should only be used when blending two inputs.");
13004  assert(Mask.size() >= 2 && "Single element masks are invalid.");
13005
13006  int Size = Mask.size();
13007
13008  int NumLoInputs =
13009      count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
13010  int NumHiInputs =
13011      count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });
13012
13013  bool UnpackLo = NumLoInputs >= NumHiInputs;
13014
13015  auto TryUnpack = [&](int ScalarSize, int Scale) {
13016    SmallVector<int, 16> V1Mask((unsigned)Size, -1);
13017    SmallVector<int, 16> V2Mask((unsigned)Size, -1);
13018
13019    for (int i = 0; i < Size; ++i) {
13020      if (Mask[i] < 0)
13021        continue;
13022
13023      // Each element of the unpack contains Scale elements from this mask.
13024      int UnpackIdx = i / Scale;
13025
13026      // We only handle the case where V1 feeds the first slots of the unpack.
13027      // We rely on canonicalization to ensure this is the case.
13028      if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
13029        return SDValue();
13030
13031      // Setup the mask for this input. The indexing is tricky as we have to
13032      // handle the unpack stride.
13033      SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
13034      VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
13035          Mask[i] % Size;
13036    }
13037
13038    // If we will have to shuffle both inputs to use the unpack, check whether
13039    // we can just unpack first and shuffle the result. If so, skip this unpack.
13040    if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
13041        !isNoopShuffleMask(V2Mask))
13042      return SDValue();
13043
13044    // Shuffle the inputs into place.
13045    V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
13046    V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
13047
13048    // Cast the inputs to the type we will use to unpack them.
13049    MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
13050    V1 = DAG.getBitcast(UnpackVT, V1);
13051    V2 = DAG.getBitcast(UnpackVT, V2);
13052
13053    // Unpack the inputs and cast the result back to the desired type.
13054    return DAG.getBitcast(
13055        VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
13056                        UnpackVT, V1, V2));
13057  };
13058
13059  // We try each unpack element size from the largest to the smallest, looking
13060  // for one that fits this mask.
13061  int OrigScalarSize = VT.getScalarSizeInBits();
13062  for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
13063    if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
13064      return Unpack;
13065
13066  // If we're shuffling with a zero vector then we're better off not doing
13067  // VECTOR_SHUFFLE(UNPCK()) as we lose track of those zero elements.
13068  if (ISD::isBuildVectorAllZeros(V1.getNode()) ||
13069      ISD::isBuildVectorAllZeros(V2.getNode()))
13070    return SDValue();
13071
13072  // If none of the unpack-rooted lowerings worked (or were profitable) try an
13073  // initial unpack.
13074  if (NumLoInputs == 0 || NumHiInputs == 0) {
13075    assert((NumLoInputs > 0 || NumHiInputs > 0) &&
13076           "We have to have *some* inputs!");
13077    int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;
13078
13079    // FIXME: We could consider the total complexity of the permute of each
13080    // possible unpacking. Or at the least we should consider how many
13081    // half-crossings are created.
13082    // FIXME: We could consider commuting the unpacks.
13083
13084    SmallVector<int, 32> PermMask((unsigned)Size, -1);
13085    for (int i = 0; i < Size; ++i) {
13086      if (Mask[i] < 0)
13087        continue;
13088
13089      assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");
13090
13091      PermMask[i] =
13092          2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
13093    }
13094    return DAG.getVectorShuffle(
13095        VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
13096                            DL, VT, V1, V2),
13097        DAG.getUNDEF(VT), PermMask);
13098  }
13099
13100  return SDValue();
13101}
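
// A concrete case this routine targets (example mask only): the v4i32 shuffle
// mask {0, 6, 1, 7}. The 32-bit attempt pre-shuffles V2 with {2, 3, -1, -1}
// (V1's mask is a no-op), and the following UNPCKL then interleaves
// {V1[0], V2[2], V1[1], V2[3]}, which is exactly the requested mask.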
13102
13103/// Handle lowering of 2-lane 64-bit floating point shuffles.
13104///
13105/// This is the basis function for the 2-lane 64-bit shuffles as we have full
13106/// support for floating point shuffles but not integer shuffles. These
13107/// instructions will incur a domain crossing penalty on some chips though so
13108/// it is better to avoid lowering through this for integer vectors where
13109/// possible.
13110static SDValue lowerV2F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13111                                 const APInt &Zeroable, SDValue V1, SDValue V2,
13112                                 const X86Subtarget &Subtarget,
13113                                 SelectionDAG &DAG) {
13114  assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
13115  assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
13116  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
13117
13118  if (V2.isUndef()) {
13119    // Check for being able to broadcast a single element.
13120    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2f64, V1, V2,
13121                                                    Mask, Subtarget, DAG))
13122      return Broadcast;
13123
13124    // Straight shuffle of a single input vector. Simulate this by using the
13125    // single input as both of the "inputs" to this instruction..
13126    unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
13127
13128    if (Subtarget.hasAVX()) {
13129      // If we have AVX, we can use VPERMILPS which will allow folding a load
13130      // into the shuffle.
13131      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
13132                         DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
13133    }
13134
13135    return DAG.getNode(
13136        X86ISD::SHUFP, DL, MVT::v2f64,
13137        Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
13138        Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
13139        DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
13140  }
13141  assert(Mask[0] >= 0 && "No undef lanes in multi-input v2 shuffles!");
13142  assert(Mask[1] >= 0 && "No undef lanes in multi-input v2 shuffles!");
13143  assert(Mask[0] < 2 && "We sort V1 to be the first input.");
13144  assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
13145
13146  if (Subtarget.hasAVX2())
13147    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13148      return Extract;
13149
13150  // When loading a scalar and then shuffling it into a vector we can often do
13151  // the insertion cheaply.
13152  if (SDValue Insertion = lowerShuffleAsElementInsertion(
13153          DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
13154    return Insertion;
13155  // Try inverting the insertion since for v2 masks it is easy to do and we
13156  // can't reliably sort the mask one way or the other.
13157  int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
13158                        Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
13159  if (SDValue Insertion = lowerShuffleAsElementInsertion(
13160          DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
13161    return Insertion;
13162
13163  // Try to use one of the special instruction patterns to handle two common
13164  // blend patterns if a zero-blend above didn't work.
13165  if (isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
13166      isShuffleEquivalent(V1, V2, Mask, {1, 3}))
13167    if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
13168      // We can either use a special instruction to load over the low double or
13169      // to move just the low double.
13170      return DAG.getNode(
13171          X86ISD::MOVSD, DL, MVT::v2f64, V2,
13172          DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
13173
13174  if (Subtarget.hasSSE41())
13175    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
13176                                            Zeroable, Subtarget, DAG))
13177      return Blend;
13178
13179  // Use dedicated unpack instructions for masks that match their pattern.
13180  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
13181    return V;
13182
13183  unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
13184  return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
13185                     DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
13186}
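
// The SHUFPD immediate built above uses one bit per lane: bit 0 picks the
// element of V1 for lane 0 and bit 1 picks the element of V2 for lane 1. For
// the mask {1, 3} this gives SHUFPDMask = 1 | (1 << 1) = 3, i.e. a shufpd
// selecting V1[1] and V2[1].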
13187
13188/// Handle lowering of 2-lane 64-bit integer shuffles.
13189///
13190/// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
13191/// the integer unit to minimize domain crossing penalties. However, for blends
13192/// it falls back to the floating point shuffle operation with appropriate bit
13193/// casting.
13194static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13195                                 const APInt &Zeroable, SDValue V1, SDValue V2,
13196                                 const X86Subtarget &Subtarget,
13197                                 SelectionDAG &DAG) {
13198  assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
13199  assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
13200  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
13201
13202  if (V2.isUndef()) {
13203    // Check for being able to broadcast a single element.
13204    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2i64, V1, V2,
13205                                                    Mask, Subtarget, DAG))
13206      return Broadcast;
13207
13208    // Straight shuffle of a single input vector. For everything from SSE2
13209    // onward this has a single fast instruction with no scary immediates.
13210    // We have to map the mask as it is actually a v4i32 shuffle instruction.
13211    V1 = DAG.getBitcast(MVT::v4i32, V1);
13212    int WidenedMask[4] = {
13213        std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
13214        std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
13215    return DAG.getBitcast(
13216        MVT::v2i64,
13217        DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
13218                    getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
13219  }
13220  assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
13221  assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
13222  assert(Mask[0] < 2 && "We sort V1 to be the first input.");
13223  assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
13224
13225  if (Subtarget.hasAVX2())
13226    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13227      return Extract;
13228
13229  // Try to use shift instructions.
13230  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask,
13231                                          Zeroable, Subtarget, DAG))
13232    return Shift;
13233
13234  // When loading a scalar and then shuffling it into a vector we can often do
13235  // the insertion cheaply.
13236  if (SDValue Insertion = lowerShuffleAsElementInsertion(
13237          DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
13238    return Insertion;
13239  // Try inverting the insertion since for v2 masks it is easy to do and we
13240  // can't reliably sort the mask one way or the other.
13241  int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
13242  if (SDValue Insertion = lowerShuffleAsElementInsertion(
13243          DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
13244    return Insertion;
13245
13246  // We have different paths for blend lowering, but they all must use the
13247  // *exact* same predicate.
13248  bool IsBlendSupported = Subtarget.hasSSE41();
13249  if (IsBlendSupported)
13250    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
13251                                            Zeroable, Subtarget, DAG))
13252      return Blend;
13253
13254  // Use dedicated unpack instructions for masks that match their pattern.
13255  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
13256    return V;
13257
13258  // Try to use byte rotation instructions.
13259  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
13260  if (Subtarget.hasSSSE3()) {
13261    if (Subtarget.hasVLX())
13262      if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v2i64, V1, V2, Mask,
13263                                                Subtarget, DAG))
13264        return Rotate;
13265
13266    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v2i64, V1, V2, Mask,
13267                                                  Subtarget, DAG))
13268      return Rotate;
13269  }
13270
13271  // If we have direct support for blends, we should lower by decomposing into
13272  // a permute. That will be faster than the domain cross.
13273  if (IsBlendSupported)
13274    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2, Mask,
13275                                                Subtarget, DAG);
13276
13277  // We implement this with SHUFPD which is pretty lame because it will likely
13278  // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
13279  // However, all the alternatives are still more cycles and newer chips don't
13280  // have this problem. It would be really nice if x86 had better shuffles here.
13281  V1 = DAG.getBitcast(MVT::v2f64, V1);
13282  V2 = DAG.getBitcast(MVT::v2f64, V2);
13283  return DAG.getBitcast(MVT::v2i64,
13284                        DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
13285}
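
// For reference, the single-input path above widens a v2i64 mask to a v4i32
// PSHUFD mask: e.g. the qword swap {1, 0} becomes {2, 3, 0, 1}, which
// getV4X86ShuffleImm8ForMask encodes as the familiar pshufd immediate 0x4E.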
13286
13287/// Lower a vector shuffle using the SHUFPS instruction.
13288///
13289/// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
13290/// It makes no assumptions about whether this is the *best* lowering; it simply
13291/// uses it.
13292static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
13293                                      ArrayRef<int> Mask, SDValue V1,
13294                                      SDValue V2, SelectionDAG &DAG) {
13295  SDValue LowV = V1, HighV = V2;
13296  int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
13297
13298  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
13299
13300  if (NumV2Elements == 1) {
13301    int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();
13302
13303    // Compute the index adjacent to V2Index and in the same half by toggling
13304    // the low bit.
13305    int V2AdjIndex = V2Index ^ 1;
13306
13307    if (Mask[V2AdjIndex] < 0) {
13308      // Handles all the cases where we have a single V2 element and an undef.
13309      // This will only ever happen in the high lanes because we commute the
13310      // vector otherwise.
13311      if (V2Index < 2)
13312        std::swap(LowV, HighV);
13313      NewMask[V2Index] -= 4;
13314    } else {
13315      // Handle the case where the V2 element ends up adjacent to a V1 element.
13316      // To make this work, blend them together as the first step.
13317      int V1Index = V2AdjIndex;
13318      int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
13319      V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
13320                       getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
13321
13322      // Now proceed to reconstruct the final blend as we have the necessary
13323      // high or low half formed.
13324      if (V2Index < 2) {
13325        LowV = V2;
13326        HighV = V1;
13327      } else {
13328        HighV = V2;
13329      }
13330      NewMask[V1Index] = 2; // We put the V1 element in V2[2].
13331      NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
13332    }
13333  } else if (NumV2Elements == 2) {
13334    if (Mask[0] < 4 && Mask[1] < 4) {
13335      // Handle the easy case where we have V1 in the low lanes and V2 in the
13336      // high lanes.
13337      NewMask[2] -= 4;
13338      NewMask[3] -= 4;
13339    } else if (Mask[2] < 4 && Mask[3] < 4) {
13340      // We also handle the reversed case because this utility may get called
13341      // when we detect a SHUFPS pattern but can't easily commute the shuffle to
13342      // arrange things in the right direction.
13343      NewMask[0] -= 4;
13344      NewMask[1] -= 4;
13345      HighV = V1;
13346      LowV = V2;
13347    } else {
13348      // We have a mixture of V1 and V2 in both low and high lanes. Rather than
13349      // trying to place elements directly, just blend them and set up the final
13350      // shuffle to place them.
13351
13352      // The first two blend mask elements are for V1, the second two are for
13353      // V2.
13354      int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
13355                          Mask[2] < 4 ? Mask[2] : Mask[3],
13356                          (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
13357                          (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
13358      V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
13359                       getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
13360
13361      // Now we do a normal shuffle of V1 by giving V1 as both operands to
13362      // a blend.
13363      LowV = HighV = V1;
13364      NewMask[0] = Mask[0] < 4 ? 0 : 2;
13365      NewMask[1] = Mask[0] < 4 ? 2 : 0;
13366      NewMask[2] = Mask[2] < 4 ? 1 : 3;
13367      NewMask[3] = Mask[2] < 4 ? 3 : 1;
13368    }
13369  }
13370  return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
13371                     getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
13372}
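
// Worked example of the mixed NumV2Elements == 2 case (mask chosen for
// illustration): for {1, 5, 3, 7}, the first SHUFP blends with {1, 3, 1, 3} to
// form {V1[1], V1[3], V2[1], V2[3]}, and the final SHUFP reorders that single
// vector with {0, 2, 1, 3}, producing {V1[1], V2[1], V1[3], V2[3]} as
// requested.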
13373
13374/// Lower 4-lane 32-bit floating point shuffles.
13375///
13376/// Uses instructions exclusively from the floating point unit to minimize
13377/// domain crossing penalties, as these are sufficient to implement all v4f32
13378/// shuffles.
13379static SDValue lowerV4F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13380                                 const APInt &Zeroable, SDValue V1, SDValue V2,
13381                                 const X86Subtarget &Subtarget,
13382                                 SelectionDAG &DAG) {
13383  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
13384  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
13385  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
13386
13387  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
13388
13389  if (NumV2Elements == 0) {
13390    // Check for being able to broadcast a single element.
13391    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f32, V1, V2,
13392                                                    Mask, Subtarget, DAG))
13393      return Broadcast;
13394
13395    // Use even/odd duplicate instructions for masks that match their pattern.
13396    if (Subtarget.hasSSE3()) {
13397      if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
13398        return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
13399      if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3}))
13400        return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
13401    }
13402
13403    if (Subtarget.hasAVX()) {
13404      // If we have AVX, we can use VPERMILPS which will allow folding a load
13405      // into the shuffle.
13406      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
13407                         getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13408    }
13409
13410    // Use MOVLHPS/MOVHLPS to simulate unary shuffles. These are only valid
13411    // in SSE1 because otherwise they are widened to v2f64 and never get here.
13412    if (!Subtarget.hasSSE2()) {
13413      if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1}))
13414        return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V1);
13415      if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 2, 3}))
13416        return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V1, V1);
13417    }
13418
13419    // Otherwise, use a straight shuffle of a single input vector. We pass the
13420    // input vector to both operands to simulate this with a SHUFPS.
13421    return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
13422                       getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13423  }
13424
13425  if (Subtarget.hasAVX2())
13426    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13427      return Extract;
13428
13429  // There are special ways we can lower some single-element blends. However, we
13430  // have custom ways we can lower more complex single-element blends below that
13431  // we defer to if both this and BLENDPS fail to match, so restrict this to
13432  // when the V2 input is targeting element 0 of the mask -- that is the fast
13433  // case here.
13434  if (NumV2Elements == 1 && Mask[0] >= 4)
13435    if (SDValue V = lowerShuffleAsElementInsertion(
13436            DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
13437      return V;
13438
13439  if (Subtarget.hasSSE41()) {
13440    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
13441                                            Zeroable, Subtarget, DAG))
13442      return Blend;
13443
13444    // Use INSERTPS if we can complete the shuffle efficiently.
13445    if (SDValue V = lowerShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
13446      return V;
13447
13448    if (!isSingleSHUFPSMask(Mask))
13449      if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, MVT::v4f32, V1,
13450                                                            V2, Mask, DAG))
13451        return BlendPerm;
13452  }
13453
13454  // Use low/high mov instructions. These are only valid in SSE1 because
13455  // otherwise they are widened to v2f64 and never get here.
13456  if (!Subtarget.hasSSE2()) {
13457    if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5}))
13458      return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
13459    if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 6, 7}))
13460      return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
13461  }
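
  // Concretely, those two patterns expand as: {0, 1, 4, 5} becomes
  // MOVLHPS(V1, V2) = {V1[0], V1[1], V2[0], V2[1]}, and {2, 3, 6, 7} becomes
  // MOVHLPS(V2, V1) = {V1[2], V1[3], V2[2], V2[3]}.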
13462
13463  // Use dedicated unpack instructions for masks that match their pattern.
13464  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
13465    return V;
13466
13467  // Otherwise fall back to a SHUFPS lowering strategy.
13468  return lowerShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
13469}
13470
13471/// Lower 4-lane i32 vector shuffles.
13472///
13473/// We try to handle these with integer-domain shuffles where we can, but for
13474/// blends we use the floating point domain blend instructions.
13475static SDValue lowerV4I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13476                                 const APInt &Zeroable, SDValue V1, SDValue V2,
13477                                 const X86Subtarget &Subtarget,
13478                                 SelectionDAG &DAG) {
13479  assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
13480  assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
13481  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
13482
13483  // Whenever we can lower this as a zext, that instruction is strictly faster
13484  // than any alternative. It also allows us to fold memory operands into the
13485  // shuffle in many cases.
13486  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2, Mask,
13487                                                   Zeroable, Subtarget, DAG))
13488    return ZExt;
13489
13490  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
13491
13492  if (NumV2Elements == 0) {
13493    // Try to use broadcast unless the mask only has one non-undef element.
13494    if (count_if(Mask, [](int M) { return M >= 0 && M < 4; }) > 1) {
13495      if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i32, V1, V2,
13496                                                      Mask, Subtarget, DAG))
13497        return Broadcast;
13498    }
13499
13500    // Straight shuffle of a single input vector. For everything from SSE2
13501    // onward this has a single fast instruction with no scary immediates.
13502    // We coerce the shuffle pattern to be compatible with UNPCK instructions
13503    // but we aren't actually going to use the UNPCK instruction because doing
13504    // so prevents folding a load into this instruction or making a copy.
13505    const int UnpackLoMask[] = {0, 0, 1, 1};
13506    const int UnpackHiMask[] = {2, 2, 3, 3};
13507    if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 1, 1}))
13508      Mask = UnpackLoMask;
13509    else if (isShuffleEquivalent(V1, V2, Mask, {2, 2, 3, 3}))
13510      Mask = UnpackHiMask;
13511
13512    return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
13513                       getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13514  }
13515
13516  if (Subtarget.hasAVX2())
13517    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13518      return Extract;
13519
13520  // Try to use shift instructions.
13521  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask,
13522                                          Zeroable, Subtarget, DAG))
13523    return Shift;
13524
13525  // There are special ways we can lower some single-element blends.
13526  if (NumV2Elements == 1)
13527    if (SDValue V = lowerShuffleAsElementInsertion(
13528            DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
13529      return V;
13530
13531  // We have different paths for blend lowering, but they all must use the
13532  // *exact* same predicate.
13533  bool IsBlendSupported = Subtarget.hasSSE41();
13534  if (IsBlendSupported)
13535    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
13536                                            Zeroable, Subtarget, DAG))
13537      return Blend;
13538
13539  if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
13540                                             Zeroable, Subtarget, DAG))
13541    return Masked;
13542
13543  // Use dedicated unpack instructions for masks that match their pattern.
13544  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
13545    return V;
13546
13547  // Try to use byte rotation instructions.
13548  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
13549  if (Subtarget.hasSSSE3()) {
13550    if (Subtarget.hasVLX())
13551      if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v4i32, V1, V2, Mask,
13552                                                Subtarget, DAG))
13553        return Rotate;
13554
13555    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i32, V1, V2, Mask,
13556                                                  Subtarget, DAG))
13557      return Rotate;
13558  }
13559
13560  // Assume that a single SHUFPS is faster than an alternative sequence of
13561  // multiple instructions (even if the CPU has a domain penalty).
13562  // If some CPU is harmed by the domain switch, we can fix it in a later pass.
13563  if (!isSingleSHUFPSMask(Mask)) {
13564    // If we have direct support for blends, we should lower by decomposing into
13565    // a permute. That will be faster than the domain cross.
13566    if (IsBlendSupported)
13567      return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2, Mask,
13568                                                  Subtarget, DAG);
13569
13570    // Try to lower by permuting the inputs into an unpack instruction.
13571    if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v4i32, V1, V2,
13572                                                        Mask, Subtarget, DAG))
13573      return Unpack;
13574  }
13575
13576  // We implement this with SHUFPS because it can blend from two vectors.
13577  // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
13578  // up the inputs, bypassing domain shift penalties that we would incur if we
13579  // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
13580  // relevant.
13581  SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
13582  SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
13583  SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
13584  return DAG.getBitcast(MVT::v4i32, ShufPS);
13585}
13586
13587/// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
13588/// shuffle lowering, and the most complex part.
13589///
13590/// The lowering strategy is to try to form pairs of input lanes which are
13591/// targeted at the same half of the final vector, and then use a dword shuffle
13592/// to place them onto the right half, and finally unpack the paired lanes into
13593/// their final position.
13594///
13595/// The exact breakdown of how to form these dword pairs and align them on the
13596/// correct sides is really tricky. See the comments within the function for
13597/// more of the details.
13598///
13599/// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
13600/// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
13601/// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
13602/// vector, form the analogous 128-bit 8-element Mask.
13603static SDValue lowerV8I16GeneralSingleInputShuffle(
13604    const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
13605    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
13606  assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
13607  MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
13608
13609  assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
13610  MutableArrayRef<int> LoMask = Mask.slice(0, 4);
13611  MutableArrayRef<int> HiMask = Mask.slice(4, 4);
13612
13613  // Attempt to directly match PSHUFLW or PSHUFHW.
13614  if (isUndefOrInRange(LoMask, 0, 4) &&
13615      isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
13616    return DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13617                       getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
13618  }
13619  if (isUndefOrInRange(HiMask, 4, 8) &&
13620      isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
13621    for (int i = 0; i != 4; ++i)
13622      HiMask[i] = (HiMask[i] < 0 ? HiMask[i] : (HiMask[i] - 4));
13623    return DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13624                       getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
13625  }
13626
13627  SmallVector<int, 4> LoInputs;
13628  copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
13629  array_pod_sort(LoInputs.begin(), LoInputs.end());
13630  LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
13631  SmallVector<int, 4> HiInputs;
13632  copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
13633  array_pod_sort(HiInputs.begin(), HiInputs.end());
13634  HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
13635  int NumLToL = llvm::lower_bound(LoInputs, 4) - LoInputs.begin();
13636  int NumHToL = LoInputs.size() - NumLToL;
13637  int NumLToH = llvm::lower_bound(HiInputs, 4) - HiInputs.begin();
13638  int NumHToH = HiInputs.size() - NumLToH;
13639  MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
13640  MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
13641  MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
13642  MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
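
  // Example of this partitioning (mask for illustration only): for the
  // single-input v8i16 mask {0, 7, 1, 6, 4, 2, 5, 3}, LToLInputs = {0, 1},
  // HToLInputs = {6, 7}, LToHInputs = {2, 3} and HToHInputs = {4, 5}, so each
  // half receives exactly two inputs from each half of the source.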
13643
13644  // If we are shuffling values from one half - check how many different DWORD
13645  // pairs we need to create. If only 1 or 2 then we can perform this as a
13646  // PSHUFLW/PSHUFHW + PSHUFD instead of the PSHUFD+PSHUFLW+PSHUFHW chain below.
13647  auto ShuffleDWordPairs = [&](ArrayRef<int> PSHUFHalfMask,
13648                               ArrayRef<int> PSHUFDMask, unsigned ShufWOp) {
13649    V = DAG.getNode(ShufWOp, DL, VT, V,
13650                    getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
13651    V = DAG.getBitcast(PSHUFDVT, V);
13652    V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
13653                    getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
13654    return DAG.getBitcast(VT, V);
13655  };
13656
13657  if ((NumHToL + NumHToH) == 0 || (NumLToL + NumLToH) == 0) {
13658    int PSHUFDMask[4] = { -1, -1, -1, -1 };
13659    SmallVector<std::pair<int, int>, 4> DWordPairs;
13660    int DOffset = ((NumHToL + NumHToH) == 0 ? 0 : 2);
13661
13662    // Collect the different DWORD pairs.
13663    for (int DWord = 0; DWord != 4; ++DWord) {
13664      int M0 = Mask[2 * DWord + 0];
13665      int M1 = Mask[2 * DWord + 1];
13666      M0 = (M0 >= 0 ? M0 % 4 : M0);
13667      M1 = (M1 >= 0 ? M1 % 4 : M1);
13668      if (M0 < 0 && M1 < 0)
13669        continue;
13670
13671      bool Match = false;
13672      for (int j = 0, e = DWordPairs.size(); j < e; ++j) {
13673        auto &DWordPair = DWordPairs[j];
13674        if ((M0 < 0 || isUndefOrEqual(DWordPair.first, M0)) &&
13675            (M1 < 0 || isUndefOrEqual(DWordPair.second, M1))) {
13676          DWordPair.first = (M0 >= 0 ? M0 : DWordPair.first);
13677          DWordPair.second = (M1 >= 0 ? M1 : DWordPair.second);
13678          PSHUFDMask[DWord] = DOffset + j;
13679          Match = true;
13680          break;
13681        }
13682      }
13683      if (!Match) {
13684        PSHUFDMask[DWord] = DOffset + DWordPairs.size();
13685        DWordPairs.push_back(std::make_pair(M0, M1));
13686      }
13687    }
13688
13689    if (DWordPairs.size() <= 2) {
13690      DWordPairs.resize(2, std::make_pair(-1, -1));
13691      int PSHUFHalfMask[4] = {DWordPairs[0].first, DWordPairs[0].second,
13692                              DWordPairs[1].first, DWordPairs[1].second};
13693      if ((NumHToL + NumHToH) == 0)
13694        return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFLW);
13695      if ((NumLToL + NumLToH) == 0)
13696        return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFHW);
13697    }
13698  }
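
  // As an example of the dword-pair path above (mask chosen for illustration):
  // the single-input v8i16 mask {2, 3, 2, 3, 1, 0, 1, 0} collapses to the two
  // pairs (2, 3) and (1, 0), so it is emitted as PSHUFLW {2, 3, 1, 0} followed
  // by PSHUFD {0, 0, 1, 1} instead of the longer chain built below.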
13699
13700  // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
13701  // such inputs we can swap two of the dwords across the half mark and end up
13702  // with <=2 inputs to each half in each half. Once there, we can fall through
13703  // to the generic code below. For example:
13704  //
13705  // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
13706  // Mask:  [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
13707  //
13708  // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
13709  // and an existing 2-into-2 on the other half. In this case we may have to
13710  // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
13711  // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
13712  // Fortunately, we don't have to handle anything but a 2-into-2 pattern
13713  // because any other situation (including a 3-into-1 or 1-into-3 in the other
13714  // half than the one we target for fixing) will be fixed when we re-enter this
13715  // path. We will also combine away any sequence of PSHUFD instructions that
13716  // result into a single instruction. Here is an example of the tricky case:
13717  //
13718  // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
13719  // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
13720  //
13721  // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
13722  //
13723  // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
13724  // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
13725  //
13726  // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
13727  // Mask:  [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
13728  //
13729  // The result is fine to be handled by the generic logic.
13730  auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
13731                          ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
13732                          int AOffset, int BOffset) {
13733    assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
13734           "Must call this with A having 3 or 1 inputs from the A half.");
13735    assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
13736           "Must call this with B having 1 or 3 inputs from the B half.");
13737    assert(AToAInputs.size() + BToAInputs.size() == 4 &&
13738           "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
13739
13740    bool ThreeAInputs = AToAInputs.size() == 3;
13741
13742    // Compute the index of dword with only one word among the three inputs in
13743    // a half by taking the sum of the half with three inputs and subtracting
13744    // the sum of the actual three inputs. The difference is the remaining
13745    // slot.
13746    int ADWord = 0, BDWord = 0;
13747    int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
13748    int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
13749    int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
13750    ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
13751    int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
13752    int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
13753    int TripleNonInputIdx =
13754        TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
13755    TripleDWord = TripleNonInputIdx / 2;
13756
13757    // We use xor with one to compute the adjacent DWord to whichever one the
13758    // OneInput is in.
13759    OneInputDWord = (OneInput / 2) ^ 1;
13760
13761    // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
13762    // and BToA inputs. If there is also such a problem with the BToB and AToB
13763    // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
13764    // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
13765    // is essential that we don't *create* a 3<-1 as then we might oscillate.
13766    if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
13767      // Compute how many inputs will be flipped by swapping these DWords. We
13768      // need
13769      // to balance this to ensure we don't form a 3-1 shuffle in the other
13770      // half.
13771      int NumFlippedAToBInputs =
13772          std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
13773          std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
13774      int NumFlippedBToBInputs =
13775          std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
13776          std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
13777      if ((NumFlippedAToBInputs == 1 &&
13778           (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
13779          (NumFlippedBToBInputs == 1 &&
13780           (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
13781        // We choose whether to fix the A half or B half based on whether that
13782        // half has zero flipped inputs. At zero, we may not be able to fix it
13783        // with that half. We also bias towards fixing the B half because that
13784        // will more commonly be the high half, and we have to bias one way.
13785        auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
13786                                                       ArrayRef<int> Inputs) {
13787          int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
13788          bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
13789          // Determine whether the free index is in the flipped dword or the
13790          // unflipped dword based on where the pinned index is. We use this bit
13791          // in an xor to conditionally select the adjacent dword.
13792          int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
13793          bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
13794          if (IsFixIdxInput == IsFixFreeIdxInput)
13795            FixFreeIdx += 1;
13796          IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
13797          assert(IsFixIdxInput != IsFixFreeIdxInput &&
13798                 "We need to be changing the number of flipped inputs!");
13799          int PSHUFHalfMask[] = {0, 1, 2, 3};
13800          std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
13801          V = DAG.getNode(
13802              FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
13803              MVT::getVectorVT(MVT::i16, V.getValueSizeInBits() / 16), V,
13804              getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
13805
13806          for (int &M : Mask)
13807            if (M >= 0 && M == FixIdx)
13808              M = FixFreeIdx;
13809            else if (M >= 0 && M == FixFreeIdx)
13810              M = FixIdx;
13811        };
13812        if (NumFlippedBToBInputs != 0) {
13813          int BPinnedIdx =
13814              BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
13815          FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
13816        } else {
13817          assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
13818          int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
13819          FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
13820        }
13821      }
13822    }
13823
13824    int PSHUFDMask[] = {0, 1, 2, 3};
13825    PSHUFDMask[ADWord] = BDWord;
13826    PSHUFDMask[BDWord] = ADWord;
13827    V = DAG.getBitcast(
13828        VT,
13829        DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
13830                    getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
13831
13832    // Adjust the mask to match the new locations of A and B.
13833    for (int &M : Mask)
13834      if (M >= 0 && M/2 == ADWord)
13835        M = 2 * BDWord + M % 2;
13836      else if (M >= 0 && M/2 == BDWord)
13837        M = 2 * ADWord + M % 2;
13838
13839    // Recurse back into this routine to re-compute state now that this isn't
13840    // a 3 and 1 problem.
13841    return lowerV8I16GeneralSingleInputShuffle(DL, VT, V, Mask, Subtarget, DAG);
13842  };
13843  if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
13844    return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
13845  if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
13846    return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
13847
13848  // At this point there are at most two inputs to the low and high halves from
13849  // each half. That means the inputs can always be grouped into dwords and
13850  // those dwords can then be moved to the correct half with a dword shuffle.
13851  // We use at most one low and one high word shuffle to collect these paired
13852  // inputs into dwords, and finally a dword shuffle to place them.
13853  int PSHUFLMask[4] = {-1, -1, -1, -1};
13854  int PSHUFHMask[4] = {-1, -1, -1, -1};
13855  int PSHUFDMask[4] = {-1, -1, -1, -1};
13856
13857  // First fix the masks for all the inputs that are staying in their
13858  // original halves. This will then dictate the targets of the cross-half
13859  // shuffles.
13860  auto fixInPlaceInputs =
13861      [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
13862                    MutableArrayRef<int> SourceHalfMask,
13863                    MutableArrayRef<int> HalfMask, int HalfOffset) {
13864    if (InPlaceInputs.empty())
13865      return;
13866    if (InPlaceInputs.size() == 1) {
13867      SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
13868          InPlaceInputs[0] - HalfOffset;
13869      PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
13870      return;
13871    }
13872    if (IncomingInputs.empty()) {
13873      // Just fix all of the in place inputs.
13874      for (int Input : InPlaceInputs) {
13875        SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
13876        PSHUFDMask[Input / 2] = Input / 2;
13877      }
13878      return;
13879    }
13880
13881    assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
13882    SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
13883        InPlaceInputs[0] - HalfOffset;
13884    // Put the second input next to the first so that they are packed into
13885    // a dword. We find the adjacent index by toggling the low bit.
13886    int AdjIndex = InPlaceInputs[0] ^ 1;
13887    SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
13888    std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
13889    PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
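    // For example (hypothetical inputs): InPlaceInputs == {1, 3} with
    // HalfOffset == 0 gives AdjIndex == 0, so word 3 is moved into slot 0,
    // the pair {1, 3} ends up packed into dword 0, and PSHUFDMask[0] keeps
    // that dword in place.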
13890  };
13891  fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
13892  fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
13893
13894  // Now gather the cross-half inputs and place them into a free dword of
13895  // their target half.
13896  // FIXME: This operation could almost certainly be simplified dramatically to
13897  // look more like the 3-1 fixing operation.
13898  auto moveInputsToRightHalf = [&PSHUFDMask](
13899      MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
13900      MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
13901      MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
13902      int DestOffset) {
13903    auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
13904      return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
13905    };
13906    auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
13907                                               int Word) {
13908      int LowWord = Word & ~1;
13909      int HighWord = Word | 1;
13910      return isWordClobbered(SourceHalfMask, LowWord) ||
13911             isWordClobbered(SourceHalfMask, HighWord);
13912    };
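    // A word slot is "clobbered" when this half's shuffle already moves some
    // other word into it; e.g. SourceHalfMask == {2, -1, -1, -1} clobbers
    // word 0 and therefore dword 0.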
13913
13914    if (IncomingInputs.empty())
13915      return;
13916
13917    if (ExistingInputs.empty()) {
13918      // Map any dwords with inputs from them into the right half.
13919      for (int Input : IncomingInputs) {
13920        // If the source half mask maps over the inputs, turn those into
13921        // swaps and use the swapped lane.
13922        if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
13923          if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
13924            SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
13925                Input - SourceOffset;
13926            // We have to swap the uses in our half mask in one sweep.
13927            for (int &M : HalfMask)
13928              if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
13929                M = Input;
13930              else if (M == Input)
13931                M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
13932          } else {
13933            assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
13934                       Input - SourceOffset &&
13935                   "Previous placement doesn't match!");
13936          }
13937          // Note that this correctly re-maps both when we do a swap and when
13938          // we observe the other side of the swap above. We rely on that to
13939          // avoid swapping the members of the input list directly.
13940          Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
13941        }
13942
13943        // Map the input's dword into the correct half.
13944        if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
13945          PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
13946        else
13947          assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
13948                     Input / 2 &&
13949                 "Previous placement doesn't match!");
13950      }
13951
13952      // And just directly shift any other-half mask elements to be same-half
13953      // as we will have mirrored the dword containing the element into the
13954      // same position within that half.
13955      for (int &M : HalfMask)
13956        if (M >= SourceOffset && M < SourceOffset + 4) {
13957          M = M - SourceOffset + DestOffset;
13958          assert(M >= 0 && "This should never wrap below zero!");
13959        }
13960      return;
13961    }
13962
13963    // Ensure we have the input in a viable dword of its current half. This
13964    // is particularly tricky because the original position may be clobbered
13965    // by inputs being moved and *staying* in that half.
13966    if (IncomingInputs.size() == 1) {
13967      if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
13968        int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) +
13969                         SourceOffset;
13970        SourceHalfMask[InputFixed - SourceOffset] =
13971            IncomingInputs[0] - SourceOffset;
13972        std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
13973                     InputFixed);
13974        IncomingInputs[0] = InputFixed;
13975      }
13976    } else if (IncomingInputs.size() == 2) {
13977      if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
13978          isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
13979        // We have two non-adjacent or clobbered inputs we need to extract from
13980        // the source half. To do this, we need to map them into some adjacent
13981        // dword slot in the source mask.
13982        int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
13983                              IncomingInputs[1] - SourceOffset};
13984
13985        // If there is a free slot in the source half mask adjacent to one of
13986        // the inputs, place the other input in it. We use (Index XOR 1) to
13987        // compute an adjacent index.
13988        if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
13989            SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
13990          SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
13991          SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
13992          InputsFixed[1] = InputsFixed[0] ^ 1;
13993        } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
13994                   SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
13995          SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
13996          SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
13997          InputsFixed[0] = InputsFixed[1] ^ 1;
13998        } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
13999                   SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
14000          // The two inputs are in the same DWord but it is clobbered and the
14001          // adjacent DWord isn't used at all. Move both inputs to the free
14002          // slot.
14003          SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
14004          SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
14005          InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
14006          InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
14007        } else {
14008          // The only way we hit this point is if there is no clobbering
14009          // (because there are no off-half inputs to this half) and there is no
14010          // free slot adjacent to one of the inputs. In this case, we have to
14011          // swap an input with a non-input.
14012          for (int i = 0; i < 4; ++i)
14013            assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
14014                   "We can't handle any clobbers here!");
14015          assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
14016                 "Cannot have adjacent inputs here!");
14017
14018          SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
14019          SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
14020
14021          // We also have to update the final source mask in this case because
14022          // it may need to undo the above swap.
14023          for (int &M : FinalSourceHalfMask)
14024            if (M == (InputsFixed[0] ^ 1) + SourceOffset)
14025              M = InputsFixed[1] + SourceOffset;
14026            else if (M == InputsFixed[1] + SourceOffset)
14027              M = (InputsFixed[0] ^ 1) + SourceOffset;
14028
14029          InputsFixed[1] = InputsFixed[0] ^ 1;
14030        }
14031
14032        // Point everything at the fixed inputs.
14033        for (int &M : HalfMask)
14034          if (M == IncomingInputs[0])
14035            M = InputsFixed[0] + SourceOffset;
14036          else if (M == IncomingInputs[1])
14037            M = InputsFixed[1] + SourceOffset;
14038
14039        IncomingInputs[0] = InputsFixed[0] + SourceOffset;
14040        IncomingInputs[1] = InputsFixed[1] + SourceOffset;
14041      }
14042    } else {
14043      llvm_unreachable("Unhandled input size!");
14044    }
14045
14046    // Now hoist the DWord down to the right half.
14047    int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
14048    assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
14049    PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
14050    for (int &M : HalfMask)
14051      for (int Input : IncomingInputs)
14052        if (M == Input)
14053          M = FreeDWord * 2 + Input % 2;
14054  };
14055  moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
14056                        /*SourceOffset*/ 4, /*DestOffset*/ 0);
14057  moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
14058                        /*SourceOffset*/ 0, /*DestOffset*/ 4);
14059
14060  // Now enact all the shuffles we've computed to move the inputs into their
14061  // target half.
14062  if (!isNoopShuffleMask(PSHUFLMask))
14063    V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
14064                    getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
14065  if (!isNoopShuffleMask(PSHUFHMask))
14066    V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
14067                    getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
14068  if (!isNoopShuffleMask(PSHUFDMask))
14069    V = DAG.getBitcast(
14070        VT,
14071        DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
14072                    getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
14073
14074  // At this point, each half should contain all its inputs, and we can then
14075  // just shuffle them into their final position.
14076  assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
14077         "Failed to lift all the high half inputs to the low mask!");
14078  assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
14079         "Failed to lift all the low half inputs to the high mask!");
14080
14081  // Do a half shuffle for the low mask.
14082  if (!isNoopShuffleMask(LoMask))
14083    V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
14084                    getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
14085
14086  // Do a half shuffle with the high mask after shifting its values down.
14087  for (int &M : HiMask)
14088    if (M >= 0)
14089      M -= 4;
14090  if (!isNoopShuffleMask(HiMask))
14091    V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
14092                    getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
14093
14094  return V;
14095}
14096
14097/// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
14098/// blend if only one input is used.
14099static SDValue lowerShuffleAsBlendOfPSHUFBs(
14100    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14101    const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
14102  assert(!is128BitLaneCrossingShuffleMask(VT, Mask) &&
14103         "Lane crossing shuffle masks not supported");
14104
14105  int NumBytes = VT.getSizeInBits() / 8;
14106  int Size = Mask.size();
14107  int Scale = NumBytes / Size;
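  // For example, for a v8i16 shuffle NumBytes is 16, Size is 8 and Scale is
  // 2, so each mask element expands into two adjacent byte selectors below.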
14108
14109  SmallVector<SDValue, 64> V1Mask(NumBytes, DAG.getUNDEF(MVT::i8));
14110  SmallVector<SDValue, 64> V2Mask(NumBytes, DAG.getUNDEF(MVT::i8));
14111  V1InUse = false;
14112  V2InUse = false;
14113
14114  for (int i = 0; i < NumBytes; ++i) {
14115    int M = Mask[i / Scale];
14116    if (M < 0)
14117      continue;
14118
14119    const int ZeroMask = 0x80;
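    // PSHUFB zeroes a destination byte whenever bit 7 of the corresponding
    // control byte is set, so 0x80 marks bytes that must not be taken from
    // this source.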
14120    int V1Idx = M < Size ? M * Scale + i % Scale : ZeroMask;
14121    int V2Idx = M < Size ? ZeroMask : (M - Size) * Scale + i % Scale;
14122    if (Zeroable[i / Scale])
14123      V1Idx = V2Idx = ZeroMask;
14124
14125    V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
14126    V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
14127    V1InUse |= (ZeroMask != V1Idx);
14128    V2InUse |= (ZeroMask != V2Idx);
14129  }
14130
14131  MVT ShufVT = MVT::getVectorVT(MVT::i8, NumBytes);
14132  if (V1InUse)
14133    V1 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V1),
14134                     DAG.getBuildVector(ShufVT, DL, V1Mask));
14135  if (V2InUse)
14136    V2 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V2),
14137                     DAG.getBuildVector(ShufVT, DL, V2Mask));
14138
14139  // If we need shuffled inputs from both, blend the two.
14140  SDValue V;
14141  if (V1InUse && V2InUse)
14142    V = DAG.getNode(ISD::OR, DL, ShufVT, V1, V2);
14143  else
14144    V = V1InUse ? V1 : V2;
14145
14146  // Cast the result back to the correct type.
14147  return DAG.getBitcast(VT, V);
14148}
14149
14150/// Generic lowering of 8-lane i16 shuffles.
14151///
14152/// This handles both single-input shuffles and combined shuffle/blends with
14153/// two inputs. The single input shuffles are immediately delegated to
14154/// a dedicated lowering routine.
14155///
14156/// The blends are lowered in one of three fundamental ways. If there are few
14157/// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
14158/// of the input is significantly cheaper when lowered as an interleaving of
14159/// the two inputs, try to interleave them. Otherwise, blend the low and high
14160/// halves of the inputs separately (making them have relatively few inputs)
14161/// and then concatenate them.
14162static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
14163                                 const APInt &Zeroable, SDValue V1, SDValue V2,
14164                                 const X86Subtarget &Subtarget,
14165                                 SelectionDAG &DAG) {
14166  assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
14167  assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
14168  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
14169
14170  // Whenever we can lower this as a zext, that instruction is strictly faster
14171  // than any alternative.
14172  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i16, V1, V2, Mask,
14173                                                   Zeroable, Subtarget, DAG))
14174    return ZExt;
14175
14176  int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });
14177
14178  if (NumV2Inputs == 0) {
14179    // Try to use shift instructions.
14180    if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask,
14181                                            Zeroable, Subtarget, DAG))
14182      return Shift;
14183
14184    // Check for being able to broadcast a single element.
14185    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i16, V1, V2,
14186                                                    Mask, Subtarget, DAG))
14187      return Broadcast;
14188
14189    // Use dedicated unpack instructions for masks that match their pattern.
14190    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
14191      return V;
14192
14193    // Use dedicated pack instructions for masks that match their pattern.
14194    if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
14195                                         Subtarget))
14196      return V;
14197
14198    // Try to use byte rotation instructions.
14199    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V1, Mask,
14200                                                  Subtarget, DAG))
14201      return Rotate;
14202
14203    // Make a copy of the mask so it can be modified.
14204    SmallVector<int, 8> MutableMask(Mask.begin(), Mask.end());
14205    return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v8i16, V1, MutableMask,
14206                                               Subtarget, DAG);
14207  }
14208
14209  assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
14210         "All single-input shuffles should be canonicalized to be V1-input "
14211         "shuffles.");
14212
14213  // Try to use shift instructions.
14214  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask,
14215                                          Zeroable, Subtarget, DAG))
14216    return Shift;
14217
14218  // See if we can use SSE4A Extraction / Insertion.
14219  if (Subtarget.hasSSE4A())
14220    if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask,
14221                                          Zeroable, DAG))
14222      return V;
14223
14224  // There are special ways we can lower some single-element blends.
14225  if (NumV2Inputs == 1)
14226    if (SDValue V = lowerShuffleAsElementInsertion(
14227            DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
14228      return V;
14229
14230  // We have different paths for blend lowering, but they all must use the
14231  // *exact* same predicate.
14232  bool IsBlendSupported = Subtarget.hasSSE41();
14233  if (IsBlendSupported)
14234    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
14235                                            Zeroable, Subtarget, DAG))
14236      return Blend;
14237
14238  if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
14239                                             Zeroable, Subtarget, DAG))
14240    return Masked;
14241
14242  // Use dedicated unpack instructions for masks that match their pattern.
14243  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
14244    return V;
14245
14246  // Use dedicated pack instructions for masks that match their pattern.
14247  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
14248                                       Subtarget))
14249    return V;
14250
14251  // Try to use byte rotation instructions.
14252  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V2, Mask,
14253                                                Subtarget, DAG))
14254    return Rotate;
14255
14256  if (SDValue BitBlend =
14257          lowerShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
14258    return BitBlend;
14259
14260  // Try to use byte shift instructions to mask.
14261  if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v8i16, V1, V2, Mask,
14262                                              Zeroable, Subtarget, DAG))
14263    return V;
14264
14265  // Try to lower by permuting the inputs into an unpack instruction.
14266  if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1, V2,
14267                                                      Mask, Subtarget, DAG))
14268    return Unpack;
14269
14270  // If we can't directly blend but can use PSHUFB, that will be better as it
14271  // can both shuffle and set up the inefficient blend.
14272  if (!IsBlendSupported && Subtarget.hasSSSE3()) {
14273    bool V1InUse, V2InUse;
14274    return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask,
14275                                        Zeroable, DAG, V1InUse, V2InUse);
14276  }
14277
14278  // We can always bit-blend if we have to so the fallback strategy is to
14279  // decompose into single-input permutes and blends.
14280  return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
14281                                              Mask, Subtarget, DAG);
14282}
14283
14284/// Check whether a compaction lowering can be done by dropping even
14285/// elements and compute how many times even elements must be dropped.
14286///
14287/// This handles shuffles which take every (2^N)-th element for some N.
14288/// Example shuffle masks:
14289///
14290///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14,  0,  2,  4,  6,  8, 10, 12, 14
14291///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
14292///  N = 2:  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12
14293///  N = 2:  0,  4,  8, 12, 16, 20, 24, 28,  0,  4,  8, 12, 16, 20, 24, 28
14294///  N = 3:  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8
14295///  N = 3:  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24
14296///
14297/// Any of these lanes can of course be undef.
14298///
14299/// This routine only supports N <= 3.
14300/// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
14301/// for larger N.
14302///
14303/// \returns N above, or the number of times even elements must be dropped if
14304/// there is such a number. Otherwise returns zero.
14305static int canLowerByDroppingEvenElements(ArrayRef<int> Mask,
14306                                          bool IsSingleInput) {
14307  // The modulus for the shuffle vector entries is based on whether this is
14308  // a single input or not.
14309  int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
14310  assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
14311         "We should only be called with masks with a power-of-2 size!");
14312
14313  uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
14314
14315  // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
14316  // and 2^3 simultaneously. This is because we may have ambiguity with
14317  // partially undef inputs.
14318  bool ViableForN[3] = {true, true, true};
14319
14320  for (int i = 0, e = Mask.size(); i < e; ++i) {
14321    // Ignore undef lanes, we'll optimistically collapse them to the pattern we
14322    // want.
14323    if (Mask[i] < 0)
14324      continue;
14325
14326    bool IsAnyViable = false;
14327    for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
14328      if (ViableForN[j]) {
14329        uint64_t N = j + 1;
14330
14331        // The shuffle mask must be equal to (i * 2^N) % M.
14332        if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
14333          IsAnyViable = true;
14334        else
14335          ViableForN[j] = false;
14336      }
14337    // Early exit if we exhaust the possible powers of two.
14338    if (!IsAnyViable)
14339      break;
14340  }
14341
14342  for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
14343    if (ViableForN[j])
14344      return j + 1;
14345
14346  // Return 0 as there is no viable power of two.
14347  return 0;
14348}
14349
14350static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
14351                                     ArrayRef<int> Mask, SDValue V1,
14352                                     SDValue V2, SelectionDAG &DAG) {
14353  MVT MaskEltVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
14354  MVT MaskVecVT = MVT::getVectorVT(MaskEltVT, VT.getVectorNumElements());
14355
14356  SDValue MaskNode = getConstVector(Mask, MaskVecVT, DAG, DL, true);
14357  if (V2.isUndef())
14358    return DAG.getNode(X86ISD::VPERMV, DL, VT, MaskNode, V1);
14359
14360  return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2);
14361}
14362
14363/// Generic lowering of v16i8 shuffles.
14364///
14365/// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
14366/// detect any complexity reducing interleaving. If that doesn't help, it uses
14367/// UNPCK to spread the i8 elements across two i16-element vectors, and uses
14368/// the existing lowering for v8i16 blends on each half, finally PACK-ing them
14369/// back together.
14370static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
14371                                 const APInt &Zeroable, SDValue V1, SDValue V2,
14372                                 const X86Subtarget &Subtarget,
14373                                 SelectionDAG &DAG) {
14374  assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
14375  assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
14376  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
14377
14378  // Try to use shift instructions.
14379  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask,
14380                                          Zeroable, Subtarget, DAG))
14381    return Shift;
14382
14383  // Try to use byte rotation instructions.
14384  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i8, V1, V2, Mask,
14385                                                Subtarget, DAG))
14386    return Rotate;
14387
14388  // Use dedicated pack instructions for masks that match their pattern.
14389  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i8, Mask, V1, V2, DAG,
14390                                       Subtarget))
14391    return V;
14392
14393  // Try to use a zext lowering.
14394  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v16i8, V1, V2, Mask,
14395                                                   Zeroable, Subtarget, DAG))
14396    return ZExt;
14397
14398  // See if we can use SSE4A Extraction / Insertion.
14399  if (Subtarget.hasSSE4A())
14400    if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask,
14401                                          Zeroable, DAG))
14402      return V;
14403
14404  int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
14405
14406  // For single-input shuffles, there are some nicer lowering tricks we can use.
14407  if (NumV2Elements == 0) {
14408    // Check for being able to broadcast a single element.
14409    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i8, V1, V2,
14410                                                    Mask, Subtarget, DAG))
14411      return Broadcast;
14412
14413    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
14414      return V;
14415
14416    // Check whether we can widen this to an i16 shuffle by duplicating bytes.
14417    // Notably, this handles splat and partial-splat shuffles more efficiently.
14418    // However, it only makes sense if the pre-duplication shuffle simplifies
14419    // things significantly. Currently, this means we need to be able to
14420    // express the pre-duplication shuffle as an i16 shuffle.
14421    //
14422    // FIXME: We should check for other patterns which can be widened into an
14423    // i16 shuffle as well.
14424    auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
14425      for (int i = 0; i < 16; i += 2)
14426        if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
14427          return false;
14428
14429      return true;
14430    };
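    // For example, a splat such as {5, 5, 5, ..., 5} or any mask where each
    // adjacent byte pair reads a single source byte (or is undef) can be
    // widened this way.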
14431    auto tryToWidenViaDuplication = [&]() -> SDValue {
14432      if (!canWidenViaDuplication(Mask))
14433        return SDValue();
14434      SmallVector<int, 4> LoInputs;
14435      copy_if(Mask, std::back_inserter(LoInputs),
14436              [](int M) { return M >= 0 && M < 8; });
14437      array_pod_sort(LoInputs.begin(), LoInputs.end());
14438      LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
14439                     LoInputs.end());
14440      SmallVector<int, 4> HiInputs;
14441      copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; });
14442      array_pod_sort(HiInputs.begin(), HiInputs.end());
14443      HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
14444                     HiInputs.end());
14445
14446      bool TargetLo = LoInputs.size() >= HiInputs.size();
14447      ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
14448      ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
14449
14450      int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
14451      SmallDenseMap<int, int, 8> LaneMap;
14452      for (int I : InPlaceInputs) {
14453        PreDupI16Shuffle[I/2] = I/2;
14454        LaneMap[I] = I;
14455      }
14456      int j = TargetLo ? 0 : 4, je = j + 4;
14457      for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
14458        // Check if j is already a shuffle of this input. This happens when
14459        // there are two adjacent bytes after we move the low one.
14460        if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
14461          // If we haven't yet mapped the input, search for a slot into which
14462          // we can map it.
14463          while (j < je && PreDupI16Shuffle[j] >= 0)
14464            ++j;
14465
14466          if (j == je)
14467            // A simple i16 shuffle can't place the inputs into one half; bail.
14468            return SDValue();
14469
14470          // Map this input with the i16 shuffle.
14471          PreDupI16Shuffle[j] = MovingInputs[i] / 2;
14472        }
14473
14474        // Update the lane map based on the mapping we ended up with.
14475        LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
14476      }
14477      V1 = DAG.getBitcast(
14478          MVT::v16i8,
14479          DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14480                               DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
14481
14482      // Unpack the bytes to form the i16s that will be shuffled into place.
14483      bool EvenInUse = false, OddInUse = false;
14484      for (int i = 0; i < 16; i += 2) {
14485        EvenInUse |= (Mask[i + 0] >= 0);
14486        OddInUse |= (Mask[i + 1] >= 0);
14487        if (EvenInUse && OddInUse)
14488          break;
14489      }
14490      V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
14491                       MVT::v16i8, EvenInUse ? V1 : DAG.getUNDEF(MVT::v16i8),
14492                       OddInUse ? V1 : DAG.getUNDEF(MVT::v16i8));
14493
14494      int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
14495      for (int i = 0; i < 16; ++i)
14496        if (Mask[i] >= 0) {
14497          int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
14498          assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
14499          if (PostDupI16Shuffle[i / 2] < 0)
14500            PostDupI16Shuffle[i / 2] = MappedMask;
14501          else
14502            assert(PostDupI16Shuffle[i / 2] == MappedMask &&
14503                   "Conflicting entries in the original shuffle!");
14504        }
14505      return DAG.getBitcast(
14506          MVT::v16i8,
14507          DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14508                               DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
14509    };
14510    if (SDValue V = tryToWidenViaDuplication())
14511      return V;
14512  }
14513
14514  if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask,
14515                                             Zeroable, Subtarget, DAG))
14516    return Masked;
14517
14518  // Use dedicated unpack instructions for masks that match their pattern.
14519  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
14520    return V;
14521
14522  // Try to use byte shift instructions to mask.
14523  if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v16i8, V1, V2, Mask,
14524                                              Zeroable, Subtarget, DAG))
14525    return V;
14526
14527  // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
14528  // with PSHUFB. It is important to do this before we attempt to generate any
14529  // blends but after all of the single-input lowerings. If the single input
14530  // lowerings can find an instruction sequence that is faster than a PSHUFB, we
14531  // want to preserve that and we can DAG combine any longer sequences into
14532  // a PSHUFB in the end. But once we start blending from multiple inputs,
14533  // the complexity of DAG combining bad patterns back into PSHUFB is too high,
14534  // and there are *very* few patterns that would actually be faster than the
14535  // PSHUFB approach because of its ability to zero lanes.
14536  //
14537  // FIXME: The only exceptions to the above are blends which are exact
14538  // interleavings with direct instructions supporting them. We currently don't
14539  // handle those well here.
14540  if (Subtarget.hasSSSE3()) {
14541    bool V1InUse = false;
14542    bool V2InUse = false;
14543
14544    SDValue PSHUFB = lowerShuffleAsBlendOfPSHUFBs(
14545        DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse);
14546
14547    // If both V1 and V2 are in use and we can use a direct blend or an unpack,
14548    // do so. This avoids using them to handle blends-with-zero which is
14549    // important as a single pshufb is significantly faster for that.
14550    if (V1InUse && V2InUse) {
14551      if (Subtarget.hasSSE41())
14552        if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
14553                                                Zeroable, Subtarget, DAG))
14554          return Blend;
14555
14556      // We can use an unpack to do the blending rather than an or in some
14557      // cases. Even though the or may be (very slightly) more efficient, we
14558      // prefer this lowering because there are common cases where part of
14559      // the complexity of the shuffles goes away when we do the final blend as
14560      // an unpack.
14561      // FIXME: It might be worth trying to detect if the unpack-feeding
14562      // shuffles will both be pshufb, in which case we shouldn't bother with
14563      // this.
14564      if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(
14565              DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
14566        return Unpack;
14567
14568      // If we have VBMI we can use one VPERM instead of multiple PSHUFBs.
14569      if (Subtarget.hasVBMI() && Subtarget.hasVLX())
14570        return lowerShuffleWithPERMV(DL, MVT::v16i8, Mask, V1, V2, DAG);
14571
14572      // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
14573      // PALIGNR will be cheaper than the second PSHUFB+OR.
14574      if (SDValue V = lowerShuffleAsByteRotateAndPermute(
14575              DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
14576        return V;
14577    }
14578
14579    return PSHUFB;
14580  }
14581
14582  // There are special ways we can lower some single-element blends.
14583  if (NumV2Elements == 1)
14584    if (SDValue V = lowerShuffleAsElementInsertion(
14585            DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
14586      return V;
14587
14588  if (SDValue Blend = lowerShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
14589    return Blend;
14590
14591  // Check whether a compaction lowering can be done. This handles shuffles
14592  // which take every Nth element for some even N. See the helper function for
14593  // details.
14594  //
14595  // We special case these as they can be particularly efficiently handled with
14596  // the PACKUSWB instruction on x86 and they show up in common patterns of
14597  // rearranging bytes to truncate wide elements.
14598  bool IsSingleInput = V2.isUndef();
14599  if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask, IsSingleInput)) {
14600    // NumEvenDrops is the log2 of the element stride. Another way of
14601    // thinking about it is that we need to drop the even elements this many
14602    // times to get the original input.
14603
14604    // First we need to zero all the dropped bytes.
14605    assert(NumEvenDrops <= 3 &&
14606           "No support for dropping even elements more than 3 times.");
14607    SmallVector<SDValue, 16> ByteClearOps(16, DAG.getConstant(0, DL, MVT::i8));
14608    for (unsigned i = 0; i != 16; i += 1 << NumEvenDrops)
14609      ByteClearOps[i] = DAG.getConstant(0xFF, DL, MVT::i8);
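    // For example, with NumEvenDrops == 1 the mask is {0xFF, 0, 0xFF, 0, ...},
    // so the AND keeps only the even bytes and the PACKUS below (acting on the
    // v8i16 view) compacts them together.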
14610    SDValue ByteClearMask = DAG.getBuildVector(MVT::v16i8, DL, ByteClearOps);
14611    V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
14612    if (!IsSingleInput)
14613      V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
14614
14615    // Now pack things back together.
14616    V1 = DAG.getBitcast(MVT::v8i16, V1);
14617    V2 = IsSingleInput ? V1 : DAG.getBitcast(MVT::v8i16, V2);
14618    SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
14619    for (int i = 1; i < NumEvenDrops; ++i) {
14620      Result = DAG.getBitcast(MVT::v8i16, Result);
14621      Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
14622    }
14623
14624    return Result;
14625  }
14626
14627  // Handle multi-input cases by blending single-input shuffles.
14628  if (NumV2Elements > 0)
14629    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2, Mask,
14630                                                Subtarget, DAG);
14631
14632  // The fallback path for single-input shuffles widens this into two v8i16
14633  // vectors with unpacks, shuffles those, and then pulls them back together
14634  // with a pack.
14635  SDValue V = V1;
14636
14637  int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
14638  int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
14639  for (int i = 0; i < 16; ++i)
14640    if (Mask[i] >= 0)
14641      (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
14642
14643  SDValue VLoHalf, VHiHalf;
14644  // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
14645  // them out and avoid using UNPCK{L,H} to extract the elements of V as
14646  // i16s.
14647  if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
14648      none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
14649    // Use a mask to drop the high bytes.
14650    VLoHalf = DAG.getBitcast(MVT::v8i16, V);
14651    VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
14652                          DAG.getConstant(0x00FF, DL, MVT::v8i16));
14653
14654    // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
14655    VHiHalf = DAG.getUNDEF(MVT::v8i16);
14656
14657    // Squash the masks to point directly into VLoHalf.
14658    for (int &M : LoBlendMask)
14659      if (M >= 0)
14660        M /= 2;
14661    for (int &M : HiBlendMask)
14662      if (M >= 0)
14663        M /= 2;
14664  } else {
14665    // Otherwise just unpack the low half of V into VLoHalf and the high half into
14666    // VHiHalf so that we can blend them as i16s.
14667    SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
14668
14669    VLoHalf = DAG.getBitcast(
14670        MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
14671    VHiHalf = DAG.getBitcast(
14672        MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
14673  }
14674
14675  SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
14676  SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
14677
14678  return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
14679}
14680
14681/// Dispatching routine to lower various 128-bit x86 vector shuffles.
14682///
14683/// This routine breaks down the specific type of 128-bit shuffle and
14684/// dispatches to the lowering routines accordingly.
14685static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
14686                                  MVT VT, SDValue V1, SDValue V2,
14687                                  const APInt &Zeroable,
14688                                  const X86Subtarget &Subtarget,
14689                                  SelectionDAG &DAG) {
14690  switch (VT.SimpleTy) {
14691  case MVT::v2i64:
14692    return lowerV2I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14693  case MVT::v2f64:
14694    return lowerV2F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14695  case MVT::v4i32:
14696    return lowerV4I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14697  case MVT::v4f32:
14698    return lowerV4F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14699  case MVT::v8i16:
14700    return lowerV8I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14701  case MVT::v16i8:
14702    return lowerV16I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14703
14704  default:
14705    llvm_unreachable("Unimplemented!");
14706  }
14707}
14708
14709/// Generic routine to split vector shuffle into half-sized shuffles.
14710///
14711/// This routine just extracts two subvectors, shuffles them independently, and
14712/// then concatenates them back together. This should work effectively with all
14713/// AVX vector shuffle types.
14714static SDValue splitAndLowerShuffle(const SDLoc &DL, MVT VT, SDValue V1,
14715                                    SDValue V2, ArrayRef<int> Mask,
14716                                    SelectionDAG &DAG) {
14717  assert(VT.getSizeInBits() >= 256 &&
14718         "Only for 256-bit or wider vector shuffles!");
14719  assert(V1.getSimpleValueType() == VT && "Bad operand type!");
14720  assert(V2.getSimpleValueType() == VT && "Bad operand type!");
14721
14722  ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
14723  ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
14724
14725  int NumElements = VT.getVectorNumElements();
14726  int SplitNumElements = NumElements / 2;
14727  MVT ScalarVT = VT.getVectorElementType();
14728  MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
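  // For example, a v8f32 shuffle is handled as two v4f32 shuffles whose
  // results are rejoined with a CONCAT_VECTORS at the end.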
14729
14730  // Rather than splitting build-vectors, just build two narrower build
14731  // vectors. This helps shuffling with splats and zeros.
14732  auto SplitVector = [&](SDValue V) {
14733    V = peekThroughBitcasts(V);
14734
14735    MVT OrigVT = V.getSimpleValueType();
14736    int OrigNumElements = OrigVT.getVectorNumElements();
14737    int OrigSplitNumElements = OrigNumElements / 2;
14738    MVT OrigScalarVT = OrigVT.getVectorElementType();
14739    MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
14740
14741    SDValue LoV, HiV;
14742
14743    auto *BV = dyn_cast<BuildVectorSDNode>(V);
14744    if (!BV) {
14745      LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
14746                        DAG.getIntPtrConstant(0, DL));
14747      HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
14748                        DAG.getIntPtrConstant(OrigSplitNumElements, DL));
14749    } else {
14750
14751      SmallVector<SDValue, 16> LoOps, HiOps;
14752      for (int i = 0; i < OrigSplitNumElements; ++i) {
14753        LoOps.push_back(BV->getOperand(i));
14754        HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
14755      }
14756      LoV = DAG.getBuildVector(OrigSplitVT, DL, LoOps);
14757      HiV = DAG.getBuildVector(OrigSplitVT, DL, HiOps);
14758    }
14759    return std::make_pair(DAG.getBitcast(SplitVT, LoV),
14760                          DAG.getBitcast(SplitVT, HiV));
14761  };
14762
14763  SDValue LoV1, HiV1, LoV2, HiV2;
14764  std::tie(LoV1, HiV1) = SplitVector(V1);
14765  std::tie(LoV2, HiV2) = SplitVector(V2);
14766
14767  // Now create two 4-way blends of these half-width vectors.
14768  auto HalfBlend = [&](ArrayRef<int> HalfMask) {
14769    bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
14770    SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
14771    SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
14772    SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
14773    for (int i = 0; i < SplitNumElements; ++i) {
14774      int M = HalfMask[i];
14775      if (M >= NumElements) {
14776        if (M >= NumElements + SplitNumElements)
14777          UseHiV2 = true;
14778        else
14779          UseLoV2 = true;
14780        V2BlendMask[i] = M - NumElements;
14781        BlendMask[i] = SplitNumElements + i;
14782      } else if (M >= 0) {
14783        if (M >= SplitNumElements)
14784          UseHiV1 = true;
14785        else
14786          UseLoV1 = true;
14787        V1BlendMask[i] = M;
14788        BlendMask[i] = i;
14789      }
14790    }
14791
14792    // Because the lowering happens after all combining takes place, we need to
14793    // manually combine these blend masks as much as possible so that we create
14794    // a minimal number of high-level vector shuffle nodes.
14795
14796    // First try just blending the halves of V1 or V2.
14797    if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
14798      return DAG.getUNDEF(SplitVT);
14799    if (!UseLoV2 && !UseHiV2)
14800      return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
14801    if (!UseLoV1 && !UseHiV1)
14802      return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
14803
14804    SDValue V1Blend, V2Blend;
14805    if (UseLoV1 && UseHiV1) {
14806      V1Blend =
14807        DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
14808    } else {
14809      // We only use half of V1 so map the usage down into the final blend mask.
14810      V1Blend = UseLoV1 ? LoV1 : HiV1;
14811      for (int i = 0; i < SplitNumElements; ++i)
14812        if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
14813          BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
14814    }
14815    if (UseLoV2 && UseHiV2) {
14816      V2Blend =
14817        DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
14818    } else {
14819      // We only use half of V2 so map the usage down into the final blend mask.
14820      V2Blend = UseLoV2 ? LoV2 : HiV2;
14821      for (int i = 0; i < SplitNumElements; ++i)
14822        if (BlendMask[i] >= SplitNumElements)
14823          BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
14824    }
14825    return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
14826  };
14827  SDValue Lo = HalfBlend(LoMask);
14828  SDValue Hi = HalfBlend(HiMask);
14829  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
14830}
14831
14832/// Either split a vector in halves or decompose the shuffles and the
14833/// blend.
14834///
14835/// This is provided as a good fallback for many lowerings of non-single-input
14836/// shuffles with more than one 128-bit lane. In those cases, we want to select
14837/// between splitting the shuffle into 128-bit components and stitching those
14838/// back together vs. extracting the single-input shuffles and blending those
14839/// results.
14840static SDValue lowerShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, SDValue V1,
14841                                          SDValue V2, ArrayRef<int> Mask,
14842                                          const X86Subtarget &Subtarget,
14843                                          SelectionDAG &DAG) {
14844  assert(!V2.isUndef() && "This routine must not be used to lower single-input "
14845         "shuffles as it could then recurse on itself.");
14846  int Size = Mask.size();
14847
14848  // If this can be modeled as a broadcast of two elements followed by a blend,
14849  // prefer that lowering. This is especially important because broadcasts can
14850  // often fold with memory operands.
14851  auto DoBothBroadcast = [&] {
14852    int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
14853    for (int M : Mask)
14854      if (M >= Size) {
14855        if (V2BroadcastIdx < 0)
14856          V2BroadcastIdx = M - Size;
14857        else if (M - Size != V2BroadcastIdx)
14858          return false;
14859      } else if (M >= 0) {
14860        if (V1BroadcastIdx < 0)
14861          V1BroadcastIdx = M;
14862        else if (M != V1BroadcastIdx)
14863          return false;
14864      }
14865    return true;
14866  };
14867  if (DoBothBroadcast())
14868    return lowerShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
14869                                                Subtarget, DAG);
14870
14871  // If the inputs all stem from a single 128-bit lane of each input, then we
14872  // split them rather than blending because the split will decompose to
14873  // unusually few instructions.
14874  int LaneCount = VT.getSizeInBits() / 128;
14875  int LaneSize = Size / LaneCount;
14876  SmallBitVector LaneInputs[2];
14877  LaneInputs[0].resize(LaneCount, false);
14878  LaneInputs[1].resize(LaneCount, false);
14879  for (int i = 0; i < Size; ++i)
14880    if (Mask[i] >= 0)
14881      LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
14882  if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
14883    return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
14884
14885  // Otherwise, just fall back to decomposed shuffles and a blend. This requires
14886  // that the decomposed single-input shuffles don't end up here.
14887  return lowerShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, Subtarget,
14888                                              DAG);
14889}
14890
14891// Lower as SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
14892// TODO: Extend to support v8f32 (+ 512-bit shuffles).
14893static SDValue lowerShuffleAsLanePermuteAndSHUFP(const SDLoc &DL, MVT VT,
14894                                                 SDValue V1, SDValue V2,
14895                                                 ArrayRef<int> Mask,
14896                                                 SelectionDAG &DAG) {
14897  assert(VT == MVT::v4f64 && "Only for v4f64 shuffles");
14898
14899  int LHSMask[4] = {-1, -1, -1, -1};
14900  int RHSMask[4] = {-1, -1, -1, -1};
14901  unsigned SHUFPMask = 0;
14902
14903  // As SHUFPD uses a single LHS/RHS element per lane, we can always
14904  // perform the shuffle once the lanes have been shuffled in place.
14905  for (int i = 0; i != 4; ++i) {
14906    int M = Mask[i];
14907    if (M < 0)
14908      continue;
14909    int LaneBase = i & ~1;
14910    auto &LaneMask = (i & 1) ? RHSMask : LHSMask;
14911    LaneMask[LaneBase + (M & 1)] = M;
14912    SHUFPMask |= (M & 1) << i;
14913  }
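  // Illustrative example: a hypothetical Mask of {2, 7, 0, 5} yields
  // LHSMask == {2, -1, 0, -1}, RHSMask == {-1, 7, -1, 5} and
  // SHUFPMask == 0b1010.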
14914
14915  SDValue LHS = DAG.getVectorShuffle(VT, DL, V1, V2, LHSMask);
14916  SDValue RHS = DAG.getVectorShuffle(VT, DL, V1, V2, RHSMask);
14917  return DAG.getNode(X86ISD::SHUFP, DL, VT, LHS, RHS,
14918                     DAG.getTargetConstant(SHUFPMask, DL, MVT::i8));
14919}
14920
14921/// Lower a vector shuffle crossing multiple 128-bit lanes as
14922/// a lane permutation followed by a per-lane permutation.
14923///
14924/// This is mainly for cases where we can have non-repeating permutes
14925/// in each lane.
14926///
14927/// TODO: This is very similar to lowerShuffleAsLanePermuteAndRepeatedMask,
14928/// we should investigate merging them.
14929static SDValue lowerShuffleAsLanePermuteAndPermute(
14930    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14931    SelectionDAG &DAG, const X86Subtarget &Subtarget) {
14932  int NumElts = VT.getVectorNumElements();
14933  int NumLanes = VT.getSizeInBits() / 128;
14934  int NumEltsPerLane = NumElts / NumLanes;
14935
14936  SmallVector<int, 4> SrcLaneMask(NumLanes, SM_SentinelUndef);
14937  SmallVector<int, 16> PermMask(NumElts, SM_SentinelUndef);
14938
14939  for (int i = 0; i != NumElts; ++i) {
14940    int M = Mask[i];
14941    if (M < 0)
14942      continue;
14943
14944    // Ensure that each lane comes from a single source lane.
14945    int SrcLane = M / NumEltsPerLane;
14946    int DstLane = i / NumEltsPerLane;
14947    if (!isUndefOrEqual(SrcLaneMask[DstLane], SrcLane))
14948      return SDValue();
14949    SrcLaneMask[DstLane] = SrcLane;
14950
14951    PermMask[i] = (DstLane * NumEltsPerLane) + (M % NumEltsPerLane);
14952  }
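  // Illustrative example: for a hypothetical v8f32 mask {6,7,4,5,2,3,0,1} the
  // lanes swap (SrcLaneMask == {1, 0}) and PermMask becomes
  // {2,3,0,1,6,7,4,5}: a lane permute followed by an in-lane permute.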
14953
14954  // Make sure we set all elements of the lane mask, to avoid undef propagation.
14955  SmallVector<int, 16> LaneMask(NumElts, SM_SentinelUndef);
14956  for (int DstLane = 0; DstLane != NumLanes; ++DstLane) {
14957    int SrcLane = SrcLaneMask[DstLane];
14958    if (0 <= SrcLane)
14959      for (int j = 0; j != NumEltsPerLane; ++j) {
14960        LaneMask[(DstLane * NumEltsPerLane) + j] =
14961            (SrcLane * NumEltsPerLane) + j;
14962      }
14963  }
14964
14965  // If we're only shuffling a single lowest lane and the rest are identity
14966  // then don't bother.
14967  // TODO - isShuffleMaskInputInPlace could be extended to something like this.
14968  int NumIdentityLanes = 0;
14969  bool OnlyShuffleLowestLane = true;
14970  for (int i = 0; i != NumLanes; ++i) {
14971    if (isSequentialOrUndefInRange(PermMask, i * NumEltsPerLane, NumEltsPerLane,
14972                                   i * NumEltsPerLane))
14973      NumIdentityLanes++;
14974    else if (SrcLaneMask[i] != 0 && SrcLaneMask[i] != NumLanes)
14975      OnlyShuffleLowestLane = false;
14976  }
14977  if (OnlyShuffleLowestLane && NumIdentityLanes == (NumLanes - 1))
14978    return SDValue();
14979
14980  SDValue LanePermute = DAG.getVectorShuffle(VT, DL, V1, V2, LaneMask);
14981  return DAG.getVectorShuffle(VT, DL, LanePermute, DAG.getUNDEF(VT), PermMask);
14982}
14983
14984/// Lower a vector shuffle crossing multiple 128-bit lanes by shuffling one
14985/// source with a lane permutation.
14986///
14987/// This lowering strategy results in four instructions in the worst case for a
14988/// single-input cross-lane shuffle, which is fewer than any other fully general
14989/// cross-lane shuffle strategy I'm aware of. Special cases for each particular
14990/// shuffle pattern should be handled prior to trying this lowering.
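///
/// e.g. a v8f32 mask <0,1,6,7,4,5,2,3> is lowered by flipping the 128-bit
/// lanes of V1 and then shuffling V1 against the flipped copy with the
/// in-lane mask <0,1,10,11,4,5,14,15>.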
14991static SDValue lowerShuffleAsLanePermuteAndShuffle(
14992    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14993    SelectionDAG &DAG, const X86Subtarget &Subtarget) {
14994  // FIXME: This should probably be generalized for 512-bit vectors as well.
14995  assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
14996  int Size = Mask.size();
14997  int LaneSize = Size / 2;
14998
14999  // Fold to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
15000  // Only do this if the elements aren't all from the lower lane,
15001  // otherwise we're (probably) better off doing a split.
15002  if (VT == MVT::v4f64 &&
15003      !all_of(Mask, [LaneSize](int M) { return M < LaneSize; }))
15004    if (SDValue V =
15005            lowerShuffleAsLanePermuteAndSHUFP(DL, VT, V1, V2, Mask, DAG))
15006      return V;
15007
15008  // If there are only inputs from one 128-bit lane, splitting will in fact be
15009  // less expensive. The flags track whether the given lane contains an element
15010  // that crosses to another lane.
15011  if (!Subtarget.hasAVX2()) {
15012    bool LaneCrossing[2] = {false, false};
15013    for (int i = 0; i < Size; ++i)
15014      if (Mask[i] >= 0 && ((Mask[i] % Size) / LaneSize) != (i / LaneSize))
15015        LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
15016    if (!LaneCrossing[0] || !LaneCrossing[1])
15017      return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
15018  } else {
15019    bool LaneUsed[2] = {false, false};
15020    for (int i = 0; i < Size; ++i)
15021      if (Mask[i] >= 0)
15022        LaneUsed[(Mask[i] % Size) / LaneSize] = true;
15023    if (!LaneUsed[0] || !LaneUsed[1])
15024      return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
15025  }
15026
15027  // TODO - we could support shuffling V2 in the Flipped input.
15028  assert(V2.isUndef() &&
15029         "This last part of this routine only works on single input shuffles");
15030
15031  SmallVector<int, 32> InLaneMask(Mask.begin(), Mask.end());
15032  for (int i = 0; i < Size; ++i) {
15033    int &M = InLaneMask[i];
15034    if (M < 0)
15035      continue;
15036    if (((M % Size) / LaneSize) != (i / LaneSize))
15037      M = (M % LaneSize) + ((i / LaneSize) * LaneSize) + Size;
15038  }
15039  assert(!is128BitLaneCrossingShuffleMask(VT, InLaneMask) &&
15040         "In-lane shuffle mask expected");
15041
15042  // Flip the lanes, and shuffle the results which should now be in-lane.
15043  MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
15044  SDValue Flipped = DAG.getBitcast(PVT, V1);
15045  Flipped =
15046      DAG.getVectorShuffle(PVT, DL, Flipped, DAG.getUNDEF(PVT), {2, 3, 0, 1});
15047  Flipped = DAG.getBitcast(VT, Flipped);
15048  return DAG.getVectorShuffle(VT, DL, V1, Flipped, InLaneMask);
15049}
15050
15051/// Handle lowering 2-lane 128-bit shuffles.
15052static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
15053                                  SDValue V2, ArrayRef<int> Mask,
15054                                  const APInt &Zeroable,
15055                                  const X86Subtarget &Subtarget,
15056                                  SelectionDAG &DAG) {
15057  // With AVX2, use VPERMQ/VPERMPD for unary shuffles to allow memory folding.
15058  if (Subtarget.hasAVX2() && V2.isUndef())
15059    return SDValue();
15060
15061  bool V2IsZero = !V2.isUndef() && ISD::isBuildVectorAllZeros(V2.getNode());
15062
15063  SmallVector<int, 4> WidenedMask;
15064  if (!canWidenShuffleElements(Mask, Zeroable, V2IsZero, WidenedMask))
15065    return SDValue();
15066
15067  bool IsLowZero = (Zeroable & 0x3) == 0x3;
15068  bool IsHighZero = (Zeroable & 0xc) == 0xc;
15069
15070  // Try to use an insert into a zero vector.
15071  if (WidenedMask[0] == 0 && IsHighZero) {
15072    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
15073    SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
15074                              DAG.getIntPtrConstant(0, DL));
15075    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
15076                       getZeroVector(VT, Subtarget, DAG, DL), LoV,
15077                       DAG.getIntPtrConstant(0, DL));
15078  }
15079
15080  // TODO: If minimizing size and one of the inputs is a zero vector and the
15081  // zero vector has only one use, we could use a VPERM2X128 to save the
15082  // instruction bytes needed to explicitly generate the zero vector.
15083
15084  // Blends are faster and handle all the non-lane-crossing cases.
15085  if (SDValue Blend = lowerShuffleAsBlend(DL, VT, V1, V2, Mask, Zeroable,
15086                                          Subtarget, DAG))
15087    return Blend;
15088
15089  // If either input operand is a zero vector, prefer VPERM2X128 (below) because
15090  // its mask allows us to replace the zero input with an implicit zero.
15091  if (!IsLowZero && !IsHighZero) {
15092    // Check for patterns which can be matched with a single insert of a 128-bit
15093    // subvector.
15094    bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1});
15095    if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5})) {
15096
15097      // With AVX1, use vperm2f128 (below) to allow load folding. Otherwise,
15098      // this will likely become vinsertf128 which can't fold a 256-bit memop.
15099      if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) {
15100        MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
15101        SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
15102                                     OnlyUsesV1 ? V1 : V2,
15103                                     DAG.getIntPtrConstant(0, DL));
15104        return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
15105                           DAG.getIntPtrConstant(2, DL));
15106      }
15107    }
15108
15109    // Try to use SHUF128 if possible.
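    // e.g. a widened mask <1,2> gives PermMask 0b01: take the upper 128 bits
    // of V1 and the lower 128 bits of V2.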
15110    if (Subtarget.hasVLX()) {
15111      if (WidenedMask[0] < 2 && WidenedMask[1] >= 2) {
15112        unsigned PermMask = ((WidenedMask[0] % 2) << 0) |
15113                            ((WidenedMask[1] % 2) << 1);
15114        return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
15115                           DAG.getTargetConstant(PermMask, DL, MVT::i8));
15116      }
15117    }
15118  }
15119
15120  // Otherwise form a 128-bit permutation. After accounting for undefs,
15121  // convert the 64-bit shuffle mask selection values into 128-bit
15122  // selection bits by dividing the indexes by 2 and shifting into positions
15123  // defined by a vperm2*128 instruction's immediate control byte.
15124
15125  // The immediate permute control byte looks like this:
15126  //    [1:0] - select 128 bits from sources for low half of destination
15127  //    [2]   - ignore
15128  //    [3]   - zero low half of destination
15129  //    [5:4] - select 128 bits from sources for high half of destination
15130  //    [6]   - ignore
15131  //    [7]   - zero high half of destination
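  //
  // e.g. a widened mask <1,3> (upper halves of V1 and V2) gives PermMask 0x31,
  // and <3,u> with a zeroable high half gives PermMask 0x83.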
15132
15133  assert((WidenedMask[0] >= 0 || IsLowZero) &&
15134         (WidenedMask[1] >= 0 || IsHighZero) && "Undef half?");
15135
15136  unsigned PermMask = 0;
15137  PermMask |= IsLowZero  ? 0x08 : (WidenedMask[0] << 0);
15138  PermMask |= IsHighZero ? 0x80 : (WidenedMask[1] << 4);
15139
15140  // Check the immediate mask and replace unused sources with undef.
15141  if ((PermMask & 0x0a) != 0x00 && (PermMask & 0xa0) != 0x00)
15142    V1 = DAG.getUNDEF(VT);
15143  if ((PermMask & 0x0a) != 0x02 && (PermMask & 0xa0) != 0x20)
15144    V2 = DAG.getUNDEF(VT);
15145
15146  return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
15147                     DAG.getTargetConstant(PermMask, DL, MVT::i8));
15148}
15149
15150/// Lower a vector shuffle by first fixing the 128-bit lanes and then
15151/// shuffling each lane.
15152///
15153/// This attempts to create a repeated lane shuffle where each lane uses one
15154/// or two of the lanes of the inputs. The lanes of the input vectors are
15155/// shuffled in one or two independent shuffles to get the lanes into the
15156/// position needed by the final shuffle.
15157static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
15158    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
15159    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
15160  assert(!V2.isUndef() && "This is only useful with multiple inputs.");
15161
15162  if (is128BitLaneRepeatedShuffleMask(VT, Mask))
15163    return SDValue();
15164
15165  int NumElts = Mask.size();
15166  int NumLanes = VT.getSizeInBits() / 128;
15167  int NumLaneElts = 128 / VT.getScalarSizeInBits();
15168  SmallVector<int, 16> RepeatMask(NumLaneElts, -1);
15169  SmallVector<std::array<int, 2>, 2> LaneSrcs(NumLanes, {{-1, -1}});
15170
15171  // First pass will try to fill in the RepeatMask from lanes that need two
15172  // sources.
15173  for (int Lane = 0; Lane != NumLanes; ++Lane) {
15174    int Srcs[2] = {-1, -1};
15175    SmallVector<int, 16> InLaneMask(NumLaneElts, -1);
15176    for (int i = 0; i != NumLaneElts; ++i) {
15177      int M = Mask[(Lane * NumLaneElts) + i];
15178      if (M < 0)
15179        continue;
15180      // Determine which of the possible input lanes (NumLanes from each source)
15181      // this element comes from. Assign that as one of the sources for this
15182      // lane. We can assign up to 2 sources for this lane. If we run out of
15183      // sources we can't do anything.
15184      int LaneSrc = M / NumLaneElts;
15185      int Src;
15186      if (Srcs[0] < 0 || Srcs[0] == LaneSrc)
15187        Src = 0;
15188      else if (Srcs[1] < 0 || Srcs[1] == LaneSrc)
15189        Src = 1;
15190      else
15191        return SDValue();
15192
15193      Srcs[Src] = LaneSrc;
15194      InLaneMask[i] = (M % NumLaneElts) + Src * NumElts;
15195    }
15196
15197    // If this lane has two sources, see if it fits with the repeat mask so far.
15198    if (Srcs[1] < 0)
15199      continue;
15200
15201    LaneSrcs[Lane][0] = Srcs[0];
15202    LaneSrcs[Lane][1] = Srcs[1];
15203
15204    auto MatchMasks = [](ArrayRef<int> M1, ArrayRef<int> M2) {
15205      assert(M1.size() == M2.size() && "Unexpected mask size");
15206      for (int i = 0, e = M1.size(); i != e; ++i)
15207        if (M1[i] >= 0 && M2[i] >= 0 && M1[i] != M2[i])
15208          return false;
15209      return true;
15210    };
15211
15212    auto MergeMasks = [](ArrayRef<int> Mask, MutableArrayRef<int> MergedMask) {
15213      assert(Mask.size() == MergedMask.size() && "Unexpected mask size");
15214      for (int i = 0, e = MergedMask.size(); i != e; ++i) {
15215        int M = Mask[i];
15216        if (M < 0)
15217          continue;
15218        assert((MergedMask[i] < 0 || MergedMask[i] == M) &&
15219               "Unexpected mask element");
15220        MergedMask[i] = M;
15221      }
15222    };
15223
15224    if (MatchMasks(InLaneMask, RepeatMask)) {
15225      // Merge this lane mask into the final repeat mask.
15226      MergeMasks(InLaneMask, RepeatMask);
15227      continue;
15228    }
15229
15230    // Didn't find a match. Swap the operands and try again.
15231    std::swap(LaneSrcs[Lane][0], LaneSrcs[Lane][1]);
15232    ShuffleVectorSDNode::commuteMask(InLaneMask);
15233
15234    if (MatchMasks(InLaneMask, RepeatMask)) {
15235      // Merge this lane mask into the final repeat mask.
15236      MergeMasks(InLaneMask, RepeatMask);
15237      continue;
15238    }
15239
15240    // Couldn't find a match with the operands in either order.
15241    return SDValue();
15242  }
15243
15244  // Now handle any lanes with only one source.
15245  for (int Lane = 0; Lane != NumLanes; ++Lane) {
15246    // If this lane has already been processed, skip it.
15247    if (LaneSrcs[Lane][0] >= 0)
15248      continue;
15249
15250    for (int i = 0; i != NumLaneElts; ++i) {
15251      int M = Mask[(Lane * NumLaneElts) + i];
15252      if (M < 0)
15253        continue;
15254
15255      // If RepeatMask isn't defined yet, we can define it ourselves.
15256      if (RepeatMask[i] < 0)
15257        RepeatMask[i] = M % NumLaneElts;
15258
15259      if (RepeatMask[i] < NumElts) {
15260        if (RepeatMask[i] != M % NumLaneElts)
15261          return SDValue();
15262        LaneSrcs[Lane][0] = M / NumLaneElts;
15263      } else {
15264        if (RepeatMask[i] != ((M % NumLaneElts) + NumElts))
15265          return SDValue();
15266        LaneSrcs[Lane][1] = M / NumLaneElts;
15267      }
15268    }
15269
15270    if (LaneSrcs[Lane][0] < 0 && LaneSrcs[Lane][1] < 0)
15271      return SDValue();
15272  }
15273
15274  SmallVector<int, 16> NewMask(NumElts, -1);
15275  for (int Lane = 0; Lane != NumLanes; ++Lane) {
15276    int Src = LaneSrcs[Lane][0];
15277    for (int i = 0; i != NumLaneElts; ++i) {
15278      int M = -1;
15279      if (Src >= 0)
15280        M = Src * NumLaneElts + i;
15281      NewMask[Lane * NumLaneElts + i] = M;
15282    }
15283  }
15284  SDValue NewV1 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
15285  // Ensure we didn't get back the shuffle we started with.
15286  // FIXME: This is a hack to make up for some splat handling code in
15287  // getVectorShuffle.
15288  if (isa<ShuffleVectorSDNode>(NewV1) &&
15289      cast<ShuffleVectorSDNode>(NewV1)->getMask() == Mask)
15290    return SDValue();
15291
15292  for (int Lane = 0; Lane != NumLanes; ++Lane) {
15293    int Src = LaneSrcs[Lane][1];
15294    for (int i = 0; i != NumLaneElts; ++i) {
15295      int M = -1;
15296      if (Src >= 0)
15297        M = Src * NumLaneElts + i;
15298      NewMask[Lane * NumLaneElts + i] = M;
15299    }
15300  }
15301  SDValue NewV2 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
15302  // Ensure we didn't get back the shuffle we started with.
15303  // FIXME: This is a hack to make up for some splat handling code in
15304  // getVectorShuffle.
15305  if (isa<ShuffleVectorSDNode>(NewV2) &&
15306      cast<ShuffleVectorSDNode>(NewV2)->getMask() == Mask)
15307    return SDValue();
15308
15309  for (int i = 0; i != NumElts; ++i) {
15310    NewMask[i] = RepeatMask[i % NumLaneElts];
15311    if (NewMask[i] < 0)
15312      continue;
15313
15314    NewMask[i] += (i / NumLaneElts) * NumLaneElts;
15315  }
15316  return DAG.getVectorShuffle(VT, DL, NewV1, NewV2, NewMask);
15317}
15318
15319/// If the input shuffle mask results in a vector that is undefined in all upper
15320/// or lower half elements and that mask accesses only 2 halves of the
15321/// shuffle's operands, return true. A mask of half the width with mask indexes
15322/// adjusted to access the extracted halves of the original shuffle operands is
15323/// returned in HalfMask. HalfIdx1 and HalfIdx2 identify which half (lower or
15324/// upper) of each input operand is accessed.
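///
/// e.g. a v8f32 mask <u,u,u,u,1,9,2,10> yields HalfMask <1,5,2,6> with
/// HalfIdx1 = 0 (lower half of V1) and HalfIdx2 = 2 (lower half of V2).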
15325static bool
15326getHalfShuffleMask(ArrayRef<int> Mask, MutableArrayRef<int> HalfMask,
15327                   int &HalfIdx1, int &HalfIdx2) {
15328  assert((Mask.size() == HalfMask.size() * 2) &&
15329         "Expected input mask to be twice as long as output");
15330
15331  // Exactly one half of the result must be undef to allow narrowing.
15332  bool UndefLower = isUndefLowerHalf(Mask);
15333  bool UndefUpper = isUndefUpperHalf(Mask);
15334  if (UndefLower == UndefUpper)
15335    return false;
15336
15337  unsigned HalfNumElts = HalfMask.size();
15338  unsigned MaskIndexOffset = UndefLower ? HalfNumElts : 0;
15339  HalfIdx1 = -1;
15340  HalfIdx2 = -1;
15341  for (unsigned i = 0; i != HalfNumElts; ++i) {
15342    int M = Mask[i + MaskIndexOffset];
15343    if (M < 0) {
15344      HalfMask[i] = M;
15345      continue;
15346    }
15347
15348    // Determine which of the 4 half vectors this element is from.
15349    // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
15350    int HalfIdx = M / HalfNumElts;
15351
15352    // Determine the element index into its half vector source.
15353    int HalfElt = M % HalfNumElts;
15354
15355    // We can shuffle with up to 2 half vectors; set the new 'half'
15356    // shuffle mask accordingly.
15357    if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
15358      HalfMask[i] = HalfElt;
15359      HalfIdx1 = HalfIdx;
15360      continue;
15361    }
15362    if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
15363      HalfMask[i] = HalfElt + HalfNumElts;
15364      HalfIdx2 = HalfIdx;
15365      continue;
15366    }
15367
15368    // Too many half vectors referenced.
15369    return false;
15370  }
15371
15372  return true;
15373}
15374
15375/// Given the output values from getHalfShuffleMask(), create a half width
15376/// shuffle of extracted vectors followed by an insert back to full width.
15377static SDValue getShuffleHalfVectors(const SDLoc &DL, SDValue V1, SDValue V2,
15378                                     ArrayRef<int> HalfMask, int HalfIdx1,
15379                                     int HalfIdx2, bool UndefLower,
15380                                     SelectionDAG &DAG, bool UseConcat = false) {
15381  assert(V1.getValueType() == V2.getValueType() && "Different sized vectors?");
15382  assert(V1.getValueType().isSimple() && "Expecting only simple types");
15383
15384  MVT VT = V1.getSimpleValueType();
15385  MVT HalfVT = VT.getHalfNumVectorElementsVT();
15386  unsigned HalfNumElts = HalfVT.getVectorNumElements();
15387
15388  auto getHalfVector = [&](int HalfIdx) {
15389    if (HalfIdx < 0)
15390      return DAG.getUNDEF(HalfVT);
15391    SDValue V = (HalfIdx < 2 ? V1 : V2);
15392    HalfIdx = (HalfIdx % 2) * HalfNumElts;
15393    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
15394                       DAG.getIntPtrConstant(HalfIdx, DL));
15395  };
15396
15397  // ins undef, (shuf (ext V1, HalfIdx1), (ext V2, HalfIdx2), HalfMask), Offset
15398  SDValue Half1 = getHalfVector(HalfIdx1);
15399  SDValue Half2 = getHalfVector(HalfIdx2);
15400  SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
15401  if (UseConcat) {
15402    SDValue Op0 = V;
15403    SDValue Op1 = DAG.getUNDEF(HalfVT);
15404    if (UndefLower)
15405      std::swap(Op0, Op1);
15406    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Op0, Op1);
15407  }
15408
15409  unsigned Offset = UndefLower ? HalfNumElts : 0;
15410  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
15411                     DAG.getIntPtrConstant(Offset, DL));
15412}
15413
15414/// Lower shuffles where an entire half of a 256 or 512-bit vector is UNDEF.
15415/// This allows for fast cases such as subvector extraction/insertion
15416/// or shuffling smaller vector types which can lower more efficiently.
15417static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
15418                                         SDValue V2, ArrayRef<int> Mask,
15419                                         const X86Subtarget &Subtarget,
15420                                         SelectionDAG &DAG) {
15421  assert((VT.is256BitVector() || VT.is512BitVector()) &&
15422         "Expected 256-bit or 512-bit vector");
15423
15424  bool UndefLower = isUndefLowerHalf(Mask);
15425  if (!UndefLower && !isUndefUpperHalf(Mask))
15426    return SDValue();
15427
15428  assert((!UndefLower || !isUndefUpperHalf(Mask)) &&
15429         "Completely undef shuffle mask should have been simplified already");
15430
15431  // Upper half is undef and lower half is whole upper subvector.
15432  // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
15433  MVT HalfVT = VT.getHalfNumVectorElementsVT();
15434  unsigned HalfNumElts = HalfVT.getVectorNumElements();
15435  if (!UndefLower &&
15436      isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
15437    SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
15438                             DAG.getIntPtrConstant(HalfNumElts, DL));
15439    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
15440                       DAG.getIntPtrConstant(0, DL));
15441  }
15442
15443  // Lower half is undef and upper half is whole lower subvector.
15444  // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
15445  if (UndefLower &&
15446      isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
15447    SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
15448                             DAG.getIntPtrConstant(0, DL));
15449    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
15450                       DAG.getIntPtrConstant(HalfNumElts, DL));
15451  }
15452
15453  int HalfIdx1, HalfIdx2;
15454  SmallVector<int, 8> HalfMask(HalfNumElts);
15455  if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2))
15456    return SDValue();
15457
15458  assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
15459
15460  // Only shuffle the halves of the inputs when useful.
15461  unsigned NumLowerHalves =
15462      (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
15463  unsigned NumUpperHalves =
15464      (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
15465  assert(NumLowerHalves + NumUpperHalves <= 2 && "Only 1 or 2 halves allowed");
15466
15467  // Determine the larger pattern of undef/halves, then decide if it's worth
15468  // splitting the shuffle based on subtarget capabilities and types.
15469  unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
15470  if (!UndefLower) {
15471    // XXXXuuuu: no insert is needed.
15472    // Always extract lowers when setting lower - these are all free subreg ops.
15473    if (NumUpperHalves == 0)
15474      return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15475                                   UndefLower, DAG);
15476
15477    if (NumUpperHalves == 1) {
15478      // AVX2 has efficient 32/64-bit element cross-lane shuffles.
15479      if (Subtarget.hasAVX2()) {
15480        // extract128 + vunpckhps/vshufps, is better than vblend + vpermps.
15481        if (EltWidth == 32 && NumLowerHalves && HalfVT.is128BitVector() &&
15482            !is128BitUnpackShuffleMask(HalfMask) &&
15483            (!isSingleSHUFPSMask(HalfMask) ||
15484             Subtarget.hasFastVariableShuffle()))
15485          return SDValue();
15486        // If this is a unary shuffle (assume that the 2nd operand is
15487        // canonicalized to undef), then we can use vpermpd. Otherwise, we
15488        // are better off extracting the upper half of 1 operand and using a
15489        // narrow shuffle.
15490        if (EltWidth == 64 && V2.isUndef())
15491          return SDValue();
15492      }
15493      // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
15494      if (Subtarget.hasAVX512() && VT.is512BitVector())
15495        return SDValue();
15496      // Extract + narrow shuffle is better than the wide alternative.
15497      return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15498                                   UndefLower, DAG);
15499    }
15500
15501    // Don't extract both uppers, instead shuffle and then extract.
15502    assert(NumUpperHalves == 2 && "Half vector count went wrong");
15503    return SDValue();
15504  }
15505
15506  // UndefLower - uuuuXXXX: an insert to high half is required if we split this.
15507  if (NumUpperHalves == 0) {
15508    // AVX2 has efficient 64-bit element cross-lane shuffles.
15509    // TODO: Refine to account for unary shuffle, splat, and other masks?
15510    if (Subtarget.hasAVX2() && EltWidth == 64)
15511      return SDValue();
15512    // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
15513    if (Subtarget.hasAVX512() && VT.is512BitVector())
15514      return SDValue();
15515    // Narrow shuffle + insert is better than the wide alternative.
15516    return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15517                                 UndefLower, DAG);
15518  }
15519
15520  // NumUpperHalves != 0: don't bother with extract, shuffle, and then insert.
15521  return SDValue();
15522}
15523
15524/// Test whether the specified input (0 or 1) is in-place blended by the
15525/// given mask.
15526///
15527/// This returns true if the elements from a particular input are already in the
15528/// slot required by the given mask and require no permutation.
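///
/// e.g. for the v4 mask <0,5,2,7> both inputs are in place: input 0 already
/// supplies elements 0 and 2 in their slots, and input 1 supplies its
/// elements 1 and 3 (mask values 5 and 7) in slots 1 and 3.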
15529static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
15530  assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
15531  int Size = Mask.size();
15532  for (int i = 0; i < Size; ++i)
15533    if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
15534      return false;
15535
15536  return true;
15537}
15538
15539/// Handle case where shuffle sources are coming from the same 128-bit lane and
15540/// every lane can be represented as the same repeating mask - allowing us to
15541/// shuffle the sources with the repeating shuffle and then permute the result
15542/// to the destination lanes.
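///
/// e.g. with AVX2 a v8f32 mask <1,0,5,4,5,4,1,0> becomes the in-lane repeating
/// shuffle <1,0,u,u,5,4,u,u> followed by a 64-bit sub-lane permute placing the
/// sub-lanes as <0,2,2,0>.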
15543static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
15544    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
15545    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
15546  int NumElts = VT.getVectorNumElements();
15547  int NumLanes = VT.getSizeInBits() / 128;
15548  int NumLaneElts = NumElts / NumLanes;
15549
15550  // On AVX2 we may be able to just shuffle the lowest elements and then
15551  // broadcast the result.
15552  if (Subtarget.hasAVX2()) {
15553    for (unsigned BroadcastSize : {16, 32, 64}) {
15554      if (BroadcastSize <= VT.getScalarSizeInBits())
15555        continue;
15556      int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();
15557
15558      // Attempt to match a repeating pattern every NumBroadcastElts,
15559      // accounting for UNDEFs, but only referencing the lowest 128-bit
15560      // lane of the inputs.
15561      auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
15562        for (int i = 0; i != NumElts; i += NumBroadcastElts)
15563          for (int j = 0; j != NumBroadcastElts; ++j) {
15564            int M = Mask[i + j];
15565            if (M < 0)
15566              continue;
15567            int &R = RepeatMask[j];
15568            if (0 != ((M % NumElts) / NumLaneElts))
15569              return false;
15570            if (0 <= R && R != M)
15571              return false;
15572            R = M;
15573          }
15574        return true;
15575      };
15576
15577      SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
15578      if (!FindRepeatingBroadcastMask(RepeatMask))
15579        continue;
15580
15581      // Shuffle the (lowest) repeated elements in place for broadcast.
15582      SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);
15583
15584      // Shuffle the actual broadcast.
15585      SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
15586      for (int i = 0; i != NumElts; i += NumBroadcastElts)
15587        for (int j = 0; j != NumBroadcastElts; ++j)
15588          BroadcastMask[i + j] = j;
15589      return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
15590                                  BroadcastMask);
15591    }
15592  }
15593
15594  // Bail if the shuffle mask doesn't cross 128-bit lanes.
15595  if (!is128BitLaneCrossingShuffleMask(VT, Mask))
15596    return SDValue();
15597
15598  // Bail if we already have a repeated lane shuffle mask.
15599  SmallVector<int, 8> RepeatedShuffleMask;
15600  if (is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedShuffleMask))
15601    return SDValue();
15602
15603  // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
15604  // (with PERMQ/PERMPD), otherwise we can only permute whole 128-bit lanes.
15605  int SubLaneScale = Subtarget.hasAVX2() && VT.is256BitVector() ? 2 : 1;
15606  int NumSubLanes = NumLanes * SubLaneScale;
15607  int NumSubLaneElts = NumLaneElts / SubLaneScale;
15608
15609  // Check that all the sources are coming from the same lane and see if we can
15610  // form a repeating shuffle mask (local to each sub-lane). At the same time,
15611  // determine the source sub-lane for each destination sub-lane.
15612  int TopSrcSubLane = -1;
15613  SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
15614  SmallVector<int, 8> RepeatedSubLaneMasks[2] = {
15615      SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef),
15616      SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef)};
15617
15618  for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
15619    // Extract the sub-lane mask, check that it all comes from the same lane
15620    // and normalize the mask entries to come from the first lane.
15621    int SrcLane = -1;
15622    SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
15623    for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
15624      int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
15625      if (M < 0)
15626        continue;
15627      int Lane = (M % NumElts) / NumLaneElts;
15628      if ((0 <= SrcLane) && (SrcLane != Lane))
15629        return SDValue();
15630      SrcLane = Lane;
15631      int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
15632      SubLaneMask[Elt] = LocalM;
15633    }
15634
15635    // Whole sub-lane is UNDEF.
15636    if (SrcLane < 0)
15637      continue;
15638
15639    // Attempt to match against the candidate repeated sub-lane masks.
15640    for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
15641      auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
15642        for (int i = 0; i != NumSubLaneElts; ++i) {
15643          if (M1[i] < 0 || M2[i] < 0)
15644            continue;
15645          if (M1[i] != M2[i])
15646            return false;
15647        }
15648        return true;
15649      };
15650
15651      auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
15652      if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
15653        continue;
15654
15655      // Merge the sub-lane mask into the matching repeated sub-lane mask.
15656      for (int i = 0; i != NumSubLaneElts; ++i) {
15657        int M = SubLaneMask[i];
15658        if (M < 0)
15659          continue;
15660        assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
15661               "Unexpected mask element");
15662        RepeatedSubLaneMask[i] = M;
15663      }
15664
15665      // Track the topmost source sub-lane - by setting the remaining to UNDEF
15666      // we can greatly simplify shuffle matching.
15667      int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
15668      TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
15669      Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
15670      break;
15671    }
15672
15673    // Bail if we failed to find a matching repeated sub-lane mask.
15674    if (Dst2SrcSubLanes[DstSubLane] < 0)
15675      return SDValue();
15676  }
15677  assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
15678         "Unexpected source lane");
15679
15680  // Create a repeating shuffle mask for the entire vector.
15681  SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
15682  for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
15683    int Lane = SubLane / SubLaneScale;
15684    auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
15685    for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
15686      int M = RepeatedSubLaneMask[Elt];
15687      if (M < 0)
15688        continue;
15689      int Idx = (SubLane * NumSubLaneElts) + Elt;
15690      RepeatedMask[Idx] = M + (Lane * NumLaneElts);
15691    }
15692  }
15693  SDValue RepeatedShuffle = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);
15694
15695  // Shuffle each source sub-lane to its destination.
15696  SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
15697  for (int i = 0; i != NumElts; i += NumSubLaneElts) {
15698    int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
15699    if (SrcSubLane < 0)
15700      continue;
15701    for (int j = 0; j != NumSubLaneElts; ++j)
15702      SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
15703  }
15704
15705  return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
15706                              SubLaneMask);
15707}
15708
15709static bool matchShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
15710                                   bool &ForceV1Zero, bool &ForceV2Zero,
15711                                   unsigned &ShuffleImm, ArrayRef<int> Mask,
15712                                   const APInt &Zeroable) {
15713  int NumElts = VT.getVectorNumElements();
15714  assert(VT.getScalarSizeInBits() == 64 &&
15715         (NumElts == 2 || NumElts == 4 || NumElts == 8) &&
15716         "Unexpected data type for VSHUFPD");
15717  assert(isUndefOrZeroOrInRange(Mask, 0, 2 * NumElts) &&
15718         "Illegal shuffle mask");
15719
15720  bool ZeroLane[2] = { true, true };
15721  for (int i = 0; i < NumElts; ++i)
15722    ZeroLane[i & 1] &= Zeroable[i];
15723
15724  // Mask for V8F64: 0/1,  8/9,  2/3,  10/11, 4/5, ..
15725  // Mask for V4F64: 0/1,  4/5,  2/3,  6/7, ..
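  // e.g. a v4f64 mask <1,5,2,7> fits the pattern and produces ShuffleImm
  // 0b1011 for SHUFPD V1, V2.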
15726  ShuffleImm = 0;
15727  bool ShufpdMask = true;
15728  bool CommutableMask = true;
15729  for (int i = 0; i < NumElts; ++i) {
15730    if (Mask[i] == SM_SentinelUndef || ZeroLane[i & 1])
15731      continue;
15732    if (Mask[i] < 0)
15733      return false;
15734    int Val = (i & 6) + NumElts * (i & 1);
15735    int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
15736    if (Mask[i] < Val || Mask[i] > Val + 1)
15737      ShufpdMask = false;
15738    if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
15739      CommutableMask = false;
15740    ShuffleImm |= (Mask[i] % 2) << i;
15741  }
15742
15743  if (!ShufpdMask && !CommutableMask)
15744    return false;
15745
15746  if (!ShufpdMask && CommutableMask)
15747    std::swap(V1, V2);
15748
15749  ForceV1Zero = ZeroLane[0];
15750  ForceV2Zero = ZeroLane[1];
15751  return true;
15752}
15753
15754static SDValue lowerShuffleWithSHUFPD(const SDLoc &DL, MVT VT, SDValue V1,
15755                                      SDValue V2, ArrayRef<int> Mask,
15756                                      const APInt &Zeroable,
15757                                      const X86Subtarget &Subtarget,
15758                                      SelectionDAG &DAG) {
15759  assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64) &&
15760         "Unexpected data type for VSHUFPD");
15761
15762  unsigned Immediate = 0;
15763  bool ForceV1Zero = false, ForceV2Zero = false;
15764  if (!matchShuffleWithSHUFPD(VT, V1, V2, ForceV1Zero, ForceV2Zero, Immediate,
15765                              Mask, Zeroable))
15766    return SDValue();
15767
15768  // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
15769  if (ForceV1Zero)
15770    V1 = getZeroVector(VT, Subtarget, DAG, DL);
15771  if (ForceV2Zero)
15772    V2 = getZeroVector(VT, Subtarget, DAG, DL);
15773
15774  return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
15775                     DAG.getTargetConstant(Immediate, DL, MVT::i8));
15776}
15777
15778// Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
15779// by zeroable elements in the remaining 24 elements. Turn this into two
15780// vmovqb instructions shuffled together.
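// i.e. each VTRUNC keeps bytes {0,8,16,24} of its source and zeroes the other
// 12 bytes, and the unpckldq below interleaves the two 4-byte groups into the
// low 16 bytes of a zero vector.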
15781static SDValue lowerShuffleAsVTRUNCAndUnpack(const SDLoc &DL, MVT VT,
15782                                             SDValue V1, SDValue V2,
15783                                             ArrayRef<int> Mask,
15784                                             const APInt &Zeroable,
15785                                             SelectionDAG &DAG) {
15786  assert(VT == MVT::v32i8 && "Unexpected type!");
15787
15788  // The first 8 indices should be every 8th element.
15789  if (!isSequentialOrUndefInRange(Mask, 0, 8, 0, 8))
15790    return SDValue();
15791
15792  // Remaining elements need to be zeroable.
15793  if (Zeroable.countLeadingOnes() < (Mask.size() - 8))
15794    return SDValue();
15795
15796  V1 = DAG.getBitcast(MVT::v4i64, V1);
15797  V2 = DAG.getBitcast(MVT::v4i64, V2);
15798
15799  V1 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V1);
15800  V2 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V2);
15801
15802  // The VTRUNCs will put 0s in the upper 12 bytes. Use them to put zeroes in
15803  // the upper bits of the result using an unpckldq.
15804  SDValue Unpack = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2,
15805                                        { 0, 1, 2, 3, 16, 17, 18, 19,
15806                                          4, 5, 6, 7, 20, 21, 22, 23 });
15807  // Insert the unpckldq into a zero vector to widen to v32i8.
15808  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v32i8,
15809                     DAG.getConstant(0, DL, MVT::v32i8), Unpack,
15810                     DAG.getIntPtrConstant(0, DL));
15811}
15812
15813
15814/// Handle lowering of 4-lane 64-bit floating point shuffles.
15815///
15816/// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
15817/// isn't available.
15818static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15819                                 const APInt &Zeroable, SDValue V1, SDValue V2,
15820                                 const X86Subtarget &Subtarget,
15821                                 SelectionDAG &DAG) {
15822  assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
15823  assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
15824  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15825
15826  if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4f64, V1, V2, Mask, Zeroable,
15827                                     Subtarget, DAG))
15828    return V;
15829
15830  if (V2.isUndef()) {
15831    // Check for being able to broadcast a single element.
15832    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f64, V1, V2,
15833                                                    Mask, Subtarget, DAG))
15834      return Broadcast;
15835
15836    // Use low duplicate instructions for masks that match their pattern.
15837    if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
15838      return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
15839
15840    if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
15841      // Non-half-crossing single input shuffles can be lowered with an
15842      // interleaved permutation.
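      // e.g. the mask <1,0,3,2> yields a VPERMILPD immediate of 0b0101.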
15843      unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
15844                              ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
15845      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
15846                         DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
15847    }
15848
15849    // With AVX2 we have direct support for this permutation.
15850    if (Subtarget.hasAVX2())
15851      return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
15852                         getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15853
15854    // Try to create an in-lane repeating shuffle mask and then shuffle the
15855    // results into the target lanes.
15856    if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15857            DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15858      return V;
15859
15860    // Try to permute the lanes and then use a per-lane permute.
15861    if (SDValue V = lowerShuffleAsLanePermuteAndPermute(DL, MVT::v4f64, V1, V2,
15862                                                        Mask, DAG, Subtarget))
15863      return V;
15864
15865    // Otherwise, fall back.
15866    return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v4f64, V1, V2, Mask,
15867                                               DAG, Subtarget);
15868  }
15869
15870  // Use dedicated unpack instructions for masks that match their pattern.
15871  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
15872    return V;
15873
15874  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
15875                                          Zeroable, Subtarget, DAG))
15876    return Blend;
15877
15878  // Check if the blend happens to exactly fit that of SHUFPD.
15879  if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v4f64, V1, V2, Mask,
15880                                          Zeroable, Subtarget, DAG))
15881    return Op;
15882
15883  // If we have lane crossing shuffles AND they don't all come from the lower
15884  // lane elements, lower to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
15885  // TODO: Handle BUILD_VECTOR sources which getVectorShuffle currently
15886  // canonicalizes to a blend of splats, which isn't necessary for this combine.
15887  if (is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask) &&
15888      !all_of(Mask, [](int M) { return M < 2 || (4 <= M && M < 6); }) &&
15889      (V1.getOpcode() != ISD::BUILD_VECTOR) &&
15890      (V2.getOpcode() != ISD::BUILD_VECTOR))
15891    if (SDValue Op = lowerShuffleAsLanePermuteAndSHUFP(DL, MVT::v4f64, V1, V2,
15892                                                       Mask, DAG))
15893      return Op;
15894
15895  // If we have one input in place, then we can permute the other input and
15896  // blend the result.
15897  if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
15898    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, Mask,
15899                                                Subtarget, DAG);
15900
15901  // Try to create an in-lane repeating shuffle mask and then shuffle the
15902  // results into the target lanes.
15903  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15904          DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15905    return V;
15906
15907  // Try to simplify this by merging 128-bit lanes to enable a lane-based
15908  // shuffle. However, if we have AVX2 and either input is already in place,
15909  // we will be able to shuffle the other input, even across lanes, in a single
15910  // instruction, so skip this pattern.
15911  if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
15912                                isShuffleMaskInputInPlace(1, Mask))))
15913    if (SDValue V = lowerShuffleAsLanePermuteAndRepeatedMask(
15914            DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15915      return V;
15916
15917  // If we have VLX support, we can use VEXPAND.
15918  if (Subtarget.hasVLX())
15919    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask, V1, V2,
15920                                         DAG, Subtarget))
15921      return V;
15922
15923  // If we have AVX2 then we always want to lower with a blend because at v4 we
15924  // can fully permute the elements.
15925  if (Subtarget.hasAVX2())
15926    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, Mask,
15927                                                Subtarget, DAG);
15928
15929  // Otherwise fall back on generic lowering.
15930  return lowerShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask,
15931                                    Subtarget, DAG);
15932}
15933
15934/// Handle lowering of 4-lane 64-bit integer shuffles.
15935///
15936/// This routine is only called when we have AVX2 and thus a reasonable
15937/// instruction set for v4i64 shuffling.
15938static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15939                                 const APInt &Zeroable, SDValue V1, SDValue V2,
15940                                 const X86Subtarget &Subtarget,
15941                                 SelectionDAG &DAG) {
15942  assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
15943  assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
15944  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15945  assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
15946
15947  if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
15948                                     Subtarget, DAG))
15949    return V;
15950
15951  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
15952                                          Zeroable, Subtarget, DAG))
15953    return Blend;
15954
15955  // Check for being able to broadcast a single element.
15956  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i64, V1, V2, Mask,
15957                                                  Subtarget, DAG))
15958    return Broadcast;
15959
15960  if (V2.isUndef()) {
15961    // When the shuffle is mirrored between the 128-bit lanes of the vector, we
15962    // can use lower latency instructions that will operate on both lanes.
15963    SmallVector<int, 2> RepeatedMask;
15964    if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
15965      SmallVector<int, 4> PSHUFDMask;
15966      scaleShuffleMask<int>(2, RepeatedMask, PSHUFDMask);
15967      return DAG.getBitcast(
15968          MVT::v4i64,
15969          DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
15970                      DAG.getBitcast(MVT::v8i32, V1),
15971                      getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
15972    }
15973
15974    // AVX2 provides a direct instruction for permuting a single input across
15975    // lanes.
15976    return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
15977                       getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15978  }
15979
15980  // Try to use shift instructions.
15981  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask,
15982                                          Zeroable, Subtarget, DAG))
15983    return Shift;
15984
15985  // If we have VLX support, we can use VALIGN or VEXPAND.
15986  if (Subtarget.hasVLX()) {
15987    if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v4i64, V1, V2, Mask,
15988                                              Subtarget, DAG))
15989      return Rotate;
15990
15991    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask, V1, V2,
15992                                         DAG, Subtarget))
15993      return V;
15994  }
15995
15996  // Try to use PALIGNR.
15997  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i64, V1, V2, Mask,
15998                                                Subtarget, DAG))
15999    return Rotate;
16000
16001  // Use dedicated unpack instructions for masks that match their pattern.
16002  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
16003    return V;
16004
16005  // If we have one input in place, then we can permute the other input and
16006  // blend the result.
16007  if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
16008    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, Mask,
16009                                                Subtarget, DAG);
16010
16011  // Try to create an in-lane repeating shuffle mask and then shuffle the
16012  // results into the target lanes.
16013  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16014          DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
16015    return V;
16016
16017  // Try to simplify this by merging 128-bit lanes to enable a lane-based
16018  // shuffle. However, if we have AVX2 and either input is already in place,
16019  // we will be able to shuffle the other input, even across lanes, in a single
16020  // instruction, so skip this pattern.
16021  if (!isShuffleMaskInputInPlace(0, Mask) &&
16022      !isShuffleMaskInputInPlace(1, Mask))
16023    if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16024            DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
16025      return Result;
16026
16027  // Otherwise fall back on generic blend lowering.
16028  return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, Mask,
16029                                              Subtarget, DAG);
16030}
16031
16032/// Handle lowering of 8-lane 32-bit floating point shuffles.
16033///
16034/// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
16035/// isn't available.
16036static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16037                                 const APInt &Zeroable, SDValue V1, SDValue V2,
16038                                 const X86Subtarget &Subtarget,
16039                                 SelectionDAG &DAG) {
16040  assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
16041  assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
16042  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16043
16044  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
16045                                          Zeroable, Subtarget, DAG))
16046    return Blend;
16047
16048  // Check for being able to broadcast a single element.
16049  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f32, V1, V2, Mask,
16050                                                  Subtarget, DAG))
16051    return Broadcast;
16052
16053  // If the shuffle mask is repeated in each 128-bit lane, we have many more
16054  // options to efficiently lower the shuffle.
16055  SmallVector<int, 4> RepeatedMask;
16056  if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
16057    assert(RepeatedMask.size() == 4 &&
16058           "Repeated masks must be half the mask width!");
16059
16060    // Use even/odd duplicate instructions for masks that match their pattern.
16061    if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
16062      return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
16063    if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
16064      return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
16065
16066    if (V2.isUndef())
16067      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
16068                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16069
16070    // Use dedicated unpack instructions for masks that match their pattern.
16071    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
16072      return V;
16073
16074    // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
16075    // have already handled any direct blends.
16076    return lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
16077  }
16078
16079  // Try to create an in-lane repeating shuffle mask and then shuffle the
16080  // results into the target lanes.
16081  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16082          DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
16083    return V;
16084
16085  // If we have a single input shuffle with different shuffle patterns in the
16086  // two 128-bit lanes, use a variable mask with VPERMILPS.
16087  if (V2.isUndef()) {
16088    SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
16089    if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
16090      return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
16091
16092    if (Subtarget.hasAVX2())
16093      return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1);
16094
16095    // Otherwise, fall back.
16096    return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v8f32, V1, V2, Mask,
16097                                               DAG, Subtarget);
16098  }
16099
16100  // Try to simplify this by merging 128-bit lanes to enable a lane-based
16101  // shuffle.
16102  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16103          DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
16104    return Result;
16105
16106  // If we have VLX support, we can use VEXPAND.
16107  if (Subtarget.hasVLX())
16108    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask, V1, V2,
16109                                         DAG, Subtarget))
16110      return V;
16111
16112  // For non-AVX512, if the mask acts as a 16-bit element unpack within each
16113  // lane, try to split, since after the split we get more efficient code using
16114  // vpunpcklwd and vpunpckhwd instrs than vblend.
16115  if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32))
16116    if (SDValue V = lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
16117                                               Subtarget, DAG))
16118      return V;
16119
16120  // If we have AVX2 then we always want to lower with a blend because at v8 we
16121  // can fully permute the elements.
16122  if (Subtarget.hasAVX2())
16123    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2, Mask,
16124                                                Subtarget, DAG);
16125
16126  // Otherwise fall back on generic lowering.
16127  return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
16128                                    Subtarget, DAG);
16129}
16130
16131/// Handle lowering of 8-lane 32-bit integer shuffles.
16132///
16133/// This routine is only called when we have AVX2 and thus a reasonable
16134/// instruction set for v8i32 shuffling.
16135static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16136                                 const APInt &Zeroable, SDValue V1, SDValue V2,
16137                                 const X86Subtarget &Subtarget,
16138                                 SelectionDAG &DAG) {
16139  assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
16140  assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
16141  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16142  assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");
16143
16144  // Whenever we can lower this as a zext, that instruction is strictly faster
16145  // than any alternative. It also allows us to fold memory operands into the
16146  // shuffle in many cases.
16147  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
16148                                                   Zeroable, Subtarget, DAG))
16149    return ZExt;
16150
16151  // For non-AVX512, if the mask acts as a 16-bit element unpack within each
16152  // lane, try to split, since after the split we get more efficient code than
16153  // vblend by using vpunpcklwd and vpunpckhwd instrs.
16154  if (isUnpackWdShuffleMask(Mask, MVT::v8i32) && !V2.isUndef() &&
16155      !Subtarget.hasAVX512())
16156    if (SDValue V = lowerShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask,
16157                                               Subtarget, DAG))
16158      return V;
16159
16160  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
16161                                          Zeroable, Subtarget, DAG))
16162    return Blend;
16163
16164  // Check for being able to broadcast a single element.
16165  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i32, V1, V2, Mask,
16166                                                  Subtarget, DAG))
16167    return Broadcast;
16168
16169  // If the shuffle mask is repeated in each 128-bit lane we can use more
16170  // efficient instructions that mirror the shuffles across the two 128-bit
16171  // lanes.
16172  SmallVector<int, 4> RepeatedMask;
16173  bool Is128BitLaneRepeatedShuffle =
16174      is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
16175  if (Is128BitLaneRepeatedShuffle) {
16176    assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16177    if (V2.isUndef())
16178      return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
16179                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16180
16181    // Use dedicated unpack instructions for masks that match their pattern.
16182    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
16183      return V;
16184  }
16185
16186  // Try to use shift instructions.
16187  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask,
16188                                          Zeroable, Subtarget, DAG))
16189    return Shift;
16190
16191  // If we have VLX support, we can use VALIGN or EXPAND.
16192  if (Subtarget.hasVLX()) {
16193    if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v8i32, V1, V2, Mask,
16194                                              Subtarget, DAG))
16195      return Rotate;
16196
16197    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask, V1, V2,
16198                                         DAG, Subtarget))
16199      return V;
16200  }
16201
16202  // Try to use byte rotation instructions.
16203  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i32, V1, V2, Mask,
16204                                                Subtarget, DAG))
16205    return Rotate;
16206
16207  // Try to create an in-lane repeating shuffle mask and then shuffle the
16208  // results into the target lanes.
16209  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16210          DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
16211    return V;
16212
16213  // If the shuffle pattern isn't repeated but this is a single-input shuffle,
16214  // directly generate a cross-lane VPERMD instruction.
16215  if (V2.isUndef()) {
16216    SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
16217    return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1);
16218  }
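  // VPERMD can realize any single-input v8i32 shuffle: e.g. a cross-lane mask
  // such as <5,2,7,0, 3,6,1,4> simply becomes a VPERMV node with that mask
  // materialized as a constant v8i32 index vector.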
16219
16220  // Assume that a single SHUFPS is faster than an alternative sequence of
16221  // multiple instructions (even if the CPU has a domain penalty).
16222  // If some CPU is harmed by the domain switch, we can fix it in a later pass.
16223  if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
16224    SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1);
16225    SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2);
16226    SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask,
16227                                            CastV1, CastV2, DAG);
16228    return DAG.getBitcast(MVT::v8i32, ShufPS);
16229  }
16230
16231  // Try to simplify this by merging 128-bit lanes to enable a lane-based
16232  // shuffle.
16233  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16234          DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
16235    return Result;
16236
16237  // Otherwise fall back on generic blend lowering.
16238  return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2, Mask,
16239                                              Subtarget, DAG);
16240}
16241
16242/// Handle lowering of 16-lane 16-bit integer shuffles.
16243///
16244/// This routine is only called when we have AVX2 and thus a reasonable
16245/// instruction set for v16i16 shuffling.
16246static SDValue lowerV16I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16247                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16248                                  const X86Subtarget &Subtarget,
16249                                  SelectionDAG &DAG) {
16250  assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
16251  assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
16252  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16253  assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");
16254
16255  // Whenever we can lower this as a zext, that instruction is strictly faster
16256  // than any alternative. It also allows us to fold memory operands into the
16257  // shuffle in many cases.
16258  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16259          DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16260    return ZExt;
16261
16262  // Check for being able to broadcast a single element.
16263  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i16, V1, V2, Mask,
16264                                                  Subtarget, DAG))
16265    return Broadcast;
16266
16267  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
16268                                          Zeroable, Subtarget, DAG))
16269    return Blend;
16270
16271  // Use dedicated unpack instructions for masks that match their pattern.
16272  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
16273    return V;
16274
16275  // Use dedicated pack instructions for masks that match their pattern.
16276  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i16, Mask, V1, V2, DAG,
16277                                       Subtarget))
16278    return V;
16279
16280  // Try to use shift instructions.
16281  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask,
16282                                          Zeroable, Subtarget, DAG))
16283    return Shift;
16284
16285  // Try to use byte rotation instructions.
16286  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i16, V1, V2, Mask,
16287                                                Subtarget, DAG))
16288    return Rotate;
16289
16290  // Try to create an in-lane repeating shuffle mask and then shuffle the
16291  // results into the target lanes.
16292  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16293          DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
16294    return V;
16295
16296  if (V2.isUndef()) {
16297    // There are no generalized cross-lane shuffle operations available on i16
16298    // element types.
16299    if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask)) {
16300      if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16301              DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
16302        return V;
16303
16304      return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v16i16, V1, V2, Mask,
16305                                                 DAG, Subtarget);
16306    }
16307
16308    SmallVector<int, 8> RepeatedMask;
16309    if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
16310      // As this is a single-input shuffle, the repeated mask should be
16311      // a strictly valid v8i16 mask that we can pass through to the v8i16
16312      // lowering to handle even the v16 case.
16313      return lowerV8I16GeneralSingleInputShuffle(
16314          DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
16315    }
16316  }
16317
16318  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v16i16, Mask, V1, V2,
16319                                              Zeroable, Subtarget, DAG))
16320    return PSHUFB;
16321
16322  // AVX512BWVL can lower to VPERMW.
16323  if (Subtarget.hasBWI() && Subtarget.hasVLX())
16324    return lowerShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, DAG);
16325
16326  // Try to simplify this by merging 128-bit lanes to enable a lane-based
16327  // shuffle.
16328  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16329          DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
16330    return Result;
16331
16332  // Try to permute the lanes and then use a per-lane permute.
16333  if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16334          DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
16335    return V;
16336
16337  // Otherwise fall back on generic lowering.
16338  return lowerShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask,
16339                                    Subtarget, DAG);
16340}
16341
16342/// Handle lowering of 32-lane 8-bit integer shuffles.
16343///
16344/// This routine is only called when we have AVX2 and thus a reasonable
16345/// instruction set for v32i8 shuffling.
16346static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16347                                 const APInt &Zeroable, SDValue V1, SDValue V2,
16348                                 const X86Subtarget &Subtarget,
16349                                 SelectionDAG &DAG) {
16350  assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
16351  assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
16352  assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
16353  assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");
16354
16355  // Whenever we can lower this as a zext, that instruction is strictly faster
16356  // than any alternative. It also allows us to fold memory operands into the
16357  // shuffle in many cases.
16358  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2, Mask,
16359                                                   Zeroable, Subtarget, DAG))
16360    return ZExt;
16361
16362  // Check for being able to broadcast a single element.
16363  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v32i8, V1, V2, Mask,
16364                                                  Subtarget, DAG))
16365    return Broadcast;
16366
16367  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
16368                                          Zeroable, Subtarget, DAG))
16369    return Blend;
16370
16371  // Use dedicated unpack instructions for masks that match their pattern.
16372  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
16373    return V;
16374
16375  // Use dedicated pack instructions for masks that match their pattern.
16376  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v32i8, Mask, V1, V2, DAG,
16377                                       Subtarget))
16378    return V;
16379
16380  // Try to use shift instructions.
16381  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask,
16382                                                Zeroable, Subtarget, DAG))
16383    return Shift;
16384
16385  // Try to use byte rotation instructions.
16386  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i8, V1, V2, Mask,
16387                                                Subtarget, DAG))
16388    return Rotate;
16389
16390  // Try to create an in-lane repeating shuffle mask and then shuffle the
16391  // results into the target lanes.
16392  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16393          DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
16394    return V;
16395
16396  // There are no generalized cross-lane shuffle operations available on i8
16397  // element types.
16398  if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
16399    if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16400            DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
16401      return V;
16402
16403    return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v32i8, V1, V2, Mask,
16404                                               DAG, Subtarget);
16405  }
16406
16407  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i8, Mask, V1, V2,
16408                                              Zeroable, Subtarget, DAG))
16409    return PSHUFB;
16410
16411  // AVX512VBMIVL can lower to VPERMB.
16412  if (Subtarget.hasVBMI() && Subtarget.hasVLX())
16413    return lowerShuffleWithPERMV(DL, MVT::v32i8, Mask, V1, V2, DAG);
16414
16415  // Try to simplify this by merging 128-bit lanes to enable a lane-based
16416  // shuffle.
16417  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16418          DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
16419    return Result;
16420
16421  // Try to permute the lanes and then use a per-lane permute.
16422  if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16423          DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
16424    return V;
16425
16426  // Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
16427  // by zeroable elements in the remaining 24 elements. Turn this into two
16428  // vmovqb instructions shuffled together.
16429  if (Subtarget.hasVLX())
16430    if (SDValue V = lowerShuffleAsVTRUNCAndUnpack(DL, MVT::v32i8, V1, V2,
16431                                                  Mask, Zeroable, DAG))
16432      return V;
16433
16434  // Otherwise fall back on generic lowering.
16435  return lowerShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask,
16436                                    Subtarget, DAG);
16437}
16438
16439/// High-level routine to lower various 256-bit x86 vector shuffles.
16440///
16441/// This routine either breaks down the specific type of a 256-bit x86 vector
16442/// shuffle or splits it into two 128-bit shuffles and fuses the results back
16443/// together based on the available instructions.
16444static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
16445                                  SDValue V1, SDValue V2, const APInt &Zeroable,
16446                                  const X86Subtarget &Subtarget,
16447                                  SelectionDAG &DAG) {
16448  // If we have a single input to the zero element, insert that into V1 if we
16449  // can do so cheaply.
16450  int NumElts = VT.getVectorNumElements();
16451  int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
16452
16453  if (NumV2Elements == 1 && Mask[0] >= NumElts)
16454    if (SDValue Insertion = lowerShuffleAsElementInsertion(
16455            DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
16456      return Insertion;
16457
16458  // Handle special cases where the lower or upper half is UNDEF.
16459  if (SDValue V =
16460          lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
16461    return V;
16462
16463  // There is a really nice hard cut-over between AVX1 and AVX2 that means we
16464  // can check for those subtargets here and avoid much of the subtarget
16465  // querying in the per-vector-type lowering routines. With AVX1 we have
16466  // essentially *zero* ability to manipulate a 256-bit vector with integer
16467  // types. Since we'll use floating point types there eventually, just
16468  // immediately cast everything to a float and operate entirely in that domain.
16469  if (VT.isInteger() && !Subtarget.hasAVX2()) {
16470    int ElementBits = VT.getScalarSizeInBits();
16471    if (ElementBits < 32) {
16472      // No floating point type available, if we can't use the bit operations
16473      // for masking/blending then decompose into 128-bit vectors.
16474      if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
16475                                            Subtarget, DAG))
16476        return V;
16477      if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
16478        return V;
16479      return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
16480    }
16481
16482    MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
16483                                VT.getVectorNumElements());
16484    V1 = DAG.getBitcast(FpVT, V1);
16485    V2 = DAG.getBitcast(FpVT, V2);
16486    return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
16487  }
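  // For example, with AVX1 but not AVX2 a v4i64 shuffle is bitcast to v4f64 and
  // re-lowered in the floating-point domain, while a v16i16 shuffle (no float
  // type of that element width exists) is handled as a bit mask/blend or split
  // into two 128-bit halves.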
16488
16489  switch (VT.SimpleTy) {
16490  case MVT::v4f64:
16491    return lowerV4F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16492  case MVT::v4i64:
16493    return lowerV4I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16494  case MVT::v8f32:
16495    return lowerV8F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16496  case MVT::v8i32:
16497    return lowerV8I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16498  case MVT::v16i16:
16499    return lowerV16I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16500  case MVT::v32i8:
16501    return lowerV32I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16502
16503  default:
16504    llvm_unreachable("Not a valid 256-bit x86 vector type!");
16505  }
16506}
16507
16508/// Try to lower a vector shuffle as a 128-bit shuffles.
16509static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
16510                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16511                                  const X86Subtarget &Subtarget,
16512                                  SelectionDAG &DAG) {
16513  assert(VT.getScalarSizeInBits() == 64 &&
16514         "Unexpected element type size for 128bit shuffle.");
16515
16516  // Handling a 256-bit vector would require VLX, and lowerV2X128VectorShuffle()
16517  // is most probably the better solution for that case.
16518  assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");
16519
16520  // TODO - use Zeroable like we do for lowerV2X128VectorShuffle?
16521  SmallVector<int, 4> WidenedMask;
16522  if (!canWidenShuffleElements(Mask, WidenedMask))
16523    return SDValue();
16524
16525  // Try to use an insert into a zero vector.
16526  if (WidenedMask[0] == 0 && (Zeroable & 0xf0) == 0xf0 &&
16527      (WidenedMask[1] == 1 || (Zeroable & 0x0c) == 0x0c)) {
16528    unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4;
16529    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
16530    SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
16531                              DAG.getIntPtrConstant(0, DL));
16532    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
16533                       getZeroVector(VT, Subtarget, DAG, DL), LoV,
16534                       DAG.getIntPtrConstant(0, DL));
16535  }
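  // E.g. if the widened mask keeps the low 256 bits of V1 in place
  // (WidenedMask starts with 0, 1) and the upper four 64-bit elements of the
  // result are zeroable, this emits a 256-bit EXTRACT_SUBVECTOR from V1
  // inserted at offset 0 into an all-zero 512-bit vector.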
16536
16537  // Check for patterns which can be matched with a single insert of a 256-bit
16538  // subvector.
16539  bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask,
16540                                        {0, 1, 2, 3, 0, 1, 2, 3});
16541  if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask,
16542                                        {0, 1, 2, 3, 8, 9, 10, 11})) {
16543    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4);
16544    SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
16545                                 OnlyUsesV1 ? V1 : V2,
16546                              DAG.getIntPtrConstant(0, DL));
16547    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
16548                       DAG.getIntPtrConstant(4, DL));
16549  }
16550
16551  assert(WidenedMask.size() == 4);
16552
16553  // See if this is an insertion of the lower 128-bits of V2 into V1.
16554  bool IsInsert = true;
16555  int V2Index = -1;
16556  for (int i = 0; i < 4; ++i) {
16557    assert(WidenedMask[i] >= -1);
16558    if (WidenedMask[i] < 0)
16559      continue;
16560
16561    // Make sure all V1 subvectors are in place.
16562    if (WidenedMask[i] < 4) {
16563      if (WidenedMask[i] != i) {
16564        IsInsert = false;
16565        break;
16566      }
16567    } else {
16568      // Make sure we only have a single V2 index and it's the lowest 128 bits.
16569      if (V2Index >= 0 || WidenedMask[i] != 4) {
16570        IsInsert = false;
16571        break;
16572      }
16573      V2Index = i;
16574    }
16575  }
16576  if (IsInsert && V2Index >= 0) {
16577    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
16578    SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
16579                                 DAG.getIntPtrConstant(0, DL));
16580    return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
16581  }
16582
16583  // Try to lower to vshuf64x2/vshuf32x4.
16584  SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
16585  unsigned PermMask = 0;
16586  // Ensure elements come from the same Op.
16587  for (int i = 0; i < 4; ++i) {
16588    assert(WidenedMask[i] >= -1);
16589    if (WidenedMask[i] < 0)
16590      continue;
16591
16592    SDValue Op = WidenedMask[i] >= 4 ? V2 : V1;
16593    unsigned OpIndex = i / 2;
16594    if (Ops[OpIndex].isUndef())
16595      Ops[OpIndex] = Op;
16596    else if (Ops[OpIndex] != Op)
16597      return SDValue();
16598
16599    // Convert the 128-bit shuffle mask selection values into 128-bit selection
16600    // bits defined by a vshuf64x2 instruction's immediate control byte.
16601    PermMask |= (WidenedMask[i] % 4) << (i * 2);
16602  }
16603
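  // Worked example (tracing the loop above with concrete values): the v8f64
  // mask <0,1, 4,5, 10,11, 14,15> widens to <0, 2, 5, 7>, so Ops[0] = V1,
  // Ops[1] = V2 and PermMask = (0 << 0) | (2 << 2) | (1 << 4) | (3 << 6)
  // = 0xD8, i.e. 128-bit blocks 0 and 2 of V1 followed by blocks 1 and 3 of V2.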
16604  return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
16605                     DAG.getTargetConstant(PermMask, DL, MVT::i8));
16606}
16607
16608/// Handle lowering of 8-lane 64-bit floating point shuffles.
16609static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16610                                 const APInt &Zeroable, SDValue V1, SDValue V2,
16611                                 const X86Subtarget &Subtarget,
16612                                 SelectionDAG &DAG) {
16613  assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
16614  assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
16615  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16616
16617  if (V2.isUndef()) {
16618    // Use low duplicate instructions for masks that match their pattern.
16619    if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6}))
16620      return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
16621
16622    if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
16623      // Non-half-crossing single input shuffles can be lowered with an
16624      // interleaved permutation.
16625      unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
16626                              ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
16627                              ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
16628                              ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
16629      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
16630                         DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
16631    }
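    // E.g. for the in-lane swap mask <1,0,3,2,5,4,7,6> the expression above
    // sets bits 0, 2, 4 and 6, giving VPERMILPMask = 0x55.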
16632
16633    SmallVector<int, 4> RepeatedMask;
16634    if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
16635      return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
16636                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16637  }
16638
16639  if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8f64, Mask, Zeroable, V1,
16640                                           V2, Subtarget, DAG))
16641    return Shuf128;
16642
16643  if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
16644    return Unpck;
16645
16646  // Check if the blend happens to exactly fit that of SHUFPD.
16647  if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v8f64, V1, V2, Mask,
16648                                          Zeroable, Subtarget, DAG))
16649    return Op;
16650
16651  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1, V2,
16652                                       DAG, Subtarget))
16653    return V;
16654
16655  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
16656                                          Zeroable, Subtarget, DAG))
16657    return Blend;
16658
16659  return lowerShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, DAG);
16660}
16661
16662/// Handle lowering of 16-lane 32-bit floating point shuffles.
16663static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16664                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16665                                  const X86Subtarget &Subtarget,
16666                                  SelectionDAG &DAG) {
16667  assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
16668  assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
16669  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16670
16671  // If the shuffle mask is repeated in each 128-bit lane, we have many more
16672  // options to efficiently lower the shuffle.
16673  SmallVector<int, 4> RepeatedMask;
16674  if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
16675    assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16676
16677    // Use even/odd duplicate instructions for masks that match their pattern.
16678    if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
16679      return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
16680    if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
16681      return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
16682
16683    if (V2.isUndef())
16684      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
16685                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16686
16687    // Use dedicated unpack instructions for masks that match their pattern.
16688    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
16689      return V;
16690
16691    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
16692                                            Zeroable, Subtarget, DAG))
16693      return Blend;
16694
16695    // Otherwise, fall back to a SHUFPS sequence.
16696    return lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
16697  }
16698
16699  // If we have a single input shuffle with different shuffle patterns in the
16700  // 128-bit lanes and don't lane cross, use variable mask VPERMILPS.
16701  if (V2.isUndef() &&
16702      !is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
16703    SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
16704    return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
16705  }
16706
16707  // If we have AVX512F support, we can use VEXPAND.
16708  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask,
16709                                             V1, V2, DAG, Subtarget))
16710    return V;
16711
16712  return lowerShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, DAG);
16713}
16714
16715/// Handle lowering of 8-lane 64-bit integer shuffles.
16716static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16717                                 const APInt &Zeroable, SDValue V1, SDValue V2,
16718                                 const X86Subtarget &Subtarget,
16719                                 SelectionDAG &DAG) {
16720  assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
16721  assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
16722  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16723
16724  if (V2.isUndef()) {
16725    // When the shuffle is mirrored across the 128-bit lanes of the vector, we
16726    // can use lower-latency instructions that operate on all four 128-bit
16727    // lanes at once.
16728    SmallVector<int, 2> Repeated128Mask;
16729    if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
16730      SmallVector<int, 4> PSHUFDMask;
16731      scaleShuffleMask<int>(2, Repeated128Mask, PSHUFDMask);
16732      return DAG.getBitcast(
16733          MVT::v8i64,
16734          DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
16735                      DAG.getBitcast(MVT::v16i32, V1),
16736                      getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
16737    }
16738
16739    SmallVector<int, 4> Repeated256Mask;
16740    if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
16741      return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
16742                         getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
16743  }
16744
16745  if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8i64, Mask, Zeroable, V1,
16746                                           V2, Subtarget, DAG))
16747    return Shuf128;
16748
16749  // Try to use shift instructions.
16750  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask,
16751                                          Zeroable, Subtarget, DAG))
16752    return Shift;
16753
16754  // Try to use VALIGN.
16755  if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v8i64, V1, V2, Mask,
16756                                            Subtarget, DAG))
16757    return Rotate;
16758
16759  // Try to use PALIGNR.
16760  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i64, V1, V2, Mask,
16761                                                Subtarget, DAG))
16762    return Rotate;
16763
16764  if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
16765    return Unpck;
16766  // If we have AVX512F support, we can use VEXPAND.
16767  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1, V2,
16768                                       DAG, Subtarget))
16769    return V;
16770
16771  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
16772                                          Zeroable, Subtarget, DAG))
16773    return Blend;
16774
16775  return lowerShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, DAG);
16776}
16777
16778/// Handle lowering of 16-lane 32-bit integer shuffles.
16779static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16780                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16781                                  const X86Subtarget &Subtarget,
16782                                  SelectionDAG &DAG) {
16783  assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
16784  assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
16785  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16786
16787  // Whenever we can lower this as a zext, that instruction is strictly faster
16788  // than any alternative. It also allows us to fold memory operands into the
16789  // shuffle in many cases.
16790  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16791          DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
16792    return ZExt;
16793
16794  // If the shuffle mask is repeated in each 128-bit lane we can use more
16795  // efficient instructions that mirror the shuffles across the four 128-bit
16796  // lanes.
16797  SmallVector<int, 4> RepeatedMask;
16798  bool Is128BitLaneRepeatedShuffle =
16799      is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
16800  if (Is128BitLaneRepeatedShuffle) {
16801    assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16802    if (V2.isUndef())
16803      return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
16804                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16805
16806    // Use dedicated unpack instructions for masks that match their pattern.
16807    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
16808      return V;
16809  }
16810
16811  // Try to use shift instructions.
16812  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask,
16813                                          Zeroable, Subtarget, DAG))
16814    return Shift;
16815
16816  // Try to use VALIGN.
16817  if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v16i32, V1, V2, Mask,
16818                                            Subtarget, DAG))
16819    return Rotate;
16820
16821  // Try to use byte rotation instructions.
16822  if (Subtarget.hasBWI())
16823    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i32, V1, V2, Mask,
16824                                                  Subtarget, DAG))
16825      return Rotate;
16826
16827  // Assume that a single SHUFPS is faster than using a permv shuffle.
16828  // If some CPU is harmed by the domain switch, we can fix it in a later pass.
16829  if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
16830    SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1);
16831    SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2);
16832    SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask,
16833                                            CastV1, CastV2, DAG);
16834    return DAG.getBitcast(MVT::v16i32, ShufPS);
16835  }
16836  // If we have AVX512F support, we can use VEXPAND.
16837  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask, V1, V2,
16838                                       DAG, Subtarget))
16839    return V;
16840
16841  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
16842                                          Zeroable, Subtarget, DAG))
16843    return Blend;
16844  return lowerShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, DAG);
16845}
16846
16847/// Handle lowering of 32-lane 16-bit integer shuffles.
16848static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16849                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16850                                  const X86Subtarget &Subtarget,
16851                                  SelectionDAG &DAG) {
16852  assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
16853  assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
16854  assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
16855  assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
16856
16857  // Whenever we can lower this as a zext, that instruction is strictly faster
16858  // than any alternative. It also allows us to fold memory operands into the
16859  // shuffle in many cases.
16860  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16861          DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16862    return ZExt;
16863
16864  // Use dedicated unpack instructions for masks that match their pattern.
16865  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
16866    return V;
16867
16868  // Try to use shift instructions.
16869  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask,
16870                                          Zeroable, Subtarget, DAG))
16871    return Shift;
16872
16873  // Try to use byte rotation instructions.
16874  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i16, V1, V2, Mask,
16875                                                Subtarget, DAG))
16876    return Rotate;
16877
16878  if (V2.isUndef()) {
16879    SmallVector<int, 8> RepeatedMask;
16880    if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
16881      // As this is a single-input shuffle, the repeated mask should be
16882      // a strictly valid v8i16 mask that we can pass through to the v8i16
16883      // lowering to handle even the v32 case.
16884      return lowerV8I16GeneralSingleInputShuffle(
16885          DL, MVT::v32i16, V1, RepeatedMask, Subtarget, DAG);
16886    }
16887  }
16888
16889  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask,
16890                                                Zeroable, Subtarget, DAG))
16891    return Blend;
16892
16893  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i16, Mask, V1, V2,
16894                                              Zeroable, Subtarget, DAG))
16895    return PSHUFB;
16896
16897  return lowerShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, DAG);
16898}
16899
16900/// Handle lowering of 64-lane 8-bit integer shuffles.
16901static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16902                                 const APInt &Zeroable, SDValue V1, SDValue V2,
16903                                 const X86Subtarget &Subtarget,
16904                                 SelectionDAG &DAG) {
16905  assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
16906  assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
16907  assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
16908  assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
16909
16910  // Whenever we can lower this as a zext, that instruction is strictly faster
16911  // than any alternative. It also allows us to fold memory operands into the
16912  // shuffle in many cases.
16913  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16914          DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
16915    return ZExt;
16916
16917  // Use dedicated unpack instructions for masks that match their pattern.
16918  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
16919    return V;
16920
16921  // Use dedicated pack instructions for masks that match their pattern.
16922  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v64i8, Mask, V1, V2, DAG,
16923                                       Subtarget))
16924    return V;
16925
16926  // Try to use shift instructions.
16927  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask,
16928                                          Zeroable, Subtarget, DAG))
16929    return Shift;
16930
16931  // Try to use byte rotation instructions.
16932  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v64i8, V1, V2, Mask,
16933                                                Subtarget, DAG))
16934    return Rotate;
16935
16936  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v64i8, Mask, V1, V2,
16937                                              Zeroable, Subtarget, DAG))
16938    return PSHUFB;
16939
16940  // VBMI can use VPERMV/VPERMV3 byte shuffles.
16941  if (Subtarget.hasVBMI())
16942    return lowerShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, DAG);
16943
16944  // Try to create an in-lane repeating shuffle mask and then shuffle the
16945  // results into the target lanes.
16946  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16947          DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
16948    return V;
16949
16950  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask,
16951                                          Zeroable, Subtarget, DAG))
16952    return Blend;
16953
16954  // Try to simplify this by merging 128-bit lanes to enable a lane-based
16955  // shuffle.
16956  if (!V2.isUndef())
16957    if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16958            DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
16959      return Result;
16960
16961  // FIXME: Implement direct support for this type!
16962  return splitAndLowerShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
16963}
16964
16965/// High-level routine to lower various 512-bit x86 vector shuffles.
16966///
16967/// This routine either breaks down the specific type of a 512-bit x86 vector
16968/// shuffle or splits it into two 256-bit shuffles and fuses the results back
16969/// together based on the available instructions.
16970static SDValue lower512BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
16971                                  MVT VT, SDValue V1, SDValue V2,
16972                                  const APInt &Zeroable,
16973                                  const X86Subtarget &Subtarget,
16974                                  SelectionDAG &DAG) {
16975  assert(Subtarget.hasAVX512() &&
16976         "Cannot lower 512-bit vectors w/o basic ISA!");
16977
16978  // If we have a single input to the zero element, insert that into V1 if we
16979  // can do so cheaply.
16980  int NumElts = Mask.size();
16981  int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
16982
16983  if (NumV2Elements == 1 && Mask[0] >= NumElts)
16984    if (SDValue Insertion = lowerShuffleAsElementInsertion(
16985            DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
16986      return Insertion;
16987
16988  // Handle special cases where the lower or upper half is UNDEF.
16989  if (SDValue V =
16990          lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
16991    return V;
16992
16993  // Check for being able to broadcast a single element.
16994  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, Mask,
16995                                                  Subtarget, DAG))
16996    return Broadcast;
16997
16998  // Dispatch to each element type for lowering. If we don't have support for
16999  // specific element type shuffles at 512 bits, immediately split them and
17000  // lower them. Each lowering routine of a given type is allowed to assume that
17001  // the requisite ISA extensions for that element type are available.
17002  switch (VT.SimpleTy) {
17003  case MVT::v8f64:
17004    return lowerV8F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17005  case MVT::v16f32:
17006    return lowerV16F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17007  case MVT::v8i64:
17008    return lowerV8I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17009  case MVT::v16i32:
17010    return lowerV16I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17011  case MVT::v32i16:
17012    return lowerV32I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17013  case MVT::v64i8:
17014    return lowerV64I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17015
17016  default:
17017    llvm_unreachable("Not a valid 512-bit x86 vector type!");
17018  }
17019}
17020
17021static SDValue lower1BitShuffleAsKSHIFTR(const SDLoc &DL, ArrayRef<int> Mask,
17022                                         MVT VT, SDValue V1, SDValue V2,
17023                                         const X86Subtarget &Subtarget,
17024                                         SelectionDAG &DAG) {
17025  // Shuffle should be unary.
17026  if (!V2.isUndef())
17027    return SDValue();
17028
17029  int ShiftAmt = -1;
17030  int NumElts = Mask.size();
17031  for (int i = 0; i != NumElts; ++i) {
17032    int M = Mask[i];
17033    assert((M == SM_SentinelUndef || (0 <= M && M < NumElts)) &&
17034           "Unexpected mask index.");
17035    if (M < 0)
17036      continue;
17037
17038    // The first non-undef element determines our shift amount.
17039    if (ShiftAmt < 0) {
17040      ShiftAmt = M - i;
17041      // Need to be shifting right.
17042      if (ShiftAmt <= 0)
17043        return SDValue();
17044    }
17045    // All non-undef elements must shift by the same amount.
17046    if (ShiftAmt != M - i)
17047      return SDValue();
17048  }
17049  assert(ShiftAmt >= 0 && "All undef?");
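  // E.g. for a v8i1 mask <2,3,4,5,6,7,u,u> (u = undef), every defined element
  // satisfies M - i == 2, so ShiftAmt = 2 and the whole shuffle is a KSHIFTR
  // by 2.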
17050
17051  // Great, we found a right shift.
17052  MVT WideVT = VT;
17053  if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
17054    WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
17055  SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
17056                            DAG.getUNDEF(WideVT), V1,
17057                            DAG.getIntPtrConstant(0, DL));
17058  Res = DAG.getNode(X86ISD::KSHIFTR, DL, WideVT, Res,
17059                    DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
17060  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
17061                     DAG.getIntPtrConstant(0, DL));
17062}
17063
17064// Determine if this shuffle can be implemented with a KSHIFT instruction.
17065// Returns the shift amount if possible or -1 if not. This is a simplified
17066// version of matchShuffleAsShift.
17067static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
17068                                    int MaskOffset, const APInt &Zeroable) {
17069  int Size = Mask.size();
17070
17071  auto CheckZeros = [&](int Shift, bool Left) {
17072    for (int j = 0; j < Shift; ++j)
17073      if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
17074        return false;
17075
17076    return true;
17077  };
17078
17079  auto MatchShift = [&](int Shift, bool Left) {
17080    unsigned Pos = Left ? Shift : 0;
17081    unsigned Low = Left ? 0 : Shift;
17082    unsigned Len = Size - Shift;
17083    return isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset);
17084  };
17085
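  // E.g. with MaskOffset == 0 (matching against V1), a v8i1 mask whose two low
  // elements are zeroable and whose elements 2..7 read V1 elements 0..5 in
  // order matches Shift = 2, Left = true, i.e. a KSHIFTL by 2.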
17086  for (int Shift = 1; Shift != Size; ++Shift)
17087    for (bool Left : {true, false})
17088      if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
17089        Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
17090        return Shift;
17091      }
17092
17093  return -1;
17094}
17095
17096
17097// Lower vXi1 vector shuffles.
17098// There is no dedicated instruction on AVX-512 that shuffles the masks.
17099// The only way to shuffle bits is to sign-extend the mask vector to a SIMD
17100// vector, shuffle it, and then truncate it back.
17101static SDValue lower1BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
17102                                MVT VT, SDValue V1, SDValue V2,
17103                                const APInt &Zeroable,
17104                                const X86Subtarget &Subtarget,
17105                                SelectionDAG &DAG) {
17106  assert(Subtarget.hasAVX512() &&
17107         "Cannot lower 512-bit vectors w/o basic ISA!");
17108
17109  int NumElts = Mask.size();
17110
17111  // Try to recognize shuffles that are just padding a subvector with zeros.
17112  int SubvecElts = 0;
17113  int Src = -1;
17114  for (int i = 0; i != NumElts; ++i) {
17115    if (Mask[i] >= 0) {
17116      // Grab the source from the first valid mask. All subsequent elements need
17117      // to use this same source.
17118      if (Src < 0)
17119        Src = Mask[i] / NumElts;
17120      if (Src != (Mask[i] / NumElts) || (Mask[i] % NumElts) != i)
17121        break;
17122    }
17123
17124    ++SubvecElts;
17125  }
17126  assert(SubvecElts != NumElts && "Identity shuffle?");
17127
17128  // Clip to a power of 2.
17129  SubvecElts = PowerOf2Floor(SubvecElts);
17130
17131  // Make sure the number of zeroable bits in the top at least covers the bits
17132  // not covered by the subvector.
17133  if ((int)Zeroable.countLeadingOnes() >= (NumElts - SubvecElts)) {
17134    assert(Src >= 0 && "Expected a source!");
17135    MVT ExtractVT = MVT::getVectorVT(MVT::i1, SubvecElts);
17136    SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT,
17137                                  Src == 0 ? V1 : V2,
17138                                  DAG.getIntPtrConstant(0, DL));
17139    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
17140                       DAG.getConstant(0, DL, VT),
17141                       Extract, DAG.getIntPtrConstant(0, DL));
17142  }
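  // For instance, a v8i1 shuffle whose first four elements are the identity
  // <0,1,2,3> and whose upper four elements are zeroable yields
  // SubvecElts == 4, so the low v4i1 of V1 is extracted and inserted into an
  // all-zero v8i1 vector.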
17143
17144  // Try a simple shift right with undef elements. Later we'll try with zeros.
17145  if (SDValue Shift = lower1BitShuffleAsKSHIFTR(DL, Mask, VT, V1, V2, Subtarget,
17146                                                DAG))
17147    return Shift;
17148
17149  // Try to match KSHIFTs.
17150  unsigned Offset = 0;
17151  for (SDValue V : { V1, V2 }) {
17152    unsigned Opcode;
17153    int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, Offset, Zeroable);
17154    if (ShiftAmt >= 0) {
17155      MVT WideVT = VT;
17156      if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
17157        WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
17158      SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
17159                                DAG.getUNDEF(WideVT), V,
17160                                DAG.getIntPtrConstant(0, DL));
17161      // Widened right shifts need two shifts to ensure we shift in zeroes.
17162      if (Opcode == X86ISD::KSHIFTR && WideVT != VT) {
17163        int WideElts = WideVT.getVectorNumElements();
17164        // Shift left to put the original vector in the MSBs of the new size.
17165        Res = DAG.getNode(X86ISD::KSHIFTL, DL, WideVT, Res,
17166                          DAG.getTargetConstant(WideElts - NumElts, DL, MVT::i8));
17167        // Increase the shift amount to account for the left shift.
17168        ShiftAmt += WideElts - NumElts;
17169      }
17170
17171      Res = DAG.getNode(Opcode, DL, WideVT, Res,
17172                        DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
17173      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
17174                         DAG.getIntPtrConstant(0, DL));
17175    }
17176    Offset += NumElts; // Increment for next iteration.
17177  }
17178
17179
17180
17181  MVT ExtVT;
17182  switch (VT.SimpleTy) {
17183  default:
17184    llvm_unreachable("Expected a vector of i1 elements");
17185  case MVT::v2i1:
17186    ExtVT = MVT::v2i64;
17187    break;
17188  case MVT::v4i1:
17189    ExtVT = MVT::v4i32;
17190    break;
17191  case MVT::v8i1:
17192    // Take 512-bit type, more shuffles on KNL. If we have VLX use a 256-bit
17193    // Take a 512-bit type to get more shuffle options on KNL. If we have VLX,
17194    // use a 256-bit shuffle.
17195    break;
17196  case MVT::v16i1:
17197    // Take a 512-bit type, unless we are avoiding 512-bit types and have the
17198    // 256-bit operation available.
17199    ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
17200    break;
17201  case MVT::v32i1:
17202    // Take a 512-bit type, unless we are avoiding 512-bit types and have the
17203    // 256-bit operation available.
17204    assert(Subtarget.hasBWI() && "Expected AVX512BW support");
17205    ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
17206    break;
17207  case MVT::v64i1:
17208    // Fall back to scalarization. FIXME: We can do better if the shuffle
17209    // can be partitioned cleanly.
17210    if (!Subtarget.useBWIRegs())
17211      return SDValue();
17212    ExtVT = MVT::v64i8;
17213    break;
17214  }
17215
17216  V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
17217  V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
17218
17219  SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
17220  // i1 was sign-extended, so we can convert back with a compare against zero.
17221  int NumElems = VT.getVectorNumElements();
17222  if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
17223      (Subtarget.hasDQI() && (NumElems < 32)))
17224    return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, ExtVT),
17225                       Shuffle, ISD::SETGT);
17226
17227  return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
17228}
17229
17230/// Helper function that returns true if the shuffle mask should be
17231/// commuted to improve canonicalization.
17232static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
17233  int NumElements = Mask.size();
17234
17235  int NumV1Elements = 0, NumV2Elements = 0;
17236  for (int M : Mask)
17237    if (M < 0)
17238      continue;
17239    else if (M < NumElements)
17240      ++NumV1Elements;
17241    else
17242      ++NumV2Elements;
17243
17244  // Commute the shuffle as needed such that more elements come from V1 than
17245  // V2. This allows us to match the shuffle pattern strictly on how many
17246  // elements come from V1 without handling the symmetric cases.
17247  if (NumV2Elements > NumV1Elements)
17248    return true;
17249
17250  assert(NumV1Elements > 0 && "No V1 indices");
17251
17252  if (NumV2Elements == 0)
17253    return false;
17254
17255  // When the number of V1 and V2 elements are the same, try to minimize the
17256  // number of uses of V2 in the low half of the vector. When that is tied,
17257  // ensure that the sum of indices for V1 is equal to or lower than the sum of
17258  // indices for V2. When those are equal, try to ensure that the number of odd
17259  // indices for V1 is lower than the number of odd indices for V2.
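  // E.g. for the v4i32 mask <4,1,6,3> each input contributes two elements and
  // one low-half element, but the V2 elements occupy result positions 0 and 2
  // (sum 2) versus 1 and 3 (sum 4) for V1, so the mask is commuted.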
17260  if (NumV1Elements == NumV2Elements) {
17261    int LowV1Elements = 0, LowV2Elements = 0;
17262    for (int M : Mask.slice(0, NumElements / 2))
17263      if (M >= NumElements)
17264        ++LowV2Elements;
17265      else if (M >= 0)
17266        ++LowV1Elements;
17267    if (LowV2Elements > LowV1Elements)
17268      return true;
17269    if (LowV2Elements == LowV1Elements) {
17270      int SumV1Indices = 0, SumV2Indices = 0;
17271      for (int i = 0, Size = Mask.size(); i < Size; ++i)
17272        if (Mask[i] >= NumElements)
17273          SumV2Indices += i;
17274        else if (Mask[i] >= 0)
17275          SumV1Indices += i;
17276      if (SumV2Indices < SumV1Indices)
17277        return true;
17278      if (SumV2Indices == SumV1Indices) {
17279        int NumV1OddIndices = 0, NumV2OddIndices = 0;
17280        for (int i = 0, Size = Mask.size(); i < Size; ++i)
17281          if (Mask[i] >= NumElements)
17282            NumV2OddIndices += i % 2;
17283          else if (Mask[i] >= 0)
17284            NumV1OddIndices += i % 2;
17285        if (NumV2OddIndices < NumV1OddIndices)
17286          return true;
17287      }
17288    }
17289  }
17290
17291  return false;
17292}
17293
17294/// Top-level lowering for x86 vector shuffles.
17295///
17296/// This handles decomposition, canonicalization, and lowering of all x86
17297/// vector shuffles. Most of the specific lowering strategies are encapsulated
17298/// above in helper routines. The canonicalization attempts to widen shuffles
17299/// to involve fewer lanes of wider elements, consolidate symmetric patterns
17300/// s.t. only one of the two inputs needs to be tested, etc.
17301static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget,
17302                                   SelectionDAG &DAG) {
17303  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
17304  ArrayRef<int> OrigMask = SVOp->getMask();
17305  SDValue V1 = Op.getOperand(0);
17306  SDValue V2 = Op.getOperand(1);
17307  MVT VT = Op.getSimpleValueType();
17308  int NumElements = VT.getVectorNumElements();
17309  SDLoc DL(Op);
17310  bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
17311
17312  assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
17313         "Can't lower MMX shuffles");
17314
17315  bool V1IsUndef = V1.isUndef();
17316  bool V2IsUndef = V2.isUndef();
17317  if (V1IsUndef && V2IsUndef)
17318    return DAG.getUNDEF(VT);
17319
17320  // When we create a shuffle node, we put the UNDEF node as the second operand,
17321  // but in some cases the first operand may be transformed to UNDEF.
17322  // In this case we should just commute the node.
17323  if (V1IsUndef)
17324    return DAG.getCommutedVectorShuffle(*SVOp);
17325
17326  // Check for non-undef masks pointing at an undef vector and make the masks
17327  // undef as well. This makes it easier to match the shuffle based solely on
17328  // the mask.
17329  if (V2IsUndef &&
17330      any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
17331    SmallVector<int, 8> NewMask(OrigMask.begin(), OrigMask.end());
17332    for (int &M : NewMask)
17333      if (M >= NumElements)
17334        M = -1;
17335    return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
17336  }
17337
17338  // Check for illegal shuffle mask element index values.
17339  int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
17340  (void)MaskUpperLimit;
17341  assert(llvm::all_of(OrigMask,
17342                      [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
17343         "Out of bounds shuffle index");
17344
17345  // We actually see shuffles that are entirely re-arrangements of a set of
17346  // zero inputs. This mostly happens while decomposing complex shuffles into
17347  // simple ones. Directly lower these as a buildvector of zeros.
17348  APInt KnownUndef, KnownZero;
17349  computeZeroableShuffleElements(OrigMask, V1, V2, KnownUndef, KnownZero);
17350
17351  APInt Zeroable = KnownUndef | KnownZero;
17352  if (Zeroable.isAllOnesValue())
17353    return getZeroVector(VT, Subtarget, DAG, DL);
17354
17355  bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());
17356
17357  // Try to collapse shuffles into using a vector type with fewer elements but
17358  // wider element types. We cap this to not form integers or floating point
17359  // elements wider than 64 bits, but it might be interesting to form i128
17360  // integers to handle flipping the low and high halves of AVX 256-bit vectors.
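  // For example, the v8i32 mask <0,1, 8,9, 2,3, 10,11> widens to the v4i64
  // mask <0, 4, 1, 5>, halving the number of elements the per-type lowering
  // has to reason about.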
17361  SmallVector<int, 16> WidenedMask;
17362  if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
17363      canWidenShuffleElements(OrigMask, Zeroable, V2IsZero, WidenedMask)) {
17364    // Shuffle mask widening should not interfere with a broadcast opportunity
17365    // by obfuscating the operands with bitcasts.
17366    // TODO: Avoid lowering directly from this top-level function: make this
17367    // a query (canLowerAsBroadcast) and defer lowering to the type-based calls.
17368    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, OrigMask,
17369                                                    Subtarget, DAG))
17370      return Broadcast;
17371
17372    MVT NewEltVT = VT.isFloatingPoint()
17373                       ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
17374                       : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
17375    int NewNumElts = NumElements / 2;
17376    MVT NewVT = MVT::getVectorVT(NewEltVT, NewNumElts);
17377    // Make sure that the new vector type is legal. For example, v2f64 isn't
17378    // legal on SSE1.
17379    if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
17380      if (V2IsZero) {
17381        // Modify the new Mask to take all zeros from the all-zero vector.
17382        // Choose indices that are blend-friendly.
17383        bool UsedZeroVector = false;
17384        assert(find(WidenedMask, SM_SentinelZero) != WidenedMask.end() &&
17385               "V2's non-undef elements are used?!");
17386        for (int i = 0; i != NewNumElts; ++i)
17387          if (WidenedMask[i] == SM_SentinelZero) {
17388            WidenedMask[i] = i + NewNumElts;
17389            UsedZeroVector = true;
17390          }
17391        // Ensure all elements of V2 are zero - isBuildVectorAllZeros permits
17392        // some elements to be undef.
17393        if (UsedZeroVector)
17394          V2 = getZeroVector(NewVT, Subtarget, DAG, DL);
17395      }
17396      V1 = DAG.getBitcast(NewVT, V1);
17397      V2 = DAG.getBitcast(NewVT, V2);
17398      return DAG.getBitcast(
17399          VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
17400    }
17401  }
17402
17403  // Commute the shuffle if it will improve canonicalization.
17404  SmallVector<int, 64> Mask(OrigMask.begin(), OrigMask.end());
17405  if (canonicalizeShuffleMaskWithCommute(Mask)) {
17406    ShuffleVectorSDNode::commuteMask(Mask);
17407    std::swap(V1, V2);
17408  }
17409
17410  if (SDValue V = lowerShuffleWithVPMOV(DL, Mask, VT, V1, V2, DAG, Subtarget))
17411    return V;
17412
17413  // For each vector width, delegate to a specialized lowering routine.
17414  if (VT.is128BitVector())
17415    return lower128BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17416
17417  if (VT.is256BitVector())
17418    return lower256BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17419
17420  if (VT.is512BitVector())
17421    return lower512BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17422
17423  if (Is1BitVector)
17424    return lower1BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17425
17426  llvm_unreachable("Unimplemented!");
17427}
17428
17429/// Try to lower a VSELECT instruction to a vector shuffle.
17430static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
17431                                           const X86Subtarget &Subtarget,
17432                                           SelectionDAG &DAG) {
17433  SDValue Cond = Op.getOperand(0);
17434  SDValue LHS = Op.getOperand(1);
17435  SDValue RHS = Op.getOperand(2);
17436  MVT VT = Op.getSimpleValueType();
17437
17438  // Only non-legal VSELECTs reach this lowering; convert those into generic
17439  // shuffles and re-use the shuffle lowering path for blends.
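  // For example, a v4i32 vselect whose condition is the constant vector
  // <-1,0,-1,0> becomes a shuffle of LHS and RHS with mask <0,5,2,7>,
  // selecting LHS lanes where the condition is true and RHS lanes otherwise.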
17440  SmallVector<int, 32> Mask;
17441  if (createShuffleMaskFromVSELECT(Mask, Cond))
17442    return DAG.getVectorShuffle(VT, SDLoc(Op), LHS, RHS, Mask);
17443
17444  return SDValue();
17445}
17446
17447SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
17448  SDValue Cond = Op.getOperand(0);
17449  SDValue LHS = Op.getOperand(1);
17450  SDValue RHS = Op.getOperand(2);
17451
17452  // A vselect where all conditions and data are constants can be optimized into
17453  // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
17454  if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
17455      ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
17456      ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
17457    return SDValue();
17458
17459  // Try to lower this to a blend-style vector shuffle. This can handle all
17460  // constant condition cases.
17461  if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
17462    return BlendOp;
17463
17464  // If this VSELECT has a vector of i1 as a mask, it will be directly matched
17465  // with patterns on the mask registers on AVX-512.
17466  MVT CondVT = Cond.getSimpleValueType();
17467  unsigned CondEltSize = Cond.getScalarValueSizeInBits();
17468  if (CondEltSize == 1)
17469    return Op;
17470
17471  // Variable blends are only legal from SSE4.1 onward.
17472  if (!Subtarget.hasSSE41())
17473    return SDValue();
17474
17475  SDLoc dl(Op);
17476  MVT VT = Op.getSimpleValueType();
17477  unsigned EltSize = VT.getScalarSizeInBits();
17478  unsigned NumElts = VT.getVectorNumElements();
17479
17480  // If the VSELECT is on a 512-bit type, we have to convert a non-i1 condition
17481  // into an i1 condition so that we can use the mask-based 512-bit blend
17482  // instructions.
17483  if (VT.getSizeInBits() == 512) {
17484    // Build a mask by testing the condition against zero.
17485    MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
17486    SDValue Mask = DAG.getSetCC(dl, MaskVT, Cond,
17487                                DAG.getConstant(0, dl, CondVT),
17488                                ISD::SETNE);
17489    // Now return a new VSELECT using the mask.
17490    return DAG.getSelect(dl, VT, Mask, LHS, RHS);
17491  }
17492
17493  // SEXT/TRUNC cases where the mask doesn't match the destination size.
17494  if (CondEltSize != EltSize) {
17495    // If we don't have a sign splat, rely on the expansion.
17496    if (CondEltSize != DAG.ComputeNumSignBits(Cond))
17497      return SDValue();
17498
17499    MVT NewCondSVT = MVT::getIntegerVT(EltSize);
17500    MVT NewCondVT = MVT::getVectorVT(NewCondSVT, NumElts);
17501    Cond = DAG.getSExtOrTrunc(Cond, dl, NewCondVT);
17502    return DAG.getNode(ISD::VSELECT, dl, VT, Cond, LHS, RHS);
17503  }
17504
17505  // Only some types will be legal on some subtargets. If we can emit a legal
17506  // VSELECT-matching blend, return Op, but if we need to expand, return
17507  // a null value.
17508  switch (VT.SimpleTy) {
17509  default:
17510    // Most of the vector types have blends past SSE4.1.
17511    return Op;
17512
17513  case MVT::v32i8:
17514    // The byte blends for AVX vectors were introduced only in AVX2.
17515    if (Subtarget.hasAVX2())
17516      return Op;
17517
17518    return SDValue();
17519
17520  case MVT::v8i16:
17521  case MVT::v16i16: {
17522    // Bitcast everything to the vXi8 type and use a vXi8 vselect.
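    // This relies on each i16 condition element being all-zeros or all-ones
    // (x86's vector boolean contents), so after the bitcast both bytes of an
    // element still select the same source.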
17523    MVT CastVT = MVT::getVectorVT(MVT::i8, NumElts * 2);
17524    Cond = DAG.getBitcast(CastVT, Cond);
17525    LHS = DAG.getBitcast(CastVT, LHS);
17526    RHS = DAG.getBitcast(CastVT, RHS);
17527    SDValue Select = DAG.getNode(ISD::VSELECT, dl, CastVT, Cond, LHS, RHS);
17528    return DAG.getBitcast(VT, Select);
17529  }
17530  }
17531}
17532
17533static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
17534  MVT VT = Op.getSimpleValueType();
17535  SDLoc dl(Op);
17536
17537  if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
17538    return SDValue();
17539
17540  if (VT.getSizeInBits() == 8) {
17541    SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
17542                                  Op.getOperand(0), Op.getOperand(1));
17543    return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
17544  }
17545
17546  if (VT == MVT::f32) {
17547    // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
17548    // the result back to an FR32 register. It's only worth matching if the
17549    // result has a single use which is a store or a bitcast to i32.  And in
17550    // the case of a store, it's not worth it if the index is a constant 0,
17551    // because a MOVSSmr can be used instead, which is smaller and faster.
17552    if (!Op.hasOneUse())
17553      return SDValue();
17554    SDNode *User = *Op.getNode()->use_begin();
17555    if ((User->getOpcode() != ISD::STORE ||
17556         isNullConstant(Op.getOperand(1))) &&
17557        (User->getOpcode() != ISD::BITCAST ||
17558         User->getValueType(0) != MVT::i32))
17559      return SDValue();
17560    SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17561                                  DAG.getBitcast(MVT::v4i32, Op.getOperand(0)),
17562                                  Op.getOperand(1));
17563    return DAG.getBitcast(MVT::f32, Extract);
17564  }
17565
17566  if (VT == MVT::i32 || VT == MVT::i64) {
17567    // ExtractPS/pextrq works with constant index.
17568    if (isa<ConstantSDNode>(Op.getOperand(1)))
17569      return Op;
17570  }
17571
17572  return SDValue();
17573}
17574
17575/// Extract one bit from a mask vector, like v16i1 or v8i1.
17576/// AVX-512 feature.
17577static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
17578                                        const X86Subtarget &Subtarget) {
17579  SDValue Vec = Op.getOperand(0);
17580  SDLoc dl(Vec);
17581  MVT VecVT = Vec.getSimpleValueType();
17582  SDValue Idx = Op.getOperand(1);
17583  MVT EltVT = Op.getSimpleValueType();
17584
17585  assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
17586         "Unexpected vector type in ExtractBitFromMaskVector");
17587
17588  // A variable index can't be handled in mask registers,
17589  // so extend the vector to VR512/VR128.
17590  if (!isa<ConstantSDNode>(Idx)) {
17591    unsigned NumElts = VecVT.getVectorNumElements();
17592    // Extending v8i1/v16i1 to 512 bits gets better performance on KNL
17593    // than extending to 128/256 bits.
17594    MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
17595    MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
17596    SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec);
17597    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ExtEltVT, Ext, Idx);
17598    return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
17599  }
17600
17601  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
17602  if (IdxVal == 0) // the operation is legal
17603    return Op;
17604
17605  // Extend to natively supported kshift.
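  // For example, extracting bit 2 of a v4i1 mask inserts the mask into the low
  // lanes of a wider register (v16i1, or v8i1 with DQI), does a KSHIFTR by 2,
  // and then extracts element 0 below.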
17606  unsigned NumElems = VecVT.getVectorNumElements();
17607  MVT WideVecVT = VecVT;
17608  if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
17609    WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
17610    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
17611                      DAG.getUNDEF(WideVecVT), Vec,
17612                      DAG.getIntPtrConstant(0, dl));
17613  }
17614
17615  // Use kshiftr instruction to move to the lower element.
17616  Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
17617                    DAG.getTargetConstant(IdxVal, dl, MVT::i8));
17618
17619  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
17620                     DAG.getIntPtrConstant(0, dl));
17621}
17622
17623SDValue
17624X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
17625                                           SelectionDAG &DAG) const {
17626  SDLoc dl(Op);
17627  SDValue Vec = Op.getOperand(0);
17628  MVT VecVT = Vec.getSimpleValueType();
17629  SDValue Idx = Op.getOperand(1);
17630
17631  if (VecVT.getVectorElementType() == MVT::i1)
17632    return ExtractBitFromMaskVector(Op, DAG, Subtarget);
17633
17634  if (!isa<ConstantSDNode>(Idx)) {
17635    // It's more profitable to go through memory (1 cycle throughput)
17636    // than using a VMOVD + VPERMV/PSHUFB sequence (2/3 cycles throughput).
17637    // The IACA tool was used to get the performance estimate
17638    // (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer)
17639    //
17640    // example : extractelement <16 x i8> %a, i32 %i
17641    //
17642    // Block Throughput: 3.00 Cycles
17643    // Throughput Bottleneck: Port5
17644    //
17645    // | Num Of |   Ports pressure in cycles  |    |
17646    // |  Uops  |  0  - DV  |  5  |  6  |  7  |    |
17647    // ---------------------------------------------
17648    // |   1    |           | 1.0 |     |     | CP | vmovd xmm1, edi
17649    // |   1    |           | 1.0 |     |     | CP | vpshufb xmm0, xmm0, xmm1
17650    // |   2    | 1.0       | 1.0 |     |     | CP | vpextrb eax, xmm0, 0x0
17651    // Total Num Of Uops: 4
17652    //
17653    //
17654    // Block Throughput: 1.00 Cycles
17655    // Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
17656    //
17657    // |    |  Ports pressure in cycles   |  |
17658    // |Uops| 1 | 2 - D  |3 -  D  | 4 | 5 |  |
17659    // ---------------------------------------------------------
17660    // |2^  |   | 0.5    | 0.5    |1.0|   |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
17661    // |1   |0.5|        |        |   |0.5|  | lea rax, ptr [rsp-0x18]
17662    // |1   |   |0.5, 0.5|0.5, 0.5|   |   |CP| mov al, byte ptr [rdi+rax*1]
17663    // Total Num Of Uops: 4
17664
17665    return SDValue();
17666  }
17667
17668  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
17669
17670  // If this is a 256-bit or 512-bit vector result, first extract the 128-bit
17671  // subvector and then extract the element from that 128-bit vector.
17672  if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
17673    // Get the 128-bit vector.
17674    Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
17675    MVT EltVT = VecVT.getVectorElementType();
17676
17677    unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
17678    assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
17679
17680    // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
17681    // this can be done with a mask.
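    // For example, extracting element 5 of a v8i32: the upper 128-bit chunk
    // (elements 4-7) was extracted above, ElemsPerChunk is 4, and 5 & 3 == 1
    // selects element 1 of that chunk.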
17682    IdxVal &= ElemsPerChunk - 1;
17683    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
17684                       DAG.getIntPtrConstant(IdxVal, dl));
17685  }
17686
17687  assert(VecVT.is128BitVector() && "Unexpected vector length");
17688
17689  MVT VT = Op.getSimpleValueType();
17690
17691  if (VT.getSizeInBits() == 16) {
17692    // If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
17693    // we're going to zero extend the register or fold the store (SSE41 only).
17694    if (IdxVal == 0 && !MayFoldIntoZeroExtend(Op) &&
17695        !(Subtarget.hasSSE41() && MayFoldIntoStore(Op)))
17696      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
17697                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17698                                     DAG.getBitcast(MVT::v4i32, Vec), Idx));
17699
17700    // Transform it so it matches pextrw, which produces a 32-bit result.
17701    SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
17702                                  Op.getOperand(0), Op.getOperand(1));
17703    return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
17704  }
17705
17706  if (Subtarget.hasSSE41())
17707    if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
17708      return Res;
17709
17710  // TODO: We only extract a single element from v16i8; we can probably afford
17711  // to be more aggressive here before using the default approach of spilling to
17712  // the stack.
17713  if (VT.getSizeInBits() == 8 && Op->isOnlyUserOf(Vec.getNode())) {
17714    // Extract either the lowest i32 or any i16, and extract the sub-byte.
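    // For example, extracting byte 5 of a v16i8 takes the i16 path below:
    // word 2 (bytes 4 and 5) is extracted, shifted right by 8, and truncated
    // to i8.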
17715    int DWordIdx = IdxVal / 4;
17716    if (DWordIdx == 0) {
17717      SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17718                                DAG.getBitcast(MVT::v4i32, Vec),
17719                                DAG.getIntPtrConstant(DWordIdx, dl));
17720      int ShiftVal = (IdxVal % 4) * 8;
17721      if (ShiftVal != 0)
17722        Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
17723                          DAG.getConstant(ShiftVal, dl, MVT::i8));
17724      return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
17725    }
17726
17727    int WordIdx = IdxVal / 2;
17728    SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
17729                              DAG.getBitcast(MVT::v8i16, Vec),
17730                              DAG.getIntPtrConstant(WordIdx, dl));
17731    int ShiftVal = (IdxVal % 2) * 8;
17732    if (ShiftVal != 0)
17733      Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
17734                        DAG.getConstant(ShiftVal, dl, MVT::i8));
17735    return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
17736  }
17737
17738  if (VT.getSizeInBits() == 32) {
17739    if (IdxVal == 0)
17740      return Op;
17741
17742    // SHUFPS the element to the lowest double word, then movss.
17743    int Mask[4] = { static_cast<int>(IdxVal), -1, -1, -1 };
17744    Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
17745    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
17746                       DAG.getIntPtrConstant(0, dl));
17747  }
17748
17749  if (VT.getSizeInBits() == 64) {
17750    // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
17751    // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
17752    //        to match extract_elt for f64.
17753    if (IdxVal == 0)
17754      return Op;
17755
17756    // UNPCKHPD the element to the lowest double word, then movsd.
17757    // Note that if the lower 64 bits of the result of the UNPCKHPD are then
17758    // stored to an f64mem, the whole operation is folded into a single MOVHPDmr.
17759    int Mask[2] = { 1, -1 };
17760    Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
17761    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
17762                       DAG.getIntPtrConstant(0, dl));
17763  }
17764
17765  return SDValue();
17766}
17767
17768/// Insert one bit into a mask vector, like v16i1 or v8i1.
17769/// AVX-512 feature.
17770static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
17771                                     const X86Subtarget &Subtarget) {
17772  SDLoc dl(Op);
17773  SDValue Vec = Op.getOperand(0);
17774  SDValue Elt = Op.getOperand(1);
17775  SDValue Idx = Op.getOperand(2);
17776  MVT VecVT = Vec.getSimpleValueType();
17777
17778  if (!isa<ConstantSDNode>(Idx)) {
17779    // Non-constant index. Extend the source and destination,
17780    // insert the element, and then truncate the result.
17781    unsigned NumElts = VecVT.getVectorNumElements();
17782    MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
17783    MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
17784    SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
17785      DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec),
17786      DAG.getNode(ISD::SIGN_EXTEND, dl, ExtEltVT, Elt), Idx);
17787    return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
17788  }
17789
17790  // Copy into a k-register, extract to v1i1 and insert_subvector.
17791  SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Elt);
17792
17793  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT, Vec, EltInVec,
17794                     Op.getOperand(2));
17795}
17796
17797SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
17798                                                  SelectionDAG &DAG) const {
17799  MVT VT = Op.getSimpleValueType();
17800  MVT EltVT = VT.getVectorElementType();
17801  unsigned NumElts = VT.getVectorNumElements();
17802
17803  if (EltVT == MVT::i1)
17804    return InsertBitToMaskVector(Op, DAG, Subtarget);
17805
17806  SDLoc dl(Op);
17807  SDValue N0 = Op.getOperand(0);
17808  SDValue N1 = Op.getOperand(1);
17809  SDValue N2 = Op.getOperand(2);
17810
17811  auto *N2C = dyn_cast<ConstantSDNode>(N2);
17812  if (!N2C || N2C->getAPIntValue().uge(NumElts))
17813    return SDValue();
17814  uint64_t IdxVal = N2C->getZExtValue();
17815
17816  bool IsZeroElt = X86::isZeroNode(N1);
17817  bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);
17818
17819  // If we are inserting an element, see if we can do this more efficiently with
17820  // a blend shuffle of a rematerializable vector than with a costly integer
17821  // insertion.
17822  if ((IsZeroElt || IsAllOnesElt) && Subtarget.hasSSE41() &&
17823      16 <= EltVT.getSizeInBits()) {
17824    SmallVector<int, 8> BlendMask;
17825    for (unsigned i = 0; i != NumElts; ++i)
17826      BlendMask.push_back(i == IdxVal ? i + NumElts : i);
17827    SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
17828                                  : getOnesVector(VT, DAG, dl);
17829    return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
17830  }
17831
17832  // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
17833  // into that, and then insert the subvector back into the result.
17834  if (VT.is256BitVector() || VT.is512BitVector()) {
17835    // With a 256-bit vector, we can insert into the zero element efficiently
17836    // using a blend if we have AVX or AVX2 and the right data type.
17837    if (VT.is256BitVector() && IdxVal == 0) {
17838      // TODO: It is worthwhile to cast integer to floating point and back
17839      // and incur a domain crossing penalty if that's what we'll end up
17840      // doing anyway after extracting to a 128-bit vector.
17841      if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
17842          (Subtarget.hasAVX2() && EltVT == MVT::i32)) {
17843        SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
17844        return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec,
17845                           DAG.getTargetConstant(1, dl, MVT::i8));
17846      }
17847    }
17848
17849    // Get the desired 128-bit vector chunk.
17850    SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
17851
17852    // Insert the element into the desired chunk.
17853    unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
17854    assert(isPowerOf2_32(NumEltsIn128));
17855    // Since NumEltsIn128 is a power of 2, we can use a mask instead of a modulo.
17856    unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);
17857
17858    V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
17859                    DAG.getIntPtrConstant(IdxIn128, dl));
17860
17861    // Insert the changed part back into the bigger vector.
17862    return insert128BitVector(N0, V, IdxVal, DAG, dl);
17863  }
17864  assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
17865
17866  // This will be just movd/movq/movss/movsd.
17867  if (IdxVal == 0 && ISD::isBuildVectorAllZeros(N0.getNode()) &&
17868      (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
17869       EltVT == MVT::i64)) {
17870    N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
17871    return getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
17872  }
17873
17874  // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
17875  // argument. SSE41 is required for pinsrb.
17876  if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
17877    unsigned Opc;
17878    if (VT == MVT::v8i16) {
17879      assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
17880      Opc = X86ISD::PINSRW;
17881    } else {
17882      assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
17883      assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
17884      Opc = X86ISD::PINSRB;
17885    }
17886
17887    if (N1.getValueType() != MVT::i32)
17888      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
17889    if (N2.getValueType() != MVT::i32)
17890      N2 = DAG.getIntPtrConstant(IdxVal, dl);
17891    return DAG.getNode(Opc, dl, VT, N0, N1, N2);
17892  }
17893
17894  if (Subtarget.hasSSE41()) {
17895    if (EltVT == MVT::f32) {
17896      // Bits [7:6] of the constant are the source select. This will always be
17897      //   zero here. The DAG Combiner may combine an extract_elt index into
17898      //   these bits. For example (insert (extract, 3), 2) could be matched by
17899      //   putting the '3' into bits [7:6] of X86ISD::INSERTPS.
17900      // Bits [5:4] of the constant are the destination select. This is the
17901      //   value of the incoming immediate.
17902      // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
17903      //   combine either bitwise AND or insert of float 0.0 to set these bits.
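      // For example, IdxVal == 2 yields the immediate 0x20 used below:
      //   source select 0, destination select 2, zero mask 0.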
17904
17905      bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize();
17906      if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
17907        // If this is an insertion of 32-bits into the low 32-bits of
17908        // a vector, we prefer to generate a blend with immediate rather
17909        // than an insertps. Blends are simpler operations in hardware and so
17910        // will always have equal or better performance than insertps.
17911        // But if optimizing for size and there's a load folding opportunity,
17912        // generate insertps because blendps does not have a 32-bit memory
17913        // operand form.
17914        N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
17915        return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1,
17916                           DAG.getTargetConstant(1, dl, MVT::i8));
17917      }
17918      // Create this as a scalar-to-vector.
17919      N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
17920      return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1,
17921                         DAG.getTargetConstant(IdxVal << 4, dl, MVT::i8));
17922    }
17923
17924    // PINSR* works with constant index.
17925    if (EltVT == MVT::i32 || EltVT == MVT::i64)
17926      return Op;
17927  }
17928
17929  return SDValue();
17930}
17931
17932static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
17933                                     SelectionDAG &DAG) {
17934  SDLoc dl(Op);
17935  MVT OpVT = Op.getSimpleValueType();
17936
17937  // It's always cheaper to replace a xor+movd with xorps, and it simplifies
17938  // further combines.
17939  if (X86::isZeroNode(Op.getOperand(0)))
17940    return getZeroVector(OpVT, Subtarget, DAG, dl);
17941
17942  // If this is a 256-bit or 512-bit vector result, first insert into a 128-bit
17943  // vector and then insert into the wider vector.
17944  if (!OpVT.is128BitVector()) {
17945    // Insert into a 128-bit vector.
17946    unsigned SizeFactor = OpVT.getSizeInBits() / 128;
17947    MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
17948                                 OpVT.getVectorNumElements() / SizeFactor);
17949
17950    Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
17951
17952    // Insert the 128-bit vector.
17953    return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
17954  }
17955  assert(OpVT.is128BitVector() && OpVT.isInteger() && OpVT != MVT::v2i64 &&
17956         "Expected an SSE type!");
17957
17958  // Pass through a v4i32 SCALAR_TO_VECTOR as that's what we use in tblgen.
17959  if (OpVT == MVT::v4i32)
17960    return Op;
17961
17962  SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
17963  return DAG.getBitcast(
17964      OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
17965}
17966
17967// Lower a node with an INSERT_SUBVECTOR opcode.  This may result in a
17968// simple superregister reference or explicit instructions to insert
17969// the upper bits of a vector.
17970static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
17971                                     SelectionDAG &DAG) {
17972  assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1);
17973
17974  return insert1BitVector(Op, DAG, Subtarget);
17975}
17976
17977static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
17978                                      SelectionDAG &DAG) {
17979  assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
17980         "Only vXi1 extract_subvectors need custom lowering");
17981
17982  SDLoc dl(Op);
17983  SDValue Vec = Op.getOperand(0);
17984  SDValue Idx = Op.getOperand(1);
17985
17986  if (!isa<ConstantSDNode>(Idx))
17987    return SDValue();
17988
17989  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
17990  if (IdxVal == 0) // the operation is legal
17991    return Op;
17992
17993  MVT VecVT = Vec.getSimpleValueType();
17994  unsigned NumElems = VecVT.getVectorNumElements();
17995
17996  // Extend to natively supported kshift.
17997  MVT WideVecVT = VecVT;
17998  if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
17999    WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
18000    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
18001                      DAG.getUNDEF(WideVecVT), Vec,
18002                      DAG.getIntPtrConstant(0, dl));
18003  }
18004
18005  // Shift to the LSB.
18006  Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
18007                    DAG.getTargetConstant(IdxVal, dl, MVT::i8));
18008
18009  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, Op.getValueType(), Vec,
18010                     DAG.getIntPtrConstant(0, dl));
18011}
18012
18013// Returns the appropriate wrapper opcode for a global reference.
18014unsigned X86TargetLowering::getGlobalWrapperKind(
18015    const GlobalValue *GV, const unsigned char OpFlags) const {
18016  // References to absolute symbols are never PC-relative.
18017  if (GV && GV->isAbsoluteSymbolRef())
18018    return X86ISD::Wrapper;
18019
18020  CodeModel::Model M = getTargetMachine().getCodeModel();
18021  if (Subtarget.isPICStyleRIPRel() &&
18022      (M == CodeModel::Small || M == CodeModel::Kernel))
18023    return X86ISD::WrapperRIP;
18024
18025  // GOTPCREL references must always use RIP.
18026  if (OpFlags == X86II::MO_GOTPCREL)
18027    return X86ISD::WrapperRIP;
18028
18029  return X86ISD::Wrapper;
18030}
18031
18032// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
18033// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
18034// one of the above mentioned nodes. It has to be wrapped because otherwise
18035// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
18036  // be used to form an addressing mode. These wrapped nodes will be selected
18037// into MOV32ri.
18038SDValue
18039X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
18040  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
18041
18042  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18043  // global base reg.
18044  unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
18045
18046  auto PtrVT = getPointerTy(DAG.getDataLayout());
18047  SDValue Result = DAG.getTargetConstantPool(
18048      CP->getConstVal(), PtrVT, CP->getAlignment(), CP->getOffset(), OpFlag);
18049  SDLoc DL(CP);
18050  Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
18051  // With PIC, the address is actually $g + Offset.
18052  if (OpFlag) {
18053    Result =
18054        DAG.getNode(ISD::ADD, DL, PtrVT,
18055                    DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
18056  }
18057
18058  return Result;
18059}
18060
18061SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
18062  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
18063
18064  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18065  // global base reg.
18066  unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
18067
18068  auto PtrVT = getPointerTy(DAG.getDataLayout());
18069  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
18070  SDLoc DL(JT);
18071  Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
18072
18073  // With PIC, the address is actually $g + Offset.
18074  if (OpFlag)
18075    Result =
18076        DAG.getNode(ISD::ADD, DL, PtrVT,
18077                    DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
18078
18079  return Result;
18080}
18081
18082SDValue X86TargetLowering::LowerExternalSymbol(SDValue Op,
18083                                               SelectionDAG &DAG) const {
18084  return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
18085}
18086
18087SDValue
18088X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
18089  // Create the TargetBlockAddress node.
18090  unsigned char OpFlags =
18091    Subtarget.classifyBlockAddressReference();
18092  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
18093  int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
18094  SDLoc dl(Op);
18095  auto PtrVT = getPointerTy(DAG.getDataLayout());
18096  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
18097  Result = DAG.getNode(getGlobalWrapperKind(), dl, PtrVT, Result);
18098
18099  // With PIC, the address is actually $g + Offset.
18100  if (isGlobalRelativeToPICBase(OpFlags)) {
18101    Result = DAG.getNode(ISD::ADD, dl, PtrVT,
18102                         DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
18103  }
18104
18105  return Result;
18106}
18107
18108/// Creates target global address or external symbol nodes for calls or
18109/// other uses.
18110SDValue X86TargetLowering::LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
18111                                                 bool ForCall) const {
18112  // Unpack the global address or external symbol.
18113  const SDLoc &dl = SDLoc(Op);
18114  const GlobalValue *GV = nullptr;
18115  int64_t Offset = 0;
18116  const char *ExternalSym = nullptr;
18117  if (const auto *G = dyn_cast<GlobalAddressSDNode>(Op)) {
18118    GV = G->getGlobal();
18119    Offset = G->getOffset();
18120  } else {
18121    const auto *ES = cast<ExternalSymbolSDNode>(Op);
18122    ExternalSym = ES->getSymbol();
18123  }
18124
18125  // Calculate some flags for address lowering.
18126  const Module &Mod = *DAG.getMachineFunction().getFunction().getParent();
18127  unsigned char OpFlags;
18128  if (ForCall)
18129    OpFlags = Subtarget.classifyGlobalFunctionReference(GV, Mod);
18130  else
18131    OpFlags = Subtarget.classifyGlobalReference(GV, Mod);
18132  bool HasPICReg = isGlobalRelativeToPICBase(OpFlags);
18133  bool NeedsLoad = isGlobalStubReference(OpFlags);
18134
18135  CodeModel::Model M = DAG.getTarget().getCodeModel();
18136  auto PtrVT = getPointerTy(DAG.getDataLayout());
18137  SDValue Result;
18138
18139  if (GV) {
18140    // Create a target global address if this is a global. If possible, fold the
18141    // offset into the global address reference. Otherwise, ADD it on later.
18142    int64_t GlobalOffset = 0;
18143    if (OpFlags == X86II::MO_NO_FLAG &&
18144        X86::isOffsetSuitableForCodeModel(Offset, M)) {
18145      std::swap(GlobalOffset, Offset);
18146    }
18147    Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, GlobalOffset, OpFlags);
18148  } else {
18149    // If this is not a global address, this must be an external symbol.
18150    Result = DAG.getTargetExternalSymbol(ExternalSym, PtrVT, OpFlags);
18151  }
18152
18153  // If this is a direct call, avoid the wrapper if we don't need to do any
18154  // loads or adds. This allows SDAG ISel to match direct calls.
18155  if (ForCall && !NeedsLoad && !HasPICReg && Offset == 0)
18156    return Result;
18157
18158  Result = DAG.getNode(getGlobalWrapperKind(GV, OpFlags), dl, PtrVT, Result);
18159
18160  // With PIC, the address is actually $g + Offset.
18161  if (HasPICReg) {
18162    Result = DAG.getNode(ISD::ADD, dl, PtrVT,
18163                         DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
18164  }
18165
18166  // For globals that require a load from a stub to get the address, emit the
18167  // load.
18168  if (NeedsLoad)
18169    Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
18170                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
18171
18172  // If there was a non-zero offset that we didn't fold, create an explicit
18173  // addition for it.
18174  if (Offset != 0)
18175    Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
18176                         DAG.getConstant(Offset, dl, PtrVT));
18177
18178  return Result;
18179}
18180
18181SDValue
18182X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
18183  return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
18184}
18185
18186static SDValue
18187GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
18188           SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
18189           unsigned char OperandFlags, bool LocalDynamic = false) {
18190  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
18191  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
18192  SDLoc dl(GA);
18193  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18194                                           GA->getValueType(0),
18195                                           GA->getOffset(),
18196                                           OperandFlags);
18197
18198  X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
18199                                           : X86ISD::TLSADDR;
18200
18201  if (InFlag) {
18202    SDValue Ops[] = { Chain,  TGA, *InFlag };
18203    Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
18204  } else {
18205    SDValue Ops[]  = { Chain, TGA };
18206    Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
18207  }
18208
18209  // TLSADDR will be codegen'ed as a call. Inform MFI that function has calls.
18210  MFI.setAdjustsStack(true);
18211  MFI.setHasCalls(true);
18212
18213  SDValue Flag = Chain.getValue(1);
18214  return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
18215}
18216
18217// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
18218static SDValue
18219LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18220                                const EVT PtrVT) {
18221  SDValue InFlag;
18222  SDLoc dl(GA);  // ? function entry point might be better
18223  SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
18224                                   DAG.getNode(X86ISD::GlobalBaseReg,
18225                                               SDLoc(), PtrVT), InFlag);
18226  InFlag = Chain.getValue(1);
18227
18228  return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
18229}
18230
18231// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
18232static SDValue
18233LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18234                                const EVT PtrVT) {
18235  return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
18236                    X86::RAX, X86II::MO_TLSGD);
18237}
18238
18239static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
18240                                           SelectionDAG &DAG,
18241                                           const EVT PtrVT,
18242                                           bool is64Bit) {
18243  SDLoc dl(GA);
18244
18245  // Get the start address of the TLS block for this module.
18246  X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
18247      .getInfo<X86MachineFunctionInfo>();
18248  MFI->incNumLocalDynamicTLSAccesses();
18249
18250  SDValue Base;
18251  if (is64Bit) {
18252    Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
18253                      X86II::MO_TLSLD, /*LocalDynamic=*/true);
18254  } else {
18255    SDValue InFlag;
18256    SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
18257        DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
18258    InFlag = Chain.getValue(1);
18259    Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
18260                      X86II::MO_TLSLDM, /*LocalDynamic=*/true);
18261  }
18262
18263  // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
18264  // of Base.
18265
18266  // Build x@dtpoff.
18267  unsigned char OperandFlags = X86II::MO_DTPOFF;
18268  unsigned WrapperKind = X86ISD::Wrapper;
18269  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18270                                           GA->getValueType(0),
18271                                           GA->getOffset(), OperandFlags);
18272  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
18273
18274  // Add x@dtpoff with the base.
18275  return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
18276}
18277
18278// Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
18279static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18280                                   const EVT PtrVT, TLSModel::Model model,
18281                                   bool is64Bit, bool isPIC) {
18282  SDLoc dl(GA);
18283
18284  // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
18285  Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
18286                                                         is64Bit ? 257 : 256));
18287
18288  SDValue ThreadPointer =
18289      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
18290                  MachinePointerInfo(Ptr));
18291
18292  unsigned char OperandFlags = 0;
18293  // Most TLS accesses are not RIP relative, even on x86-64.  One exception is
18294  // initialexec.
18295  unsigned WrapperKind = X86ISD::Wrapper;
18296  if (model == TLSModel::LocalExec) {
18297    OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
18298  } else if (model == TLSModel::InitialExec) {
18299    if (is64Bit) {
18300      OperandFlags = X86II::MO_GOTTPOFF;
18301      WrapperKind = X86ISD::WrapperRIP;
18302    } else {
18303      OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
18304    }
18305  } else {
18306    llvm_unreachable("Unexpected model");
18307  }
18308
18309  // emit "addl x@ntpoff,%eax" (local exec)
18310  // or "addl x@indntpoff,%eax" (initial exec)
18311  // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
18312  SDValue TGA =
18313      DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
18314                                 GA->getOffset(), OperandFlags);
18315  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
18316
18317  if (model == TLSModel::InitialExec) {
18318    if (isPIC && !is64Bit) {
18319      Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
18320                           DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
18321                           Offset);
18322    }
18323
18324    Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
18325                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
18326  }
18327
18328  // The address of the thread local variable is the add of the thread
18329  // pointer with the offset of the variable.
18330  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
18331}
18332
18333SDValue
18334X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
18335
18336  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
18337
18338  if (DAG.getTarget().useEmulatedTLS())
18339    return LowerToTLSEmulatedModel(GA, DAG);
18340
18341  const GlobalValue *GV = GA->getGlobal();
18342  auto PtrVT = getPointerTy(DAG.getDataLayout());
18343  bool PositionIndependent = isPositionIndependent();
18344
18345  if (Subtarget.isTargetELF()) {
18346    TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
18347    switch (model) {
18348      case TLSModel::GeneralDynamic:
18349        if (Subtarget.is64Bit())
18350          return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
18351        return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
18352      case TLSModel::LocalDynamic:
18353        return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT,
18354                                           Subtarget.is64Bit());
18355      case TLSModel::InitialExec:
18356      case TLSModel::LocalExec:
18357        return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
18358                                   PositionIndependent);
18359    }
18360    llvm_unreachable("Unknown TLS model.");
18361  }
18362
18363  if (Subtarget.isTargetDarwin()) {
18364    // Darwin only has one model of TLS.  Lower to that.
18365    unsigned char OpFlag = 0;
18366    unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
18367                           X86ISD::WrapperRIP : X86ISD::Wrapper;
18368
18369    // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18370    // global base reg.
18371    bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
18372    if (PIC32)
18373      OpFlag = X86II::MO_TLVP_PIC_BASE;
18374    else
18375      OpFlag = X86II::MO_TLVP;
18376    SDLoc DL(Op);
18377    SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
18378                                                GA->getValueType(0),
18379                                                GA->getOffset(), OpFlag);
18380    SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);
18381
18382    // With PIC32, the address is actually $g + Offset.
18383    if (PIC32)
18384      Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
18385                           DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
18386                           Offset);
18387
18388    // Lowering the machine ISD will make sure everything is in the right
18389    // location.
18390    SDValue Chain = DAG.getEntryNode();
18391    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
18392    Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
18393    SDValue Args[] = { Chain, Offset };
18394    Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
18395    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
18396                               DAG.getIntPtrConstant(0, DL, true),
18397                               Chain.getValue(1), DL);
18398
18399    // TLSCALL will be codegen'ed as a call. Inform MFI that function has calls.
18400    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
18401    MFI.setAdjustsStack(true);
18402
18403    // And our return value (tls address) is in the standard call return value
18404    // location.
18405    unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
18406    return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
18407  }
18408
18409  if (Subtarget.isOSWindows()) {
18410    // Just use the implicit TLS architecture.
18411    // Need to generate something similar to:
18412    //   mov     rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
18413    //                                  ; from TEB
18414    //   mov     ecx, dword [rel _tls_index]; Load index (from C runtime)
18415    //   mov     rcx, qword [rdx+rcx*8]
18416    //   mov     eax, .tls$:tlsvar
18417    //   [rax+rcx] contains the address
18418    // Windows 64bit: gs:0x58
18419    // Windows 32bit: fs:__tls_array
18420
18421    SDLoc dl(GA);
18422    SDValue Chain = DAG.getEntryNode();
18423
18424    // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
18425    // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
18426    // use its literal value of 0x2C.
18427    Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
18428                                        ? Type::getInt8PtrTy(*DAG.getContext(),
18429                                                             256)
18430                                        : Type::getInt32PtrTy(*DAG.getContext(),
18431                                                              257));
18432
18433    SDValue TlsArray = Subtarget.is64Bit()
18434                           ? DAG.getIntPtrConstant(0x58, dl)
18435                           : (Subtarget.isTargetWindowsGNU()
18436                                  ? DAG.getIntPtrConstant(0x2C, dl)
18437                                  : DAG.getExternalSymbol("_tls_array", PtrVT));
18438
18439    SDValue ThreadPointer =
18440        DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));
18441
18442    SDValue res;
18443    if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
18444      res = ThreadPointer;
18445    } else {
18446      // Load the _tls_index variable
18447      SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
18448      if (Subtarget.is64Bit())
18449        IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
18450                             MachinePointerInfo(), MVT::i32);
18451      else
18452        IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());
18453
18454      auto &DL = DAG.getDataLayout();
18455      SDValue Scale =
18456          DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, MVT::i8);
18457      IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);
18458
18459      res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
18460    }
18461
18462    res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());
18463
18464    // Get the offset of the start of the .tls section.
18465    SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18466                                             GA->getValueType(0),
18467                                             GA->getOffset(), X86II::MO_SECREL);
18468    SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);
18469
18470    // The address of the thread local variable is the add of the thread
18471    // pointer with the offset of the variable.
18472    return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
18473  }
18474
18475  llvm_unreachable("TLS not implemented for this target.");
18476}
18477
18478/// Lower SRA_PARTS and friends, which return two i32 values
18479/// and take a 2 x i32 value to shift plus a shift amount.
18480/// TODO: Can this be moved to general expansion code?
18481static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
18482  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
18483  MVT VT = Op.getSimpleValueType();
18484  unsigned VTBits = VT.getSizeInBits();
18485  SDLoc dl(Op);
18486  bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
18487  SDValue ShOpLo = Op.getOperand(0);
18488  SDValue ShOpHi = Op.getOperand(1);
18489  SDValue ShAmt  = Op.getOperand(2);
18490  // ISD::FSHL and ISD::FSHR have defined overflow behavior but ISD::SHL and
18491  // ISD::SRA/L nodes don't. Insert an AND to be safe; it's optimized away
18492  // during isel.
18493  SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
18494                                  DAG.getConstant(VTBits - 1, dl, MVT::i8));
18495  SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
18496                                     DAG.getConstant(VTBits - 1, dl, MVT::i8))
18497                       : DAG.getConstant(0, dl, VT);
18498
18499  SDValue Tmp2, Tmp3;
18500  if (Op.getOpcode() == ISD::SHL_PARTS) {
18501    Tmp2 = DAG.getNode(ISD::FSHL, dl, VT, ShOpHi, ShOpLo, ShAmt);
18502    Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
18503  } else {
18504    Tmp2 = DAG.getNode(ISD::FSHR, dl, VT, ShOpHi, ShOpLo, ShAmt);
18505    Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
18506  }
18507
18508  // If the shift amount is larger than or equal to the width of a part we can't
18509  // rely on the results of shld/shrd. Insert a test and select the appropriate
18510  // values for large shift amounts.
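  // For example, with i32 parts and a SHL_PARTS amount of 40, the test against
  // 32 below is non-zero, so Hi takes Tmp3 (ShOpLo << 8) and Lo takes Tmp1 (0),
  // matching a 64-bit left shift by 40.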
18511  SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
18512                                DAG.getConstant(VTBits, dl, MVT::i8));
18513  SDValue Cond = DAG.getSetCC(dl, MVT::i8, AndNode,
18514                             DAG.getConstant(0, dl, MVT::i8), ISD::SETNE);
18515
18516  SDValue Hi, Lo;
18517  if (Op.getOpcode() == ISD::SHL_PARTS) {
18518    Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
18519    Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
18520  } else {
18521    Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
18522    Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
18523  }
18524
18525  return DAG.getMergeValues({ Lo, Hi }, dl);
18526}
18527
18528static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
18529                                SelectionDAG &DAG) {
18530  MVT VT = Op.getSimpleValueType();
18531  assert((Op.getOpcode() == ISD::FSHL || Op.getOpcode() == ISD::FSHR) &&
18532         "Unexpected funnel shift opcode!");
18533
18534  SDLoc DL(Op);
18535  SDValue Op0 = Op.getOperand(0);
18536  SDValue Op1 = Op.getOperand(1);
18537  SDValue Amt = Op.getOperand(2);
18538
18539  bool IsFSHR = Op.getOpcode() == ISD::FSHR;
18540
18541  if (VT.isVector()) {
18542    assert(Subtarget.hasVBMI2() && "Expected VBMI2");
18543
18544    if (IsFSHR)
18545      std::swap(Op0, Op1);
18546
18547    APInt APIntShiftAmt;
18548    if (X86::isConstantSplat(Amt, APIntShiftAmt)) {
18549      uint64_t ShiftAmt = APIntShiftAmt.urem(VT.getScalarSizeInBits());
18550      return DAG.getNode(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT, Op0,
18551                         Op1, DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
18552    }
18553
18554    return DAG.getNode(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
18555                       Op0, Op1, Amt);
18556  }
18557
18558  assert((VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
18559         "Unexpected funnel shift type!");
18560
18561  // Expand slow SHLD/SHRD cases if we are not optimizing for size.
18562  bool OptForSize = DAG.shouldOptForSize();
18563  if (!OptForSize && Subtarget.isSHLDSlow())
18564    return SDValue();
18565
18566  if (IsFSHR)
18567    std::swap(Op0, Op1);
18568
18569  // i16 needs the shift amount taken modulo 16, but i32/i64 do it implicitly.
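  // (SHLD/SHRD mask the count modulo 32 in hardware rather than modulo 16, so
  // the i16 case needs the explicit AND below.)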
18570  if (VT == MVT::i16)
18571    Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt,
18572                      DAG.getConstant(15, DL, Amt.getValueType()));
18573
18574  unsigned SHDOp = (IsFSHR ? X86ISD::SHRD : X86ISD::SHLD);
18575  return DAG.getNode(SHDOp, DL, VT, Op0, Op1, Amt);
18576}
18577
18578// Try to use a packed vector operation to handle i64 on 32-bit targets when
18579// AVX512DQ is enabled.
18580static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, SelectionDAG &DAG,
18581                                        const X86Subtarget &Subtarget) {
18582  assert((Op.getOpcode() == ISD::SINT_TO_FP ||
18583          Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
18584          Op.getOpcode() == ISD::STRICT_UINT_TO_FP ||
18585          Op.getOpcode() == ISD::UINT_TO_FP) &&
18586         "Unexpected opcode!");
18587  bool IsStrict = Op->isStrictFPOpcode();
18588  unsigned OpNo = IsStrict ? 1 : 0;
18589  SDValue Src = Op.getOperand(OpNo);
18590  MVT SrcVT = Src.getSimpleValueType();
18591  MVT VT = Op.getSimpleValueType();
18592
18593  if (!Subtarget.hasDQI() || SrcVT != MVT::i64 || Subtarget.is64Bit() ||
18594      (VT != MVT::f32 && VT != MVT::f64))
18595    return SDValue();
18596
18597  // Pack the i64 into a vector, do the operation and extract.
18598
18599  // Use a 256-bit input vector (with VLX) so the f32 result is 128 bits.
18600  unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
18601  MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts);
18602  MVT VecVT = MVT::getVectorVT(VT, NumElts);
18603
18604  SDLoc dl(Op);
18605  SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecInVT, Src);
18606  if (IsStrict) {
18607    SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, {VecVT, MVT::Other},
18608                                 {Op.getOperand(0), InVec});
18609    SDValue Chain = CvtVec.getValue(1);
18610    SDValue Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
18611                                DAG.getIntPtrConstant(0, dl));
18612    return DAG.getMergeValues({Value, Chain}, dl);
18613  }
18614
18615  SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, VecVT, InVec);
18616
18617  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
18618                     DAG.getIntPtrConstant(0, dl));
18619}
18620
18621static bool useVectorCast(unsigned Opcode, MVT FromVT, MVT ToVT,
18622                          const X86Subtarget &Subtarget) {
18623  switch (Opcode) {
18624    case ISD::SINT_TO_FP:
18625      // TODO: Handle wider types with AVX/AVX512.
18626      if (!Subtarget.hasSSE2() || FromVT != MVT::v4i32)
18627        return false;
18628      // CVTDQ2PS or (V)CVTDQ2PD
18629      return ToVT == MVT::v4f32 || (Subtarget.hasAVX() && ToVT == MVT::v4f64);
18630
18631    case ISD::UINT_TO_FP:
18632      // TODO: Handle wider types and i64 elements.
18633      if (!Subtarget.hasAVX512() || FromVT != MVT::v4i32)
18634        return false;
18635      // VCVTUDQ2PS or VCVTUDQ2PD
18636      return ToVT == MVT::v4f32 || ToVT == MVT::v4f64;
18637
18638    default:
18639      return false;
18640  }
18641}
18642
18643/// Given a scalar cast operation that is extracted from a vector, try to
18644/// vectorize the cast op followed by extraction. This will avoid an expensive
18645/// round-trip between XMM and GPR.
18646static SDValue vectorizeExtractedCast(SDValue Cast, SelectionDAG &DAG,
18647                                      const X86Subtarget &Subtarget) {
18648  // TODO: This could be enhanced to handle smaller integer types by peeking
18649  // through an extend.
18650  SDValue Extract = Cast.getOperand(0);
18651  MVT DestVT = Cast.getSimpleValueType();
18652  if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
18653      !isa<ConstantSDNode>(Extract.getOperand(1)))
18654    return SDValue();
18655
18656  // See if we have a 128-bit vector cast op for this type of cast.
18657  SDValue VecOp = Extract.getOperand(0);
18658  MVT FromVT = VecOp.getSimpleValueType();
18659  unsigned NumEltsInXMM = 128 / FromVT.getScalarSizeInBits();
18660  MVT Vec128VT = MVT::getVectorVT(FromVT.getScalarType(), NumEltsInXMM);
18661  MVT ToVT = MVT::getVectorVT(DestVT, NumEltsInXMM);
18662  if (!useVectorCast(Cast.getOpcode(), Vec128VT, ToVT, Subtarget))
18663    return SDValue();
18664
18665  // If we are extracting from a non-zero element, first shuffle the source
18666  // vector to allow extracting from element zero.
18667  SDLoc DL(Cast);
18668  if (!isNullConstant(Extract.getOperand(1))) {
18669    SmallVector<int, 16> Mask(FromVT.getVectorNumElements(), -1);
18670    Mask[0] = Extract.getConstantOperandVal(1);
18671    VecOp = DAG.getVectorShuffle(FromVT, DL, VecOp, DAG.getUNDEF(FromVT), Mask);
18672  }
18673  // If the source vector is wider than 128-bits, extract the low part. Do not
18674  // create an unnecessarily wide vector cast op.
18675  if (FromVT != Vec128VT)
18676    VecOp = extract128BitVector(VecOp, 0, DAG, DL);
18677
18678  // cast (extelt V, 0) --> extelt (cast (extract_subv V)), 0
18679  // cast (extelt V, C) --> extelt (cast (extract_subv (shuffle V, [C...]))), 0
18680  SDValue VCast = DAG.getNode(Cast.getOpcode(), DL, ToVT, VecOp);
18681  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, DestVT, VCast,
18682                     DAG.getIntPtrConstant(0, DL));
18683}
18684
18685static SDValue lowerINT_TO_FP_vXi64(SDValue Op, SelectionDAG &DAG,
18686                                    const X86Subtarget &Subtarget) {
18687  SDLoc DL(Op);
18688  bool IsStrict = Op->isStrictFPOpcode();
18689  MVT VT = Op->getSimpleValueType(0);
18690  SDValue Src = Op->getOperand(IsStrict ? 1 : 0);
18691
18692  if (Subtarget.hasDQI()) {
18693    assert(!Subtarget.hasVLX() && "Unexpected features");
18694
18695    assert((Src.getSimpleValueType() == MVT::v2i64 ||
18696            Src.getSimpleValueType() == MVT::v4i64) &&
18697           "Unsupported custom type");
18698
18699    // With AVX512DQ, but not VLX we need to widen to get a 512-bit result type.
18700    assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v4f64) &&
18701           "Unexpected VT!");
18702    MVT WideVT = VT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
18703
18704    // Need to concat with zero vector for strict fp to avoid spurious
18705    // exceptions.
18706    SDValue Tmp = IsStrict ? DAG.getConstant(0, DL, MVT::v8i64)
18707                           : DAG.getUNDEF(MVT::v8i64);
18708    Src = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i64, Tmp, Src,
18709                      DAG.getIntPtrConstant(0, DL));
18710    SDValue Res, Chain;
18711    if (IsStrict) {
18712      Res = DAG.getNode(Op.getOpcode(), DL, {WideVT, MVT::Other},
18713                        {Op->getOperand(0), Src});
18714      Chain = Res.getValue(1);
18715    } else {
18716      Res = DAG.getNode(Op.getOpcode(), DL, WideVT, Src);
18717    }
18718
18719    Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
18720                      DAG.getIntPtrConstant(0, DL));
18721
18722    if (IsStrict)
18723      return DAG.getMergeValues({Res, Chain}, DL);
18724    return Res;
18725  }
18726
18727  bool IsSigned = Op->getOpcode() == ISD::SINT_TO_FP ||
18728                  Op->getOpcode() == ISD::STRICT_SINT_TO_FP;
18729  if (VT != MVT::v4f32 || IsSigned)
18730    return SDValue();
18731
18732  SDValue Zero = DAG.getConstant(0, DL, MVT::v4i64);
18733  SDValue One  = DAG.getConstant(1, DL, MVT::v4i64);
18734  SDValue Sign = DAG.getNode(ISD::OR, DL, MVT::v4i64,
18735                             DAG.getNode(ISD::SRL, DL, MVT::v4i64, Src, One),
18736                             DAG.getNode(ISD::AND, DL, MVT::v4i64, Src, One));
18737  SDValue IsNeg = DAG.getSetCC(DL, MVT::v4i64, Src, Zero, ISD::SETLT);
18738  SDValue SignSrc = DAG.getSelect(DL, MVT::v4i64, IsNeg, Sign, Src);
18739  SmallVector<SDValue, 4> SignCvts(4);
18740  SmallVector<SDValue, 4> Chains(4);
18741  for (int i = 0; i != 4; ++i) {
18742    SDValue Src = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, SignSrc,
18743                              DAG.getIntPtrConstant(i, DL));
18744    if (IsStrict) {
18745      SignCvts[i] =
18746          DAG.getNode(ISD::STRICT_SINT_TO_FP, DL, {MVT::f32, MVT::Other},
18747                      {Op.getOperand(0), Src});
18748      Chains[i] = SignCvts[i].getValue(1);
18749    } else {
18750      SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, DL, MVT::f32, Src);
18751    }
18752  }
18753  SDValue SignCvt = DAG.getBuildVector(VT, DL, SignCvts);
18754
18755  SDValue Slow, Chain;
18756  if (IsStrict) {
18757    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
18758    Slow = DAG.getNode(ISD::STRICT_FADD, DL, {MVT::v4f32, MVT::Other},
18759                       {Chain, SignCvt, SignCvt});
18760    Chain = Slow.getValue(1);
18761  } else {
18762    Slow = DAG.getNode(ISD::FADD, DL, MVT::v4f32, SignCvt, SignCvt);
18763  }
18764
18765  IsNeg = DAG.getNode(ISD::TRUNCATE, DL, MVT::v4i32, IsNeg);
18766  SDValue Cvt = DAG.getSelect(DL, MVT::v4f32, IsNeg, Slow, SignCvt);
18767
18768  if (IsStrict)
18769    return DAG.getMergeValues({Cvt, Chain}, DL);
18770
18771  return Cvt;
18772}
18773
18774SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
18775                                           SelectionDAG &DAG) const {
18776  bool IsStrict = Op->isStrictFPOpcode();
18777  unsigned OpNo = IsStrict ? 1 : 0;
18778  SDValue Src = Op.getOperand(OpNo);
18779  SDValue Chain = IsStrict ? Op->getOperand(0) : DAG.getEntryNode();
18780  MVT SrcVT = Src.getSimpleValueType();
18781  MVT VT = Op.getSimpleValueType();
18782  SDLoc dl(Op);
18783
18784  if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
18785    return Extract;
18786
18787  if (SrcVT.isVector()) {
18788    if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
18789      // Note: Since v2f64 is a legal type, we don't need to zero extend the
18790      // source for strict FP.
18791      if (IsStrict)
18792        return DAG.getNode(
18793            X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
18794            {Chain, DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
18795                                DAG.getUNDEF(SrcVT))});
18796      return DAG.getNode(X86ISD::CVTSI2P, dl, VT,
18797                         DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
18798                                     DAG.getUNDEF(SrcVT)));
18799    }
18800    if (SrcVT == MVT::v2i64 || SrcVT == MVT::v4i64)
18801      return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
18802
18803    return SDValue();
18804  }
18805
18806  assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
18807         "Unknown SINT_TO_FP to lower!");
18808
18809  bool UseSSEReg = isScalarFPTypeInSSEReg(VT);
18810
18811  // These are really Legal; return the operand so the caller accepts it as
18812  // Legal.
18813  if (SrcVT == MVT::i32 && UseSSEReg)
18814    return Op;
18815  if (SrcVT == MVT::i64 && UseSSEReg && Subtarget.is64Bit())
18816    return Op;
18817
18818  if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
18819    return V;
18820
18821  // SSE doesn't have an i16 conversion so we need to promote.
18822  if (SrcVT == MVT::i16 && (UseSSEReg || VT == MVT::f128)) {
18823    SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, Src);
18824    if (IsStrict)
18825      return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
18826                         {Chain, Ext});
18827
18828    return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Ext);
18829  }
18830
18831  if (VT == MVT::f128)
18832    return LowerF128Call(Op, DAG, RTLIB::getSINTTOFP(SrcVT, VT));
18833
18834  SDValue ValueToStore = Src;
18835  if (SrcVT == MVT::i64 && UseSSEReg && !Subtarget.is64Bit())
18836    // Bitcasting to f64 here allows us to do a single 64-bit store from
18837    // an SSE register, avoiding the store forwarding penalty that would come
18838    // with two 32-bit stores.
18839    ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
18840
18841  unsigned Size = SrcVT.getSizeInBits()/8;
18842  MachineFunction &MF = DAG.getMachineFunction();
18843  auto PtrVT = getPointerTy(MF.getDataLayout());
18844  int SSFI = MF.getFrameInfo().CreateStackObject(Size, Size, false);
18845  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
18846  Chain = DAG.getStore(
18847      Chain, dl, ValueToStore, StackSlot,
18848      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
18849  std::pair<SDValue, SDValue> Tmp = BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
18850
18851  if (IsStrict)
18852    return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
18853
18854  return Tmp.first;
18855}
18856
18857std::pair<SDValue, SDValue>
18858X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
18859                             SDValue StackSlot, SelectionDAG &DAG) const {
18860  // Build the FILD
18861  SDLoc DL(Op);
18862  SDVTList Tys;
18863  bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
18864  if (useSSE)
18865    Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
18866  else
18867    Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
18868
18869  unsigned ByteSize = SrcVT.getSizeInBits() / 8;
18870
18871  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
18872  MachineMemOperand *LoadMMO;
18873  if (FI) {
18874    int SSFI = FI->getIndex();
18875    LoadMMO = DAG.getMachineFunction().getMachineMemOperand(
18876        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
18877        MachineMemOperand::MOLoad, ByteSize, ByteSize);
18878  } else {
18879    LoadMMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
18880    StackSlot = StackSlot.getOperand(1);
18881  }
18882  SDValue FILDOps[] = {Chain, StackSlot};
18883  SDValue Result =
18884      DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD, DL,
18885                              Tys, FILDOps, SrcVT, LoadMMO);
18886  Chain = Result.getValue(1);
18887
18888  if (useSSE) {
18889    SDValue InFlag = Result.getValue(2);
18890
18891    // FIXME: Currently the FST is glued to the FILD_FLAG. This
18892    // shouldn't be necessary except that RFP cannot be live across
18893    // multiple blocks. When stackifier is fixed, they can be uncoupled.
18894    MachineFunction &MF = DAG.getMachineFunction();
18895    unsigned SSFISize = Op.getValueSizeInBits() / 8;
18896    int SSFI = MF.getFrameInfo().CreateStackObject(SSFISize, SSFISize, false);
18897    auto PtrVT = getPointerTy(MF.getDataLayout());
18898    SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
18899    Tys = DAG.getVTList(MVT::Other);
18900    SDValue FSTOps[] = {Chain, Result, StackSlot, InFlag};
18901    MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
18902        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
18903        MachineMemOperand::MOStore, SSFISize, SSFISize);
18904
18905    Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, FSTOps,
18906                                    Op.getValueType(), StoreMMO);
18907    Result = DAG.getLoad(
18908        Op.getValueType(), DL, Chain, StackSlot,
18909        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
18910    Chain = Result.getValue(1);
18911  }
18912
18913  return { Result, Chain };
18914}
18915
18916/// Horizontal vector math instructions may be slower than normal math with
18917/// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
18918/// implementation, and likely shuffle complexity of the alternate sequence.
18919static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
18920                                  const X86Subtarget &Subtarget) {
18921  bool IsOptimizingSize = DAG.shouldOptForSize();
18922  bool HasFastHOps = Subtarget.hasFastHorizontalOps();
18923  return !IsSingleSource || IsOptimizingSize || HasFastHOps;
18924}
18925
18926/// 64-bit unsigned integer to double expansion.
18927static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG,
18928                                   const X86Subtarget &Subtarget) {
18929  // This algorithm is not obvious. Here is what we're trying to output:
18930  /*
18931     movq       %rax,  %xmm0
18932     punpckldq  (c0),  %xmm0  // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
18933     subpd      (c1),  %xmm0  // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
18934     #ifdef __SSE3__
18935       haddpd   %xmm0, %xmm0
18936     #else
18937       pshufd   $0x4e, %xmm0, %xmm1
18938       addpd    %xmm1, %xmm0
18939     #endif
18940  */
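  //
  // Why this works: punpckldq interleaves the two 32-bit halves of the input
  // with the exponent words 0x43300000 and 0x45300000, producing the doubles
  //   d0 = 2^52 + lo32
  //   d1 = 2^84 + hi32 * 2^32
  // Subtracting c1 = { 2^52, 2^84 } removes the biases, and the final add
  // (horizontal or shuffled) yields lo32 + hi32 * 2^32, i.e. the original u64
  // rounded to double precision.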
18941
18942  bool IsStrict = Op->isStrictFPOpcode();
18943  unsigned OpNo = IsStrict ? 1 : 0;
18944  SDLoc dl(Op);
18945  LLVMContext *Context = DAG.getContext();
18946
18947  // Build some magic constants.
18948  static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
18949  Constant *C0 = ConstantDataVector::get(*Context, CV0);
18950  auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
18951  SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, 16);
18952
18953  SmallVector<Constant*,2> CV1;
18954  CV1.push_back(
18955    ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
18956                                      APInt(64, 0x4330000000000000ULL))));
18957  CV1.push_back(
18958    ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
18959                                      APInt(64, 0x4530000000000000ULL))));
18960  Constant *C1 = ConstantVector::get(CV1);
18961  SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, 16);
18962
18963  // Load the 64-bit value into an XMM register.
18964  SDValue XR1 =
18965      DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Op.getOperand(OpNo));
18966  SDValue CLod0 =
18967      DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
18968                  MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
18969                  /* Alignment = */ 16);
18970  SDValue Unpck1 =
18971      getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);
18972
18973  SDValue CLod1 =
18974      DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
18975                  MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
18976                  /* Alignment = */ 16);
18977  SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
18978  SDValue Sub;
18979  SDValue Chain;
18980  // TODO: Are there any fast-math-flags to propagate here?
18981  if (IsStrict) {
18982    Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::v2f64, MVT::Other},
18983                      {Op.getOperand(0), XR2F, CLod1});
18984    Chain = Sub.getValue(1);
18985  } else
18986    Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
18987  SDValue Result;
18988
18989  if (!IsStrict && Subtarget.hasSSE3() &&
18990      shouldUseHorizontalOp(true, DAG, Subtarget)) {
18991    // FIXME: Do we need a STRICT version of FHADD?
18992    Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
18993  } else {
18994    SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1});
18995    if (IsStrict) {
18996      Result = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::v2f64, MVT::Other},
18997                           {Chain, Shuffle, Sub});
18998      Chain = Result.getValue(1);
18999    } else
19000      Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub);
19001  }
19002  Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
19003                       DAG.getIntPtrConstant(0, dl));
19004  if (IsStrict)
19005    return DAG.getMergeValues({Result, Chain}, dl);
19006
19007  return Result;
19008}
19009
19010/// 32-bit unsigned integer to float expansion.
19011static SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG,
19012                                   const X86Subtarget &Subtarget) {
19013  unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
19014  SDLoc dl(Op);
19015  // FP constant to bias correct the final result.
19016  SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
19017                                   MVT::f64);
19018
19019  // Load the 32-bit value into an XMM register.
19020  SDValue Load =
19021      DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Op.getOperand(OpNo));
19022
19023  // Zero out the upper parts of the register.
19024  Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
19025
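  // Same 2^52 bias trick as the vector lowerings below: 0x4330000000000000 is
  // 2^52 as a double, OR'ing the zero-extended i32 into its mantissa gives
  // 2^52 + x exactly, and subtracting the bias recovers x.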
19026  // Or the load with the bias.
19027  SDValue Or = DAG.getNode(
19028      ISD::OR, dl, MVT::v2i64,
19029      DAG.getBitcast(MVT::v2i64, Load),
19030      DAG.getBitcast(MVT::v2i64,
19031                     DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
19032  Or =
19033      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
19034                  DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));
19035
19036  if (Op.getNode()->isStrictFPOpcode()) {
19037    // Subtract the bias.
19038    // TODO: Are there any fast-math-flags to propagate here?
19039    SDValue Chain = Op.getOperand(0);
19040    SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::f64, MVT::Other},
19041                              {Chain, Or, Bias});
19042
19043    if (Op.getValueType() == Sub.getValueType())
19044      return Sub;
19045
19046    // Handle final rounding.
19047    std::pair<SDValue, SDValue> ResultPair = DAG.getStrictFPExtendOrRound(
19048        Sub, Sub.getValue(1), dl, Op.getSimpleValueType());
19049
19050    return DAG.getMergeValues({ResultPair.first, ResultPair.second}, dl);
19051  }
19052
19053  // Subtract the bias.
19054  // TODO: Are there any fast-math-flags to propagate here?
19055  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
19056
19057  // Handle final rounding.
19058  return DAG.getFPExtendOrRound(Sub, dl, Op.getSimpleValueType());
19059}
19060
19061static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG,
19062                                     const X86Subtarget &Subtarget,
19063                                     const SDLoc &DL) {
19064  if (Op.getSimpleValueType() != MVT::v2f64)
19065    return SDValue();
19066
19067  bool IsStrict = Op->isStrictFPOpcode();
19068
19069  SDValue N0 = Op.getOperand(IsStrict ? 1 : 0);
19070  assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type");
19071
19072  if (Subtarget.hasAVX512()) {
19073    if (!Subtarget.hasVLX()) {
19074      // Let generic type legalization widen this.
19075      if (!IsStrict)
19076        return SDValue();
19077      // Otherwise pad the integer input with 0s and widen the operation.
19078      N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
19079                       DAG.getConstant(0, DL, MVT::v2i32));
19080      SDValue Res = DAG.getNode(Op->getOpcode(), DL, {MVT::v4f64, MVT::Other},
19081                                {Op.getOperand(0), N0});
19082      SDValue Chain = Res.getValue(1);
19083      Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2f64, Res,
19084                        DAG.getIntPtrConstant(0, DL));
19085      return DAG.getMergeValues({Res, Chain}, DL);
19086    }
19087
19088    // Legalize to v4i32 type.
19089    N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
19090                     DAG.getUNDEF(MVT::v2i32));
19091    if (IsStrict)
19092      return DAG.getNode(X86ISD::STRICT_CVTUI2P, DL, {MVT::v2f64, MVT::Other},
19093                         {Op.getOperand(0), N0});
19094    return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0);
19095  }
19096
19097  // Zero extend to v2i64, OR with the floating point representation of 2^52.
19098  // This gives us the floating point equivalent of 2^52 + the i32 integer
19099  // since double has 52 bits of mantissa. Then subtract 2^52 in floating
19100  // point, leaving just our i32 integers in double format.
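  // For example, for an input of 7: 0x4330000000000000 | 7 = 0x4330000000000007,
  // which is the double 2^52 + 7; subtracting VBias (2^52) leaves exactly 7.0.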
19101  SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v2i64, N0);
19102  SDValue VBias =
19103      DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), DL, MVT::v2f64);
19104  SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v2i64, ZExtIn,
19105                           DAG.getBitcast(MVT::v2i64, VBias));
19106  Or = DAG.getBitcast(MVT::v2f64, Or);
19107
19108  if (IsStrict)
19109    return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v2f64, MVT::Other},
19110                       {Op.getOperand(0), Or, VBias});
19111  return DAG.getNode(ISD::FSUB, DL, MVT::v2f64, Or, VBias);
19112}
19113
19114static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
19115                                     const X86Subtarget &Subtarget) {
19116  SDLoc DL(Op);
19117  bool IsStrict = Op->isStrictFPOpcode();
19118  SDValue V = Op->getOperand(IsStrict ? 1 : 0);
19119  MVT VecIntVT = V.getSimpleValueType();
19120  assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
19121         "Unsupported custom type");
19122
19123  if (Subtarget.hasAVX512()) {
19124    // With AVX512, but not VLX we need to widen to get a 512-bit result type.
19125    assert(!Subtarget.hasVLX() && "Unexpected features");
19126    MVT VT = Op->getSimpleValueType(0);
19127
19128    // v8i32->v8f64 is legal with AVX512 so just return it.
19129    if (VT == MVT::v8f64)
19130      return Op;
19131
19132    assert((VT == MVT::v4f32 || VT == MVT::v8f32 || VT == MVT::v4f64) &&
19133           "Unexpected VT!");
19134    MVT WideVT = VT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
19135    MVT WideIntVT = VT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
19136    // Need to concat with zero vector for strict fp to avoid spurious
19137    // exceptions.
19138    SDValue Tmp =
19139        IsStrict ? DAG.getConstant(0, DL, WideIntVT) : DAG.getUNDEF(WideIntVT);
19140    V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideIntVT, Tmp, V,
19141                    DAG.getIntPtrConstant(0, DL));
19142    SDValue Res, Chain;
19143    if (IsStrict) {
19144      Res = DAG.getNode(ISD::STRICT_UINT_TO_FP, DL, {WideVT, MVT::Other},
19145                        {Op->getOperand(0), V});
19146      Chain = Res.getValue(1);
19147    } else {
19148      Res = DAG.getNode(ISD::UINT_TO_FP, DL, WideVT, V);
19149    }
19150
19151    Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
19152                      DAG.getIntPtrConstant(0, DL));
19153
19154    if (IsStrict)
19155      return DAG.getMergeValues({Res, Chain}, DL);
19156    return Res;
19157  }
19158
19159  if (Subtarget.hasAVX() && VecIntVT == MVT::v4i32 &&
19160      Op->getSimpleValueType(0) == MVT::v4f64) {
19161    SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i64, V);
19162    Constant *Bias = ConstantFP::get(
19163        *DAG.getContext(),
19164        APFloat(APFloat::IEEEdouble(), APInt(64, 0x4330000000000000ULL)));
19165    auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
19166    SDValue CPIdx = DAG.getConstantPool(Bias, PtrVT, /*Alignment*/ 8);
19167    SDVTList Tys = DAG.getVTList(MVT::v4f64, MVT::Other);
19168    SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
19169    SDValue VBias = DAG.getMemIntrinsicNode(
19170        X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::f64,
19171        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
19172        /*Alignment*/ 8, MachineMemOperand::MOLoad);
19173
19174    SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v4i64, ZExtIn,
19175                             DAG.getBitcast(MVT::v4i64, VBias));
19176    Or = DAG.getBitcast(MVT::v4f64, Or);
19177
19178    if (IsStrict)
19179      return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v4f64, MVT::Other},
19180                         {Op.getOperand(0), Or, VBias});
19181    return DAG.getNode(ISD::FSUB, DL, MVT::v4f64, Or, VBias);
19182  }
19183
19184  // The algorithm is the following:
19185  // #ifdef __SSE4_1__
19186  //     uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
19187  //     uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
19188  //                                 (uint4) 0x53000000, 0xaa);
19189  // #else
19190  //     uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
19191  //     uint4 hi = (v >> 16) | (uint4) 0x53000000;
19192  // #endif
19193  //     float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
19194  //     return (float4) lo + fhi;
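  //
  // Why this works: 0x4b000000 is 2^23 as a float and 0x53000000 is 2^39, both
  // with all-zero mantissa bits, so placing the low/high 16-bit halves of each
  // lane into those mantissas yields the exact floats (2^23 + lo) and
  // (2^39 + hi * 2^16). Subtracting (0x1.0p39f + 0x1.0p23f) from hi and adding
  // lo then leaves hi * 2^16 + lo, the original u32 value, with at most one
  // rounding at the final add.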
19195
19196  bool Is128 = VecIntVT == MVT::v4i32;
19197  MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
19198  // If we convert to something other than the supported type, e.g., to v4f64,
19199  // abort early.
19200  if (VecFloatVT != Op->getSimpleValueType(0))
19201    return SDValue();
19202
19203  // In the #ifdef/#else code, we have in common:
19204  // - The vector of constants:
19205  // -- 0x4b000000
19206  // -- 0x53000000
19207  // - A shift:
19208  // -- v >> 16
19209
19210  // Create the splat vector for 0x4b000000.
19211  SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
19212  // Create the splat vector for 0x53000000.
19213  SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);
19214
19215  // Create the right shift.
19216  SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
19217  SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
19218
19219  SDValue Low, High;
19220  if (Subtarget.hasSSE41()) {
19221    MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
19222    //     uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
19223    SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
19224    SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
19225    // Low will be bitcasted right away, so do not bother bitcasting back to its
19226    // original type.
19227    Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
19228                      VecCstLowBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
19229    //     uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
19230    //                                 (uint4) 0x53000000, 0xaa);
19231    SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
19232    SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
19233    // High will be bitcasted right away, so do not bother bitcasting back to
19234    // its original type.
19235    High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
19236                       VecCstHighBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
19237  } else {
19238    SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
19239    //     uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
19240    SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
19241    Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
19242
19243    //     uint4 hi = (v >> 16) | (uint4) 0x53000000;
19244    High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
19245  }
19246
19247  // Create the vector constant for (0x1.0p39f + 0x1.0p23f).
19248  SDValue VecCstFSub = DAG.getConstantFP(
19249      APFloat(APFloat::IEEEsingle(), APInt(32, 0x53000080)), DL, VecFloatVT);
19250
19251  //     float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
19252  // NOTE: By using fsub of a positive constant instead of fadd of a negative
19253  // constant, we avoid reassociation in MachineCombiner when unsafe-fp-math is
19254  // enabled. See PR24512.
19255  SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
19256  // TODO: Are there any fast-math-flags to propagate here?
19257  //     (float4) lo;
19258  SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
19259  //     return (float4) lo + fhi;
19260  if (IsStrict) {
19261    SDValue FHigh = DAG.getNode(ISD::STRICT_FSUB, DL, {VecFloatVT, MVT::Other},
19262                                {Op.getOperand(0), HighBitcast, VecCstFSub});
19263    return DAG.getNode(ISD::STRICT_FADD, DL, {VecFloatVT, MVT::Other},
19264                       {FHigh.getValue(1), LowBitcast, FHigh});
19265  }
19266
19267  SDValue FHigh =
19268      DAG.getNode(ISD::FSUB, DL, VecFloatVT, HighBitcast, VecCstFSub);
19269  return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
19270}
19271
19272static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG,
19273                                   const X86Subtarget &Subtarget) {
19274  unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
19275  SDValue N0 = Op.getOperand(OpNo);
19276  MVT SrcVT = N0.getSimpleValueType();
19277  SDLoc dl(Op);
19278
19279  switch (SrcVT.SimpleTy) {
19280  default:
19281    llvm_unreachable("Custom UINT_TO_FP is not supported!");
19282  case MVT::v2i32:
19283    return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl);
19284  case MVT::v4i32:
19285  case MVT::v8i32:
19286    return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
19287  case MVT::v2i64:
19288  case MVT::v4i64:
19289    return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
19290  }
19291}
19292
19293SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
19294                                           SelectionDAG &DAG) const {
19295  bool IsStrict = Op->isStrictFPOpcode();
19296  unsigned OpNo = IsStrict ? 1 : 0;
19297  SDValue Src = Op.getOperand(OpNo);
19298  SDLoc dl(Op);
19299  auto PtrVT = getPointerTy(DAG.getDataLayout());
19300  MVT SrcVT = Src.getSimpleValueType();
19301  MVT DstVT = Op->getSimpleValueType(0);
19302  SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
19303
19304  if (DstVT == MVT::f128)
19305    return LowerF128Call(Op, DAG, RTLIB::getUINTTOFP(SrcVT, DstVT));
19306
19307  if (DstVT.isVector())
19308    return lowerUINT_TO_FP_vec(Op, DAG, Subtarget);
19309
19310  if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
19311    return Extract;
19312
19313  if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
19314      (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
19315    // Conversions from unsigned i32 to f32/f64 are legal,
19316    // using VCVTUSI2SS/SD.  Same for i64 in 64-bit mode.
19317    return Op;
19318  }
19319
19320  // Promote i32 to i64 and use a signed conversion on 64-bit targets.
19321  if (SrcVT == MVT::i32 && Subtarget.is64Bit()) {
19322    Src = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Src);
19323    if (IsStrict)
19324      return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {DstVT, MVT::Other},
19325                         {Chain, Src});
19326    return DAG.getNode(ISD::SINT_TO_FP, dl, DstVT, Src);
19327  }
19328
19329  if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
19330    return V;
19331
19332  if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
19333    return LowerUINT_TO_FP_i64(Op, DAG, Subtarget);
19334  if (SrcVT == MVT::i32 && X86ScalarSSEf64 && DstVT != MVT::f80)
19335    return LowerUINT_TO_FP_i32(Op, DAG, Subtarget);
19336  if (Subtarget.is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
19337    return SDValue();
19338
19339  // Make a 64-bit buffer, and use it to build an FILD.
19340  SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
19341  if (SrcVT == MVT::i32) {
19342    SDValue OffsetSlot = DAG.getMemBasePlusOffset(StackSlot, 4, dl);
19343    SDValue Store1 =
19344        DAG.getStore(Chain, dl, Src, StackSlot, MachinePointerInfo());
19345    SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
19346                                  OffsetSlot, MachinePointerInfo());
19347    std::pair<SDValue, SDValue> Tmp =
19348        BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
19349    if (IsStrict)
19350      return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
19351
19352    return Tmp.first;
19353  }
19354
19355  assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
19356  SDValue ValueToStore = Src;
19357  if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit()) {
19358    // Bitcasting to f64 here allows us to do a single 64-bit store from
19359    // an SSE register, avoiding the store forwarding penalty that would come
19360    // with two 32-bit stores.
19361    ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
19362  }
19363  SDValue Store =
19364      DAG.getStore(Chain, dl, ValueToStore, StackSlot, MachinePointerInfo());
19365  // For i64 source, we need to add the appropriate power of 2 if the input
19366  // was negative.  This is the same as the optimization in
19367  // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
19368  // we must be careful to do the computation in x87 extended precision, not
19369  // in SSE. (The generic code can't know it's OK to do this, or how to.)
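  // Concretely: FILD reads the 64-bit buffer as a *signed* integer, so when
  // the original unsigned value had its top bit set the loaded result is off
  // by exactly 2^64. The "fudge" selected below is 0x5F800000, i.e. 2^64 as an
  // f32, and it is added only when the sign bit was set.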
19370  int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
19371  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
19372      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
19373      MachineMemOperand::MOLoad, 8, 8);
19374
19375  SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
19376  SDValue Ops[] = { Store, StackSlot };
19377  SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
19378                                         MVT::i64, MMO);
19379  Chain = Fild.getValue(1);
19380
19381
19382  // Check whether the sign bit is set.
19383  SDValue SignSet = DAG.getSetCC(
19384      dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
19385      Op.getOperand(OpNo), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);
19386
19387  // Build a 64 bit pair (FF, 0) in the constant pool, with FF in the hi bits.
19388  APInt FF(64, 0x5F80000000000000ULL);
19389  SDValue FudgePtr = DAG.getConstantPool(
19390      ConstantInt::get(*DAG.getContext(), FF), PtrVT);
19391
19392  // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
19393  SDValue Zero = DAG.getIntPtrConstant(0, dl);
19394  SDValue Four = DAG.getIntPtrConstant(4, dl);
19395  SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Four, Zero);
19396  FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);
19397
19398  // Load the value out, extending it from f32 to f80.
19399  SDValue Fudge = DAG.getExtLoad(
19400      ISD::EXTLOAD, dl, MVT::f80, Chain, FudgePtr,
19401      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
19402      /* Alignment = */ 4);
19403  Chain = Fudge.getValue(1);
19404  // Extend everything to 80 bits to force it to be done on x87.
19405  // TODO: Are there any fast-math-flags to propagate here?
19406  if (IsStrict) {
19407    SDValue Add = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::f80, MVT::Other},
19408                              {Chain, Fild, Fudge});
19409    // STRICT_FP_ROUND can't handle equal types.
19410    if (DstVT == MVT::f80)
19411      return Add;
19412    return DAG.getNode(ISD::STRICT_FP_ROUND, dl, {DstVT, MVT::Other},
19413                       {Add.getValue(1), Add, DAG.getIntPtrConstant(0, dl)});
19414  }
19415  SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
19416  return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
19417                     DAG.getIntPtrConstant(0, dl));
19418}
19419
19420// If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
19421// is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
19422// just return an SDValue().
19423// Otherwise it is assumed to be a conversion from one of f32, f64 or f80
19424// to i16, i32 or i64, and we lower it to a legal sequence and return the
19425// result.
19426SDValue
19427X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
19428                                   bool IsSigned, SDValue &Chain) const {
19429  bool IsStrict = Op->isStrictFPOpcode();
19430  SDLoc DL(Op);
19431
19432  EVT DstTy = Op.getValueType();
19433  SDValue Value = Op.getOperand(IsStrict ? 1 : 0);
19434  EVT TheVT = Value.getValueType();
19435  auto PtrVT = getPointerTy(DAG.getDataLayout());
19436
19437  if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
19438    // f16 must be promoted before using the lowering in this routine.
19439    // fp128 does not use this lowering.
19440    return SDValue();
19441  }
19442
19443  // If using FIST to compute an unsigned i64, we'll need some fixup
19444  // to handle values above the maximum signed i64.  A FIST is always
19445  // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
19446  bool UnsignedFixup = !IsSigned && DstTy == MVT::i64;
19447
19448  // FIXME: This does not generate an invalid exception if the input does not
19449  // fit in i32. PR44019
19450  if (!IsSigned && DstTy != MVT::i64) {
19451    // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
19452    // The low 32 bits of the fist result will have the correct uint32 result.
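    // (Any uint32 value fits in the non-negative range of i64, so the signed
    // 64-bit conversion is exact and its low 32 bits are the unsigned result.)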
19453    assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
19454    DstTy = MVT::i64;
19455  }
19456
19457  assert(DstTy.getSimpleVT() <= MVT::i64 &&
19458         DstTy.getSimpleVT() >= MVT::i16 &&
19459         "Unknown FP_TO_INT to lower!");
19460
19461  // We lower FP->int64 into FISTP64 followed by a load from a temporary
19462  // stack slot.
19463  MachineFunction &MF = DAG.getMachineFunction();
19464  unsigned MemSize = DstTy.getStoreSize();
19465  int SSFI = MF.getFrameInfo().CreateStackObject(MemSize, MemSize, false);
19466  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
19467
19468  Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
19469
19470  SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.
19471
19472  if (UnsignedFixup) {
19473    //
19474    // Conversion to unsigned i64 is implemented with a select,
19475    // depending on whether the source value fits in the range
19476    // of a signed i64.  Let Thresh be the FP equivalent of
19477    // 0x8000000000000000ULL.
19478    //
19479    //  Adjust = (Value < Thresh) ? 0 : 0x80000000;
19480    //  FltOfs = (Value < Thresh) ? 0 : 0x80000000;
19481    //  FistSrc = (Value - FltOfs);
19482    //  Fist-to-mem64 FistSrc
19483    //  Add 0 or 0x800...0ULL to the 64-bit result, which is equivalent
19484    //  to XOR'ing the high 32 bits with Adjust.
19485    //
19486    // Being a power of 2, Thresh is exactly representable in all FP formats.
19487    // For X87 we'd like to use the smallest FP type for this constant, but
19488    // for DAG type consistency we have to match the FP operand type.
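    //
    // For example, converting Value == 2^63 + 42 (say, from f80): Value is not
    // less than Thresh, so FltOfs = 2^63 and Adjust = 0x8000000000000000. The
    // FIST then converts 42.0 exactly, and XOR'ing its result with Adjust
    // restores 0x800000000000002A == 2^63 + 42.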
19489
19490    APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
19491    LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
19492    bool LosesInfo = false;
19493    if (TheVT == MVT::f64)
19494      // The rounding mode is irrelevant as the conversion should be exact.
19495      Status = Thresh.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
19496                              &LosesInfo);
19497    else if (TheVT == MVT::f80)
19498      Status = Thresh.convert(APFloat::x87DoubleExtended(),
19499                              APFloat::rmNearestTiesToEven, &LosesInfo);
19500
19501    assert(Status == APFloat::opOK && !LosesInfo &&
19502           "FP conversion should have been exact");
19503
19504    SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);
19505
19506    EVT ResVT = getSetCCResultType(DAG.getDataLayout(),
19507                                   *DAG.getContext(), TheVT);
19508    SDValue Cmp;
19509    if (IsStrict) {
19510      Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETLT,
19511                         Chain, /*IsSignaling*/ true);
19512      Chain = Cmp.getValue(1);
19513    } else {
19514      Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETLT);
19515    }
19516
19517    Adjust = DAG.getSelect(DL, MVT::i64, Cmp,
19518                           DAG.getConstant(0, DL, MVT::i64),
19519                           DAG.getConstant(APInt::getSignMask(64),
19520                                           DL, MVT::i64));
19521    SDValue FltOfs = DAG.getSelect(DL, TheVT, Cmp,
19522                                   DAG.getConstantFP(0.0, DL, TheVT),
19523                                   ThreshVal);
19524
19525    if (IsStrict) {
19526      Value = DAG.getNode(ISD::STRICT_FSUB, DL, { TheVT, MVT::Other},
19527                          { Chain, Value, FltOfs });
19528      Chain = Value.getValue(1);
19529    } else
19530      Value = DAG.getNode(ISD::FSUB, DL, TheVT, Value, FltOfs);
19531  }
19532
19533  MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
19534
19535  // FIXME This causes a redundant load/store if the SSE-class value is already
19536  // in memory, such as if it is on the callstack.
19537  if (isScalarFPTypeInSSEReg(TheVT)) {
19538    assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
19539    Chain = DAG.getStore(Chain, DL, Value, StackSlot, MPI);
19540    SDVTList Tys = DAG.getVTList(TheVT, MVT::Other);
19541    SDValue Ops[] = { Chain, StackSlot };
19542
19543    unsigned FLDSize = TheVT.getStoreSize();
19544    assert(FLDSize <= MemSize && "Stack slot not big enough");
19545    MachineMemOperand *MMO = MF.getMachineMemOperand(
19546        MPI, MachineMemOperand::MOLoad, FLDSize, FLDSize);
19547    Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, TheVT, MMO);
19548    Chain = Value.getValue(1);
19549  }
19550
19551  // Build the FP_TO_INT*_IN_MEM
19552  MachineMemOperand *MMO = MF.getMachineMemOperand(
19553      MPI, MachineMemOperand::MOStore, MemSize, MemSize);
19554  SDValue Ops[] = { Chain, Value, StackSlot };
19555  SDValue FIST = DAG.getMemIntrinsicNode(X86ISD::FP_TO_INT_IN_MEM, DL,
19556                                         DAG.getVTList(MVT::Other),
19557                                         Ops, DstTy, MMO);
19558
19559  SDValue Res = DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MPI);
19560  Chain = Res.getValue(1);
19561
19562  // If we need an unsigned fixup, XOR the result with adjust.
19563  if (UnsignedFixup)
19564    Res = DAG.getNode(ISD::XOR, DL, MVT::i64, Res, Adjust);
19565
19566  return Res;
19567}
19568
19569static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
19570                              const X86Subtarget &Subtarget) {
19571  MVT VT = Op.getSimpleValueType();
19572  SDValue In = Op.getOperand(0);
19573  MVT InVT = In.getSimpleValueType();
19574  SDLoc dl(Op);
19575  unsigned Opc = Op.getOpcode();
19576
19577  assert(VT.isVector() && InVT.isVector() && "Expected vector type");
19578  assert((Opc == ISD::ANY_EXTEND || Opc == ISD::ZERO_EXTEND) &&
19579         "Unexpected extension opcode");
19580  assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
19581         "Expected same number of elements");
19582  assert((VT.getVectorElementType() == MVT::i16 ||
19583          VT.getVectorElementType() == MVT::i32 ||
19584          VT.getVectorElementType() == MVT::i64) &&
19585         "Unexpected element type");
19586  assert((InVT.getVectorElementType() == MVT::i8 ||
19587          InVT.getVectorElementType() == MVT::i16 ||
19588          InVT.getVectorElementType() == MVT::i32) &&
19589         "Unexpected element type");
19590
19591  unsigned ExtendInVecOpc = getOpcode_EXTEND_VECTOR_INREG(Opc);
19592
19593  // Custom legalize v8i8->v8i64 on CPUs without avx512bw.
19594  if (InVT == MVT::v8i8) {
19595    if (VT != MVT::v8i64)
19596      return SDValue();
19597
19598    In = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op),
19599                     MVT::v16i8, In, DAG.getUNDEF(MVT::v8i8));
19600    return DAG.getNode(ExtendInVecOpc, dl, VT, In);
19601  }
19602
19603  if (Subtarget.hasInt256())
19604    return Op;
19605
19606  // Optimize vectors in AVX mode:
19607  //
19608  //   v8i16 -> v8i32
19609  //   Use vpmovzxwd for 4 lower elements  v8i16 -> v4i32.
19610  //   Use vpunpckhwd for 4 upper elements  v8i16 -> v4i32.
19611  //   Concat upper and lower parts.
19612  //
19613  //   v4i32 -> v4i64
19614  //   Use vpmovzxdq for 4 lower elements  v4i32 -> v2i64.
19615  //   Use vpunpckhdq for 4 upper elements  v4i32 -> v2i64.
19616  //   Concat upper and lower parts.
19617  //
19618  MVT HalfVT = VT.getHalfNumVectorElementsVT();
19619  SDValue OpLo = DAG.getNode(ExtendInVecOpc, dl, HalfVT, In);
19620
19621  // Short-circuit if we can determine that each 128-bit half is the same value.
19622  // Otherwise, this is difficult to match and optimize.
19623  if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(In))
19624    if (hasIdenticalHalvesShuffleMask(Shuf->getMask()))
19625      return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpLo);
19626
19627  SDValue ZeroVec = DAG.getConstant(0, dl, InVT);
19628  SDValue Undef = DAG.getUNDEF(InVT);
19629  bool NeedZero = Opc == ISD::ZERO_EXTEND;
19630  SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
19631  OpHi = DAG.getBitcast(HalfVT, OpHi);
19632
19633  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
19634}
19635
19636// Helper to split and extend a v16i1 mask to v16i8 or v16i16.
19637static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
19638                                   const SDLoc &dl, SelectionDAG &DAG) {
19639  assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
19640  SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
19641                           DAG.getIntPtrConstant(0, dl));
19642  SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
19643                           DAG.getIntPtrConstant(8, dl));
19644  Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
19645  Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
19646  SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
19647  return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
19648}
19649
19650static  SDValue LowerZERO_EXTEND_Mask(SDValue Op,
19651                                      const X86Subtarget &Subtarget,
19652                                      SelectionDAG &DAG) {
19653  MVT VT = Op->getSimpleValueType(0);
19654  SDValue In = Op->getOperand(0);
19655  MVT InVT = In.getSimpleValueType();
19656  assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
19657  SDLoc DL(Op);
19658  unsigned NumElts = VT.getVectorNumElements();
19659
19660  // For all vectors but vXi8, we can just emit a sign_extend and a shift. This
19661  // avoids a constant pool load.
19662  if (VT.getVectorElementType() != MVT::i8) {
19663    SDValue Extend = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, In);
19664    return DAG.getNode(ISD::SRL, DL, VT, Extend,
19665                       DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
19666  }
19667
19668  // Extend VT if BWI is not supported.
19669  MVT ExtVT = VT;
19670  if (!Subtarget.hasBWI()) {
19671    // If v16i32 is to be avoided, we'll need to split and concatenate.
19672    if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
19673      return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);
19674
19675    ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
19676  }
19677
19678  // Widen to 512-bits if VLX is not supported.
19679  MVT WideVT = ExtVT;
19680  if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
19681    NumElts *= 512 / ExtVT.getSizeInBits();
19682    InVT = MVT::getVectorVT(MVT::i1, NumElts);
19683    In = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT, DAG.getUNDEF(InVT),
19684                     In, DAG.getIntPtrConstant(0, DL));
19685    WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(),
19686                              NumElts);
19687  }
19688
19689  SDValue One = DAG.getConstant(1, DL, WideVT);
19690  SDValue Zero = DAG.getConstant(0, DL, WideVT);
19691
19692  SDValue SelectedVal = DAG.getSelect(DL, WideVT, In, One, Zero);
19693
19694  // Truncate if we had to extend above.
19695  if (VT != ExtVT) {
19696    WideVT = MVT::getVectorVT(MVT::i8, NumElts);
19697    SelectedVal = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SelectedVal);
19698  }
19699
19700  // Extract back to 128/256-bit if we widened.
19701  if (WideVT != VT)
19702    SelectedVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SelectedVal,
19703                              DAG.getIntPtrConstant(0, DL));
19704
19705  return SelectedVal;
19706}
19707
19708static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
19709                                SelectionDAG &DAG) {
19710  SDValue In = Op.getOperand(0);
19711  MVT SVT = In.getSimpleValueType();
19712
19713  if (SVT.getVectorElementType() == MVT::i1)
19714    return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);
19715
19716  assert(Subtarget.hasAVX() && "Expected AVX support");
19717  return LowerAVXExtend(Op, DAG, Subtarget);
19718}
19719
19720/// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
19721/// It makes use of the fact that vectors with enough leading sign/zero bits
19722/// prevent the PACKSS/PACKUS from saturating the results.
19723/// AVX2 (Int256) sub-targets require extra shuffling as the PACK*S operates
19724/// within each 128-bit lane.
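/// For example, PACKUSWB on a v8i16 whose lanes are known to be at most 255
/// just drops the known-zero high byte of each lane, and PACKSSWB on lanes
/// with at least 9 sign bits keeps every value in [-128, 127], so neither pack
/// saturates and the result is a plain truncation.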
19725static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
19726                                      const SDLoc &DL, SelectionDAG &DAG,
19727                                      const X86Subtarget &Subtarget) {
19728  assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
19729         "Unexpected PACK opcode");
19730  assert(DstVT.isVector() && "VT not a vector?");
19731
19732  // Requires SSE2 but AVX512 has fast vector truncate.
19733  if (!Subtarget.hasSSE2())
19734    return SDValue();
19735
19736  EVT SrcVT = In.getValueType();
19737
19738  // No truncation required, we might get here due to recursive calls.
19739  if (SrcVT == DstVT)
19740    return In;
19741
19742  // We only support vector truncation to 64 bits or greater from a
19743  // 128-bit or greater source.
19744  unsigned DstSizeInBits = DstVT.getSizeInBits();
19745  unsigned SrcSizeInBits = SrcVT.getSizeInBits();
19746  if ((DstSizeInBits % 64) != 0 || (SrcSizeInBits % 128) != 0)
19747    return SDValue();
19748
19749  unsigned NumElems = SrcVT.getVectorNumElements();
19750  if (!isPowerOf2_32(NumElems))
19751    return SDValue();
19752
19753  LLVMContext &Ctx = *DAG.getContext();
19754  assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
19755  assert(SrcSizeInBits > DstSizeInBits && "Illegal truncation");
19756
19757  EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);
19758
19759  // Pack to the largest type possible:
19760  // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
19761  EVT InVT = MVT::i16, OutVT = MVT::i8;
19762  if (SrcVT.getScalarSizeInBits() > 16 &&
19763      (Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
19764    InVT = MVT::i32;
19765    OutVT = MVT::i16;
19766  }
19767
19768  // 128bit -> 64bit truncate - PACK 128-bit src in the lower subvector.
19769  if (SrcVT.is128BitVector()) {
19770    InVT = EVT::getVectorVT(Ctx, InVT, 128 / InVT.getSizeInBits());
19771    OutVT = EVT::getVectorVT(Ctx, OutVT, 128 / OutVT.getSizeInBits());
19772    In = DAG.getBitcast(InVT, In);
19773    SDValue Res = DAG.getNode(Opcode, DL, OutVT, In, In);
19774    Res = extractSubVector(Res, 0, DAG, DL, 64);
19775    return DAG.getBitcast(DstVT, Res);
19776  }
19777
19778  // Extract lower/upper subvectors.
19779  unsigned NumSubElts = NumElems / 2;
19780  SDValue Lo = extractSubVector(In, 0 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
19781  SDValue Hi = extractSubVector(In, 1 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
19782
19783  unsigned SubSizeInBits = SrcSizeInBits / 2;
19784  InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
19785  OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
19786
19787  // 256bit -> 128bit truncate - PACK lower/upper 128-bit subvectors.
19788  if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
19789    Lo = DAG.getBitcast(InVT, Lo);
19790    Hi = DAG.getBitcast(InVT, Hi);
19791    SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
19792    return DAG.getBitcast(DstVT, Res);
19793  }
19794
19795  // AVX2: 512bit -> 256bit truncate - PACK lower/upper 256-bit subvectors.
19796  // AVX2: 512bit -> 128bit truncate - PACK(PACK, PACK).
19797  if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
19798    Lo = DAG.getBitcast(InVT, Lo);
19799    Hi = DAG.getBitcast(InVT, Hi);
19800    SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
19801
19802    // 256-bit PACK(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
19803    // so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
19804    // Scale shuffle mask to avoid bitcasts and help ComputeNumSignBits.
19805    SmallVector<int, 64> Mask;
19806    int Scale = 64 / OutVT.getScalarSizeInBits();
19807    scaleShuffleMask<int>(Scale, ArrayRef<int>({ 0, 2, 1, 3 }), Mask);
19808    Res = DAG.getVectorShuffle(OutVT, DL, Res, Res, Mask);
19809
19810    if (DstVT.is256BitVector())
19811      return DAG.getBitcast(DstVT, Res);
19812
19813    // If 512bit -> 128bit truncate another stage.
19814    EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
19815    Res = DAG.getBitcast(PackedVT, Res);
19816    return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
19817  }
19818
19819  // Recursively pack lower/upper subvectors, concat result and pack again.
19820  assert(SrcSizeInBits >= 256 && "Expected 256-bit vector or greater");
19821  EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumSubElts);
19822  Lo = truncateVectorWithPACK(Opcode, PackedVT, Lo, DL, DAG, Subtarget);
19823  Hi = truncateVectorWithPACK(Opcode, PackedVT, Hi, DL, DAG, Subtarget);
19824
19825  PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
19826  SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
19827  return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
19828}
19829
19830static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
19831                                  const X86Subtarget &Subtarget) {
19832
19833  SDLoc DL(Op);
19834  MVT VT = Op.getSimpleValueType();
19835  SDValue In = Op.getOperand(0);
19836  MVT InVT = In.getSimpleValueType();
19837
19838  assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");
19839
19840  // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
19841  unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
19842  if (InVT.getScalarSizeInBits() <= 16) {
19843    if (Subtarget.hasBWI()) {
19844      // legal, will go to VPMOVB2M, VPMOVW2M
19845      if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
19846        // We need to shift to get the lsb into sign position.
19847        // Shift packed bytes not supported natively, bitcast to word
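        // For example, a v16i8 of 0/1 booleans becomes 0x00/0x80 per byte
        // after shifting left by 7, so the sign-bit test below observes the
        // original LSB of each element.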
19848        MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
19849        In = DAG.getNode(ISD::SHL, DL, ExtVT,
19850                         DAG.getBitcast(ExtVT, In),
19851                         DAG.getConstant(ShiftInx, DL, ExtVT));
19852        In = DAG.getBitcast(InVT, In);
19853      }
19854      return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
19855                          In, ISD::SETGT);
19856    }
19857    // Use TESTD/Q, extended vector to packed dword/qword.
19858    assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
19859           "Unexpected vector type.");
19860    unsigned NumElts = InVT.getVectorNumElements();
19861    assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
19862    // We need to change to a wider element type that we have support for.
19863    // For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
19864    // For 16 element vectors we extend to v16i32 unless we are explicitly
19865    // trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
19866    // we need to split into two 8 element vectors which we can extend to v8i32,
19867    // truncate and concat the results. There's an additional complication if
19868    // the original type is v16i8. In that case we can't split the v16i8 so
19869    // first we pre-extend it to v16i16 which we can split to v8i16, then extend
19870    // to v8i32, truncate that to v8i1 and concat the two halves.
19871    if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
19872      if (InVT == MVT::v16i8) {
19873        // First we need to sign extend up to 256-bits so we can split that.
19874        InVT = MVT::v16i16;
19875        In = DAG.getNode(ISD::SIGN_EXTEND, DL, InVT, In);
19876      }
19877      SDValue Lo = extract128BitVector(In, 0, DAG, DL);
19878      SDValue Hi = extract128BitVector(In, 8, DAG, DL);
19879      // We're split now, just emit two truncates and a concat. The two
19880      // truncates will trigger legalization to come back to this function.
19881      Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
19882      Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
19883      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
19884    }
19885    // We either have 8 elements or we're allowed to use 512-bit vectors.
19886    // If we have VLX, we want to use the narrowest vector that can get the
19887    // job done so we use vXi32.
19888    MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
19889    MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
19890    In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
19891    InVT = ExtVT;
19892    ShiftInx = InVT.getScalarSizeInBits() - 1;
19893  }
19894
19895  if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
19896    // We need to shift to get the LSB into the sign position.
19897    In = DAG.getNode(ISD::SHL, DL, InVT, In,
19898                     DAG.getConstant(ShiftInx, DL, InVT));
19899  }
19900  // If we have DQI, emit a pattern that will be iseled as vpmovq2m/vpmovd2m.
19901  if (Subtarget.hasDQI())
19902    return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT), In, ISD::SETGT);
19903  return DAG.getSetCC(DL, VT, In, DAG.getConstant(0, DL, InVT), ISD::SETNE);
19904}
19905
19906SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
19907  SDLoc DL(Op);
19908  MVT VT = Op.getSimpleValueType();
19909  SDValue In = Op.getOperand(0);
19910  MVT InVT = In.getSimpleValueType();
19911  unsigned InNumEltBits = InVT.getScalarSizeInBits();
19912
19913  assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
19914         "Invalid TRUNCATE operation");
19915
19916  // If we're called by the type legalizer, handle a few cases.
19917  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19918  if (!TLI.isTypeLegal(InVT)) {
19919    if ((InVT == MVT::v8i64 || InVT == MVT::v16i32 || InVT == MVT::v16i64) &&
19920        VT.is128BitVector()) {
19921      assert(Subtarget.hasVLX() && "Unexpected subtarget!");
19922      // The default behavior is to truncate one step, concatenate, and then
19923      // truncate the remainder. We'd rather produce two 64-bit results and
19924      // concatenate those.
19925      SDValue Lo, Hi;
19926      std::tie(Lo, Hi) = DAG.SplitVector(In, DL);
19927
19928      EVT LoVT, HiVT;
19929      std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
19930
19931      Lo = DAG.getNode(ISD::TRUNCATE, DL, LoVT, Lo);
19932      Hi = DAG.getNode(ISD::TRUNCATE, DL, HiVT, Hi);
19933      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
19934    }
19935
19936    // Otherwise let default legalization handle it.
19937    return SDValue();
19938  }
19939
19940  if (VT.getVectorElementType() == MVT::i1)
19941    return LowerTruncateVecI1(Op, DAG, Subtarget);
19942
19943  // vpmovqb/w/d, vpmovdb/w, vpmovwb
19944  if (Subtarget.hasAVX512()) {
19945    // Word-to-byte truncation is only legal with BWI. Otherwise we have to
19946    // promote to v16i32 and then truncate that. But we should only do that if
19947    // we haven't been asked to avoid 512-bit vectors. The actual promotion to
19948    // v16i32 will be handled by isel patterns.
19949    if (InVT != MVT::v16i16 || Subtarget.hasBWI() ||
19950        Subtarget.canExtendTo512DQ())
19951      return Op;
19952  }
19953
19954  unsigned NumPackedSignBits = std::min<unsigned>(VT.getScalarSizeInBits(), 16);
19955  unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
19956
19957  // Truncate with PACKUS if we are truncating a vector with leading zero bits
19958  // that extend all the way to the packed/truncated value.
19959  // Pre-SSE41 we can only use PACKUSWB.
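  // For example, a v16i16 source whose elements have their upper 8 bits known
  // zero can be packed to v16i8 with PACKUSWB, and on SSE4.1+ a v8i32 source
  // with the upper 16 bits known zero can be packed to v8i16 with PACKUSDW.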
19960  KnownBits Known = DAG.computeKnownBits(In);
19961  if ((InNumEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros())
19962    if (SDValue V =
19963            truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget))
19964      return V;
19965
19966  // Truncate with PACKSS if we are truncating a vector with sign-bits that
19967  // extend all the way to the packed/truncated value.
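  // For example, a v8i32 source whose elements each have at least 17 sign bits
  // can be packed to v8i16 with PACKSSDW without altering the low 16 bits.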
19968  if ((InNumEltBits - NumPackedSignBits) < DAG.ComputeNumSignBits(In))
19969    if (SDValue V =
19970            truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget))
19971      return V;
19972
19973  // Handle truncation of V256 to V128 using shuffles.
19974  assert(VT.is128BitVector() && InVT.is256BitVector() && "Unexpected types!");
19975
19976  if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
19977    // On AVX2, v4i64 -> v4i32 becomes VPERMD.
19978    if (Subtarget.hasInt256()) {
19979      static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
19980      In = DAG.getBitcast(MVT::v8i32, In);
19981      In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
19982      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
19983                         DAG.getIntPtrConstant(0, DL));
19984    }
19985
19986    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
19987                               DAG.getIntPtrConstant(0, DL));
19988    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
19989                               DAG.getIntPtrConstant(2, DL));
19990    OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
19991    OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
19992    static const int ShufMask[] = {0, 2, 4, 6};
19993    return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
19994  }
19995
19996  if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
19997    // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
19998    if (Subtarget.hasInt256()) {
19999      In = DAG.getBitcast(MVT::v32i8, In);
20000
20001      // The PSHUFB mask:
20002      static const int ShufMask1[] = { 0,  1,  4,  5,  8,  9, 12, 13,
20003                                      -1, -1, -1, -1, -1, -1, -1, -1,
20004                                      16, 17, 20, 21, 24, 25, 28, 29,
20005                                      -1, -1, -1, -1, -1, -1, -1, -1 };
20006      In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
20007      In = DAG.getBitcast(MVT::v4i64, In);
20008
20009      static const int ShufMask2[] = {0,  2,  -1,  -1};
20010      In = DAG.getVectorShuffle(MVT::v4i64, DL,  In, In, ShufMask2);
20011      In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
20012                       DAG.getIntPtrConstant(0, DL));
20013      return DAG.getBitcast(VT, In);
20014    }
20015
20016    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
20017                               DAG.getIntPtrConstant(0, DL));
20018
20019    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
20020                               DAG.getIntPtrConstant(4, DL));
20021
20022    OpLo = DAG.getBitcast(MVT::v16i8, OpLo);
20023    OpHi = DAG.getBitcast(MVT::v16i8, OpHi);
20024
20025    // The PSHUFB mask:
20026    static const int ShufMask1[] = {0,  1,  4,  5,  8,  9, 12, 13,
20027                                   -1, -1, -1, -1, -1, -1, -1, -1};
20028
20029    OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, OpLo, ShufMask1);
20030    OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, OpHi, ShufMask1);
20031
20032    OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
20033    OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
20034
20035    // The MOVLHPS Mask:
20036    static const int ShufMask2[] = {0, 1, 4, 5};
20037    SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
20038    return DAG.getBitcast(MVT::v8i16, res);
20039  }
20040
20041  if (VT == MVT::v16i8 && InVT == MVT::v16i16) {
20042    // Use an AND to zero uppper bits for PACKUS.
20043    // Use an AND to zero the upper bits for PACKUS.
20044
20045    SDValue InLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
20046                               DAG.getIntPtrConstant(0, DL));
20047    SDValue InHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
20048                               DAG.getIntPtrConstant(8, DL));
20049    return DAG.getNode(X86ISD::PACKUS, DL, VT, InLo, InHi);
20050  }
20051
20052  llvm_unreachable("All 256->128 cases should have been handled above!");
20053}
20054
20055SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
20056  bool IsStrict = Op->isStrictFPOpcode();
20057  bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
20058                  Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
20059  MVT VT = Op->getSimpleValueType(0);
20060  SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
20061  MVT SrcVT = Src.getSimpleValueType();
20062  SDLoc dl(Op);
20063
20064  if (VT.isVector()) {
20065    if (VT == MVT::v2i1 && SrcVT == MVT::v2f64) {
20066      MVT ResVT = MVT::v4i32;
20067      MVT TruncVT = MVT::v4i1;
20068      unsigned Opc;
20069      if (IsStrict)
20070        Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
20071      else
20072        Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
20073
20074      if (!IsSigned && !Subtarget.hasVLX()) {
20075        assert(Subtarget.useAVX512Regs() && "Unexpected features!");
20076        // Widen to 512-bits.
20077        ResVT = MVT::v8i32;
20078        TruncVT = MVT::v8i1;
20079        Opc = Op.getOpcode();
20080        // Need to concat with zero vector for strict fp to avoid spurious
20081        // exceptions.
20082        // TODO: Should we just do this for non-strict as well?
20083        SDValue Tmp = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v8f64)
20084                               : DAG.getUNDEF(MVT::v8f64);
20085        Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64, Tmp, Src,
20086                          DAG.getIntPtrConstant(0, dl));
20087      }
20088      SDValue Res, Chain;
20089      if (IsStrict) {
20090        Res =
20091            DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {Op->getOperand(0), Src});
20092        Chain = Res.getValue(1);
20093      } else {
20094        Res = DAG.getNode(Opc, dl, ResVT, Src);
20095      }
20096
20097      Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
20098      Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
20099                        DAG.getIntPtrConstant(0, dl));
20100      if (IsStrict)
20101        return DAG.getMergeValues({Res, Chain}, dl);
20102      return Res;
20103    }
20104
20105    // v8f64->v8i32 is legal, but we need v8i32 to be custom for v8f32.
20106    if (VT == MVT::v8i32 && SrcVT == MVT::v8f64) {
20107      assert(!IsSigned && "Expected unsigned conversion!");
20108      assert(Subtarget.useAVX512Regs() && "Requires avx512f");
20109      return Op;
20110    }
20111
20112    // Widen vXi32 fp_to_uint with avx512f to 512-bit source.
20113    if ((VT == MVT::v4i32 || VT == MVT::v8i32) &&
20114        (SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32 || SrcVT == MVT::v8f32)) {
20115      assert(!IsSigned && "Expected unsigned conversion!");
20116      assert(Subtarget.useAVX512Regs() && !Subtarget.hasVLX() &&
20117             "Unexpected features!");
20118      MVT WideVT = SrcVT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
20119      MVT ResVT = SrcVT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
20120      // Need to concat with zero vector for strict fp to avoid spurious
20121      // exceptions.
20122      // TODO: Should we just do this for non-strict as well?
20123      SDValue Tmp =
20124          IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
20125      Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
20126                        DAG.getIntPtrConstant(0, dl));
20127
20128      SDValue Res, Chain;
20129      if (IsStrict) {
20130        Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, dl, {ResVT, MVT::Other},
20131                          {Op->getOperand(0), Src});
20132        Chain = Res.getValue(1);
20133      } else {
20134        Res = DAG.getNode(ISD::FP_TO_UINT, dl, ResVT, Src);
20135      }
20136
20137      Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
20138                        DAG.getIntPtrConstant(0, dl));
20139
20140      if (IsStrict)
20141        return DAG.getMergeValues({Res, Chain}, dl);
20142      return Res;
20143    }
20144
20145    // Widen vXi64 fp_to_uint/fp_to_sint with avx512dq to 512-bit source.
20146    if ((VT == MVT::v2i64 || VT == MVT::v4i64) &&
20147        (SrcVT == MVT::v2f64 || SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32)) {
20148      assert(Subtarget.useAVX512Regs() && Subtarget.hasDQI() &&
20149             !Subtarget.hasVLX() && "Unexpected features!");
20150      MVT WideVT = SrcVT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
20151      // Need to concat with zero vector for strict fp to avoid spurious
20152      // exceptions.
20153      // TODO: Should we just do this for non-strict as well?
20154      SDValue Tmp =
20155          IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
20156      Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
20157                        DAG.getIntPtrConstant(0, dl));
20158
20159      SDValue Res, Chain;
20160      if (IsStrict) {
20161        Res = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
20162                          {Op->getOperand(0), Src});
20163        Chain = Res.getValue(1);
20164      } else {
20165        Res = DAG.getNode(Op.getOpcode(), dl, MVT::v8i64, Src);
20166      }
20167
20168      Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
20169                        DAG.getIntPtrConstant(0, dl));
20170
20171      if (IsStrict)
20172        return DAG.getMergeValues({Res, Chain}, dl);
20173      return Res;
20174    }
20175
20176    if (VT == MVT::v2i64 && SrcVT  == MVT::v2f32) {
20177      assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL");
20178      SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
20179                                DAG.getUNDEF(MVT::v2f32));
20180      if (IsStrict) {
20181        unsigned Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI
20182                                : X86ISD::STRICT_CVTTP2UI;
20183        return DAG.getNode(Opc, dl, {VT, MVT::Other}, {Op->getOperand(0), Tmp});
20184      }
20185      unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
20186      return DAG.getNode(Opc, dl, VT, Tmp);
20187    }
20188
20189    return SDValue();
20190  }
20191
20192  assert(!VT.isVector());
20193
20194  bool UseSSEReg = isScalarFPTypeInSSEReg(SrcVT);
20195
20196  if (!IsSigned && UseSSEReg) {
20197    // Conversions from f32/f64 with AVX512 should be legal.
20198    if (Subtarget.hasAVX512())
20199      return Op;
20200
20201    // Use default expansion for i64.
20202    if (VT == MVT::i64)
20203      return SDValue();
20204
20205    assert(VT == MVT::i32 && "Unexpected VT!");
20206
20207    // Promote i32 to i64 and use a signed operation on 64-bit targets.
20208    // FIXME: This does not generate an invalid exception if the input does not
20209    // fit in i32. PR44019
20210    if (Subtarget.is64Bit()) {
20211      SDValue Res, Chain;
20212      if (IsStrict) {
20213        Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { MVT::i64, MVT::Other},
20214                          { Op.getOperand(0), Src });
20215        Chain = Res.getValue(1);
20216      } else
20217        Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i64, Src);
20218
20219      Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20220      if (IsStrict)
20221        return DAG.getMergeValues({ Res, Chain }, dl);
20222      return Res;
20223    }
20224
20225    // Use default expansion for SSE1/2 targets without SSE3. With SSE3 we can
20226    // use fisttp which will be handled later.
20227    if (!Subtarget.hasSSE3())
20228      return SDValue();
20229  }
20230
20231  // Promote i16 to i32 if we can use an SSE operation or the type is f128.
20232  // FIXME: This does not generate an invalid exception if the input does not
20233  // fit in i16. PR44019
20234  if (VT == MVT::i16 && (UseSSEReg || SrcVT == MVT::f128)) {
20235    assert(IsSigned && "Expected i16 FP_TO_UINT to have been promoted!");
20236    SDValue Res, Chain;
20237    if (IsStrict) {
20238      Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { MVT::i32, MVT::Other},
20239                        { Op.getOperand(0), Src });
20240      Chain = Res.getValue(1);
20241    } else
20242      Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
20243
20244    Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20245    if (IsStrict)
20246      return DAG.getMergeValues({ Res, Chain }, dl);
20247    return Res;
20248  }
20249
20250  // If this is an FP_TO_SINT using SSEReg, we're done.
20251  if (UseSSEReg && IsSigned)
20252    return Op;
20253
20254  // fp128 needs to use a libcall.
20255  if (SrcVT == MVT::f128) {
20256    RTLIB::Libcall LC;
20257    if (IsSigned)
20258      LC = RTLIB::getFPTOSINT(SrcVT, VT);
20259    else
20260      LC = RTLIB::getFPTOUINT(SrcVT, VT);
20261
20262    SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
20263    MakeLibCallOptions CallOptions;
20264    std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, LC, VT, Src, CallOptions,
20265                                                  SDLoc(Op), Chain);
20266
20267    if (IsStrict)
20268      return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);
20269
20270    return Tmp.first;
20271  }
20272
20273  // Fall back to X87.
20274  SDValue Chain;
20275  if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned, Chain)) {
20276    if (IsStrict)
20277      return DAG.getMergeValues({V, Chain}, dl);
20278    return V;
20279  }
20280
20281  llvm_unreachable("Expected FP_TO_INTHelper to handle all remaining cases.");
20282}
20283
20284SDValue X86TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
20285  bool IsStrict = Op->isStrictFPOpcode();
20286
20287  SDLoc DL(Op);
20288  MVT VT = Op.getSimpleValueType();
20289  SDValue In = Op.getOperand(IsStrict ? 1 : 0);
20290  MVT SVT = In.getSimpleValueType();
20291
20292  if (VT == MVT::f128) {
20293    RTLIB::Libcall LC = RTLIB::getFPEXT(SVT, VT);
20294    return LowerF128Call(Op, DAG, LC);
20295  }
20296
20297  assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
20298
20299  SDValue Res =
20300      DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32, In, DAG.getUNDEF(SVT));
20301  if (IsStrict)
20302    return DAG.getNode(X86ISD::STRICT_VFPEXT, DL, {VT, MVT::Other},
20303                       {Op->getOperand(0), Res});
20304  return DAG.getNode(X86ISD::VFPEXT, DL, VT, Res);
20305}
20306
20307SDValue X86TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
20308  bool IsStrict = Op->isStrictFPOpcode();
20309
20310  MVT VT = Op.getSimpleValueType();
20311  SDValue In = Op.getOperand(IsStrict ? 1 : 0);
20312  MVT SVT = In.getSimpleValueType();
20313
20314  // It's legal except when f128 is involved
20315  if (SVT != MVT::f128)
20316    return Op;
20317
20318  RTLIB::Libcall LC = RTLIB::getFPROUND(SVT, VT);
20319
20320  // The FP_ROUND node has a second operand indicating whether it is known to
20321  // be precise. That operand is not passed to the libcall, so we can't
20322  // directly use LowerF128Call.
20323
20324  SDLoc dl(Op);
20325  SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
20326  MakeLibCallOptions CallOptions;
20327  std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, LC, VT, In, CallOptions,
20328                                                dl, Chain);
20329
20330  if (IsStrict)
20331    return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);
20332
20333  return Tmp.first;
20334}
20335
20336/// Depending on uarch and/or optimizing for size, we might prefer to use a
20337/// vector operation in place of the typical scalar operation.
20338static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
20339                                         const X86Subtarget &Subtarget) {
20340  // If both operands have other uses, this is probably not profitable.
20341  SDValue LHS = Op.getOperand(0);
20342  SDValue RHS = Op.getOperand(1);
20343  if (!LHS.hasOneUse() && !RHS.hasOneUse())
20344    return Op;
20345
20346  // FP horizontal add/sub were added with SSE3. Integer with SSSE3.
20347  bool IsFP = Op.getSimpleValueType().isFloatingPoint();
20348  if (IsFP && !Subtarget.hasSSE3())
20349    return Op;
20350  if (!IsFP && !Subtarget.hasSSSE3())
20351    return Op;
20352
20353  // Extract from a common vector.
20354  if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
20355      RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
20356      LHS.getOperand(0) != RHS.getOperand(0) ||
20357      !isa<ConstantSDNode>(LHS.getOperand(1)) ||
20358      !isa<ConstantSDNode>(RHS.getOperand(1)) ||
20359      !shouldUseHorizontalOp(true, DAG, Subtarget))
20360    return Op;
20361
20362  // Allow commuted 'hadd' ops.
20363  // TODO: Allow commuted (f)sub by negating the result of (F)HSUB?
20364  unsigned HOpcode;
20365  switch (Op.getOpcode()) {
20366    case ISD::ADD: HOpcode = X86ISD::HADD; break;
20367    case ISD::SUB: HOpcode = X86ISD::HSUB; break;
20368    case ISD::FADD: HOpcode = X86ISD::FHADD; break;
20369    case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
20370    default:
20371      llvm_unreachable("Trying to lower unsupported opcode to horizontal op");
20372  }
20373  unsigned LExtIndex = LHS.getConstantOperandVal(1);
20374  unsigned RExtIndex = RHS.getConstantOperandVal(1);
20375  if ((LExtIndex & 1) == 1 && (RExtIndex & 1) == 0 &&
20376      (HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
20377    std::swap(LExtIndex, RExtIndex);
20378
20379  if ((LExtIndex & 1) != 0 || RExtIndex != (LExtIndex + 1))
20380    return Op;
20381
20382  SDValue X = LHS.getOperand(0);
20383  EVT VecVT = X.getValueType();
20384  unsigned BitWidth = VecVT.getSizeInBits();
20385  unsigned NumLanes = BitWidth / 128;
20386  unsigned NumEltsPerLane = VecVT.getVectorNumElements() / NumLanes;
20387  assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
20388         "Not expecting illegal vector widths here");
20389
20390  // Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
20391  // equivalent, so extract the 256/512-bit source op to 128-bit if we can.
20392  SDLoc DL(Op);
20393  if (BitWidth == 256 || BitWidth == 512) {
20394    unsigned LaneIdx = LExtIndex / NumEltsPerLane;
20395    X = extract128BitVector(X, LaneIdx * NumEltsPerLane, DAG, DL);
20396    LExtIndex %= NumEltsPerLane;
20397  }
20398
20399  // add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
20400  // add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
20401  // add (extractelt (X, 2), extractelt (X, 3)) --> extractelt (hadd X, X), 1
20402  // sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
20403  SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
20404  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
20405                     DAG.getIntPtrConstant(LExtIndex / 2, DL));
20406}
20407
20408/// Depending on uarch and/or optimizing for size, we might prefer to use a
20409/// vector operation in place of the typical scalar operation.
20410SDValue X86TargetLowering::lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const {
20411  assert((Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::f64) &&
20412         "Only expecting float/double");
20413  return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
20414}
20415
20416/// The only differences between FABS and FNEG are the mask and the logic op.
20417/// FNEG also has a folding opportunity for FNEG(FABS(x)).
20418static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
20419  assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
20420         "Wrong opcode for lowering FABS or FNEG.");
20421
20422  bool IsFABS = (Op.getOpcode() == ISD::FABS);
20423
20424  // If this is an FABS that has an FNEG user, bail out to fold the combination
20425  // into an FNABS. We'll lower the FABS after that if it is still in use.
20426  if (IsFABS)
20427    for (SDNode *User : Op->uses())
20428      if (User->getOpcode() == ISD::FNEG)
20429        return Op;
20430
20431  SDLoc dl(Op);
20432  MVT VT = Op.getSimpleValueType();
20433
20434  bool IsF128 = (VT == MVT::f128);
20435  assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
20436          VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
20437          VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
20438         "Unexpected type in LowerFABSorFNEG");
20439
20440  // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
20441  // decide if we should generate a 16-byte constant mask when we only need 4 or
20442  // 8 bytes for the scalar case.
20443
20444  // There are no scalar bitwise logical SSE/AVX instructions, so we
20445  // generate a 16-byte vector constant and logic op even for the scalar case.
20446  // Using a 16-byte mask allows folding the load of the mask with
20447  // the logic op, which can save roughly 4 bytes of code size.
20448  bool IsFakeVector = !VT.isVector() && !IsF128;
20449  MVT LogicVT = VT;
20450  if (IsFakeVector)
20451    LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
20452
20453  unsigned EltBits = VT.getScalarSizeInBits();
20454  // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
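  // For f32, for example, that is 0x7FFFFFFF for FABS and 0x80000000 for FNEG.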
20455  APInt MaskElt = IsFABS ? APInt::getSignedMaxValue(EltBits) :
20456                           APInt::getSignMask(EltBits);
20457  const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
20458  SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);
20459
20460  SDValue Op0 = Op.getOperand(0);
20461  bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
20462  unsigned LogicOp = IsFABS  ? X86ISD::FAND :
20463                     IsFNABS ? X86ISD::FOR  :
20464                               X86ISD::FXOR;
20465  SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
20466
20467  if (VT.isVector() || IsF128)
20468    return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
20469
20470  // For the scalar case extend to a 128-bit vector, perform the logic op,
20471  // and extract the scalar result back out.
20472  Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
20473  SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
20474  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
20475                     DAG.getIntPtrConstant(0, dl));
20476}
20477
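/// Lower FCOPYSIGN with bitwise logic, roughly
/// (Mag & ~SignMask) | (Sign & SignMask), using 128-bit vector FP logic ops
/// even in the scalar case.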
20478static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
20479  SDValue Mag = Op.getOperand(0);
20480  SDValue Sign = Op.getOperand(1);
20481  SDLoc dl(Op);
20482
20483  // If the sign operand is smaller, extend it first.
20484  MVT VT = Op.getSimpleValueType();
20485  if (Sign.getSimpleValueType().bitsLT(VT))
20486    Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);
20487
20488  // And if it is bigger, shrink it first.
20489  if (Sign.getSimpleValueType().bitsGT(VT))
20490    Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign, DAG.getIntPtrConstant(1, dl));
20491
20492  // At this point the operands and the result should have the same
20493  // type, and that won't be f80 since that is not custom lowered.
20494  bool IsF128 = (VT == MVT::f128);
20495  assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
20496          VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
20497          VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
20498         "Unexpected type in LowerFCOPYSIGN");
20499
20500  const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
20501
20502  // Perform all scalar logic operations as 16-byte vectors because there are no
20503  // scalar FP logic instructions in SSE.
20504  // TODO: This isn't necessary. If we used scalar types, we might avoid some
20505  // unnecessary splats, but we might miss load folding opportunities. Should
20506  // this decision be based on OptimizeForSize?
20507  bool IsFakeVector = !VT.isVector() && !IsF128;
20508  MVT LogicVT = VT;
20509  if (IsFakeVector)
20510    LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
20511
20512  // The mask constants are automatically splatted for vector types.
20513  unsigned EltSizeInBits = VT.getScalarSizeInBits();
20514  SDValue SignMask = DAG.getConstantFP(
20515      APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
20516  SDValue MagMask = DAG.getConstantFP(
20517      APFloat(Sem, APInt::getSignedMaxValue(EltSizeInBits)), dl, LogicVT);
20518
20519  // First, clear all bits but the sign bit from the second operand (sign).
20520  if (IsFakeVector)
20521    Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
20522  SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);
20523
20524  // Next, clear the sign bit from the first operand (magnitude).
20525  // TODO: If we had general constant folding for FP logic ops, this check
20526  // wouldn't be necessary.
20527  SDValue MagBits;
20528  if (ConstantFPSDNode *Op0CN = isConstOrConstSplatFP(Mag)) {
20529    APFloat APF = Op0CN->getValueAPF();
20530    APF.clearSign();
20531    MagBits = DAG.getConstantFP(APF, dl, LogicVT);
20532  } else {
20533    // If the magnitude operand wasn't a constant, we need to AND out the sign.
20534    if (IsFakeVector)
20535      Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
20536    MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
20537  }
20538
20539  // OR the magnitude value with the sign bit.
20540  SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
20541  return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
20542                                          DAG.getIntPtrConstant(0, dl));
20543}
20544
20545static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
20546  SDValue N0 = Op.getOperand(0);
20547  SDLoc dl(Op);
20548  MVT VT = Op.getSimpleValueType();
20549
20550  MVT OpVT = N0.getSimpleValueType();
20551  assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
20552         "Unexpected type for FGETSIGN");
20553
20554  // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
20555  MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
20556  SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
20557  Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
20558  Res = DAG.getZExtOrTrunc(Res, dl, VT);
20559  Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
20560  return Res;
20561}
20562
20563/// Helper for creating a X86ISD::SETCC node.
20564static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
20565                        SelectionDAG &DAG) {
20566  return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
20567                     DAG.getTargetConstant(Cond, dl, MVT::i8), EFLAGS);
20568}
20569
20570/// Helper for matching OR(EXTRACTELT(X,0),OR(EXTRACTELT(X,1),...))
20571/// style scalarized (associative) reduction patterns.
20572static bool matchScalarReduction(SDValue Op, ISD::NodeType BinOp,
20573                                 SmallVectorImpl<SDValue> &SrcOps) {
20574  SmallVector<SDValue, 8> Opnds;
20575  DenseMap<SDValue, APInt> SrcOpMap;
20576  EVT VT = MVT::Other;
20577
20578  // Recognize a special case where a vector is cast into a wide integer to
20579  // test for all zeros.
20580  assert(Op.getOpcode() == unsigned(BinOp) &&
20581         "Unexpected bit reduction opcode");
20582  Opnds.push_back(Op.getOperand(0));
20583  Opnds.push_back(Op.getOperand(1));
20584
20585  for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
20586    SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
20587    // BFS traverse all BinOp operands.
20588    if (I->getOpcode() == unsigned(BinOp)) {
20589      Opnds.push_back(I->getOperand(0));
20590      Opnds.push_back(I->getOperand(1));
20591      // Re-evaluate the number of nodes to be traversed.
20592      e += 2; // 2 more nodes (LHS and RHS) are pushed.
20593      continue;
20594    }
20595
20596    // Quit if this is not an EXTRACT_VECTOR_ELT.
20597    if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
20598      return false;
20599
20600    // Quit if the index is not a constant.
20601    SDValue Idx = I->getOperand(1);
20602    if (!isa<ConstantSDNode>(Idx))
20603      return false;
20604
20605    SDValue Src = I->getOperand(0);
20606    DenseMap<SDValue, APInt>::iterator M = SrcOpMap.find(Src);
20607    if (M == SrcOpMap.end()) {
20608      VT = Src.getValueType();
20609      // Quit if not the same type.
20610      if (SrcOpMap.begin() != SrcOpMap.end() &&
20611          VT != SrcOpMap.begin()->first.getValueType())
20612        return false;
20613      unsigned NumElts = VT.getVectorNumElements();
20614      APInt EltCount = APInt::getNullValue(NumElts);
20615      M = SrcOpMap.insert(std::make_pair(Src, EltCount)).first;
20616      SrcOps.push_back(Src);
20617    }
20618    // Quit if element already used.
20619    unsigned CIdx = cast<ConstantSDNode>(Idx)->getZExtValue();
20620    if (M->second[CIdx])
20621      return false;
20622    M->second.setBit(CIdx);
20623  }
20624
20625  // Quit if not all elements are used.
20626  for (DenseMap<SDValue, APInt>::const_iterator I = SrcOpMap.begin(),
20627                                                E = SrcOpMap.end();
20628       I != E; ++I) {
20629    if (!I->second.isAllOnesValue())
20630      return false;
20631  }
20632
20633  return true;
20634}
20635
20636// Check whether an OR'd tree is PTEST-able.
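// For example, a pattern like
//   seteq (or (extractelt X, 0), (or (extractelt X, 1), ...)), 0
// where the extracts together cover every element of X can become a
// (PTEST X, X) checked with an E condition (NE for setne).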
20637static SDValue LowerVectorAllZeroTest(SDValue Op, ISD::CondCode CC,
20638                                      const X86Subtarget &Subtarget,
20639                                      SelectionDAG &DAG, SDValue &X86CC) {
20640  assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
20641
20642  if (!Subtarget.hasSSE41() || !Op->hasOneUse())
20643    return SDValue();
20644
20645  SmallVector<SDValue, 8> VecIns;
20646  if (!matchScalarReduction(Op, ISD::OR, VecIns))
20647    return SDValue();
20648
20649  // Quit if not 128/256-bit vector.
20650  EVT VT = VecIns[0].getValueType();
20651  if (!VT.is128BitVector() && !VT.is256BitVector())
20652    return SDValue();
20653
20654  SDLoc DL(Op);
20655  MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
20656
20657  // Cast all vectors into TestVT for PTEST.
20658  for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
20659    VecIns[i] = DAG.getBitcast(TestVT, VecIns[i]);
20660
20661  // If more than one full vector is evaluated, OR them first before PTEST.
20662  for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
20663    // Each iteration will OR 2 nodes and append the result until there is only
20664    // 1 node left, i.e. the final OR'd value of all vectors.
20665    SDValue LHS = VecIns[Slot];
20666    SDValue RHS = VecIns[Slot + 1];
20667    VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
20668  }
20669
20670  X86CC = DAG.getTargetConstant(CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE,
20671                                DL, MVT::i8);
20672  return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, VecIns.back(), VecIns.back());
20673}
20674
20675/// Return true if \c Op has a use that doesn't just read flags.
20676static bool hasNonFlagsUse(SDValue Op) {
20677  for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
20678       ++UI) {
20679    SDNode *User = *UI;
20680    unsigned UOpNo = UI.getOperandNo();
20681    if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
20682      // Look pass truncate.
20683      // Look past the truncate.
20684      User = *User->use_begin();
20685    }
20686
20687    if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
20688        !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
20689      return true;
20690  }
20691  return false;
20692}
20693
20694// Transform to an x86-specific ALU node with flags if there is a chance of
20695// using an RMW op or only the flags are used. Otherwise, leave
20696// the node alone and emit a 'cmp' or 'test' instruction.
20697static bool isProfitableToUseFlagOp(SDValue Op) {
20698  for (SDNode *U : Op->uses())
20699    if (U->getOpcode() != ISD::CopyToReg &&
20700        U->getOpcode() != ISD::SETCC &&
20701        U->getOpcode() != ISD::STORE)
20702      return false;
20703
20704  return true;
20705}
20706
20707/// Emit nodes that will be selected as "test Op0,Op0", or something
20708/// equivalent.
20709static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
20710                        SelectionDAG &DAG, const X86Subtarget &Subtarget) {
20711  // CF and OF aren't always set the way we want. Determine which
20712  // of these we need.
20713  bool NeedCF = false;
20714  bool NeedOF = false;
20715  switch (X86CC) {
20716  default: break;
20717  case X86::COND_A: case X86::COND_AE:
20718  case X86::COND_B: case X86::COND_BE:
20719    NeedCF = true;
20720    break;
20721  case X86::COND_G: case X86::COND_GE:
20722  case X86::COND_L: case X86::COND_LE:
20723  case X86::COND_O: case X86::COND_NO: {
20724    // Check if we really need to set the Overflow flag. If NoSignedWrap is
20725    // present, that is not actually needed.
20727    switch (Op->getOpcode()) {
20728    case ISD::ADD:
20729    case ISD::SUB:
20730    case ISD::MUL:
20731    case ISD::SHL:
20732      if (Op.getNode()->getFlags().hasNoSignedWrap())
20733        break;
20734      LLVM_FALLTHROUGH;
20735    default:
20736      NeedOF = true;
20737      break;
20738    }
20739    break;
20740  }
20741  }
20742  // See if we can use the EFLAGS value from the operand instead of
20743  // doing a separate TEST. TEST always sets OF and CF to 0, so unless
20744  // we prove that the arithmetic won't overflow, we can't use OF or CF.
20745  if (Op.getResNo() != 0 || NeedOF || NeedCF) {
20746    // Emit a CMP with 0, which is the TEST pattern.
20747    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
20748                       DAG.getConstant(0, dl, Op.getValueType()));
20749  }
20750  unsigned Opcode = 0;
20751  unsigned NumOperands = 0;
20752
20753  SDValue ArithOp = Op;
20754
20755  // NOTE: In the code below we use ArithOp to hold the arithmetic operation,
20756  // which may be the result of a cast. We use the variable 'Op', which is the
20757  // non-cast value, when we check for possible users.
20758  switch (ArithOp.getOpcode()) {
20759  case ISD::AND:
20760    // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
20761    // because a TEST instruction will be better.
20762    if (!hasNonFlagsUse(Op))
20763      break;
20764
20765    LLVM_FALLTHROUGH;
20766  case ISD::ADD:
20767  case ISD::SUB:
20768  case ISD::OR:
20769  case ISD::XOR:
20770    if (!isProfitableToUseFlagOp(Op))
20771      break;
20772
20773    // Otherwise use a regular EFLAGS-setting instruction.
20774    switch (ArithOp.getOpcode()) {
20775    default: llvm_unreachable("unexpected operator!");
20776    case ISD::ADD: Opcode = X86ISD::ADD; break;
20777    case ISD::SUB: Opcode = X86ISD::SUB; break;
20778    case ISD::XOR: Opcode = X86ISD::XOR; break;
20779    case ISD::AND: Opcode = X86ISD::AND; break;
20780    case ISD::OR:  Opcode = X86ISD::OR;  break;
20781    }
20782
20783    NumOperands = 2;
20784    break;
20785  case X86ISD::ADD:
20786  case X86ISD::SUB:
20787  case X86ISD::OR:
20788  case X86ISD::XOR:
20789  case X86ISD::AND:
20790    return SDValue(Op.getNode(), 1);
20791  case ISD::SSUBO:
20792  case ISD::USUBO: {
20793    // USUBO/SSUBO will become an X86ISD::SUB and we can use its Z flag.
20794    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
20795    return DAG.getNode(X86ISD::SUB, dl, VTs, Op->getOperand(0),
20796                       Op->getOperand(1)).getValue(1);
20797  }
20798  default:
20799    break;
20800  }
20801
20802  if (Opcode == 0) {
20803    // Emit a CMP with 0, which is the TEST pattern.
20804    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
20805                       DAG.getConstant(0, dl, Op.getValueType()));
20806  }
20807  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
20808  SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
20809
20810  SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
20811  DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), New);
20812  return SDValue(New.getNode(), 1);
20813}
20814
20815/// Emit nodes that will be selected as "cmp Op0,Op1", or something
20816/// equivalent.
20817static std::pair<SDValue, SDValue> EmitCmp(SDValue Op0, SDValue Op1,
20818                                           unsigned X86CC, const SDLoc &dl,
20819                                           SelectionDAG &DAG,
20820                                           const X86Subtarget &Subtarget,
20821                                           SDValue Chain, bool IsSignaling) {
20822  if (isNullConstant(Op1))
20823    return std::make_pair(EmitTest(Op0, X86CC, dl, DAG, Subtarget), Chain);
20824
20825  EVT CmpVT = Op0.getValueType();
20826
20827  if (CmpVT.isFloatingPoint()) {
20828    if (Chain) {
20829      SDValue Res =
20830          DAG.getNode(IsSignaling ? X86ISD::STRICT_FCMPS : X86ISD::STRICT_FCMP,
20831                      dl, {MVT::i32, MVT::Other}, {Chain, Op0, Op1});
20832      return std::make_pair(Res, Res.getValue(1));
20833    }
20834    return std::make_pair(DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1),
20835                          SDValue());
20836  }
20837
20838  assert((CmpVT == MVT::i8 || CmpVT == MVT::i16 ||
20839          CmpVT == MVT::i32 || CmpVT == MVT::i64) && "Unexpected VT!");
20840
20841  // Only promote the compare up to i32 if it is a 16-bit operation
20842  // with an immediate. 16-bit immediates are to be avoided.
20843  if (CmpVT == MVT::i16 && !Subtarget.isAtom() &&
20844      !DAG.getMachineFunction().getFunction().hasMinSize()) {
20845    ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0);
20846    ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
20847    // Don't do this if the immediate can fit in 8 bits.
20848    if ((COp0 && !COp0->getAPIntValue().isSignedIntN(8)) ||
20849        (COp1 && !COp1->getAPIntValue().isSignedIntN(8))) {
20850      unsigned ExtendOp =
20851          isX86CCSigned(X86CC) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
20852      if (X86CC == X86::COND_E || X86CC == X86::COND_NE) {
20853        // For equality comparisons, try to use SIGN_EXTEND if the input was
20854        // truncated from something with enough sign bits.
20855        if (Op0.getOpcode() == ISD::TRUNCATE) {
20856          SDValue In = Op0.getOperand(0);
20857          unsigned EffBits =
20858              In.getScalarValueSizeInBits() - DAG.ComputeNumSignBits(In) + 1;
20859          if (EffBits <= 16)
20860            ExtendOp = ISD::SIGN_EXTEND;
20861        } else if (Op1.getOpcode() == ISD::TRUNCATE) {
20862          SDValue In = Op1.getOperand(0);
20863          unsigned EffBits =
20864              In.getScalarValueSizeInBits() - DAG.ComputeNumSignBits(In) + 1;
20865          if (EffBits <= 16)
20866            ExtendOp = ISD::SIGN_EXTEND;
20867        }
20868      }
20869
20870      CmpVT = MVT::i32;
20871      Op0 = DAG.getNode(ExtendOp, dl, CmpVT, Op0);
20872      Op1 = DAG.getNode(ExtendOp, dl, CmpVT, Op1);
20873    }
20874  }
20875
20876  // Try to shrink i64 compares if the input has enough zero bits.
20877  // FIXME: Do this for non-constant compares for constant on LHS?
20878  if (CmpVT == MVT::i64 && isa<ConstantSDNode>(Op1) && !isX86CCSigned(X86CC) &&
20879      Op0.hasOneUse() && // Hacky way to not break CSE opportunities with sub.
20880      cast<ConstantSDNode>(Op1)->getAPIntValue().getActiveBits() <= 32 &&
20881      DAG.MaskedValueIsZero(Op0, APInt::getHighBitsSet(64, 32))) {
20882    CmpVT = MVT::i32;
20883    Op0 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op0);
20884    Op1 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op1);
20885  }
20886
20887  // Use SUB instead of CMP to enable CSE between SUB and CMP.
20888  SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
20889  SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
20890  return std::make_pair(Sub.getValue(1), SDValue());
20891}
20892
20893/// Convert a comparison if required by the subtarget.
20894SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
20895                                                 SelectionDAG &DAG) const {
20896  // If the subtarget does not support the FUCOMI instruction, floating-point
20897  // comparisons have to be converted.
20898  bool IsCmp = Cmp.getOpcode() == X86ISD::CMP;
20899  bool IsStrictCmp = Cmp.getOpcode() == X86ISD::STRICT_FCMP ||
20900                     Cmp.getOpcode() == X86ISD::STRICT_FCMPS;
20901
20902  if (Subtarget.hasCMov() || (!IsCmp && !IsStrictCmp) ||
20903      !Cmp.getOperand(IsStrictCmp ? 1 : 0).getValueType().isFloatingPoint() ||
20904      !Cmp.getOperand(IsStrictCmp ? 2 : 1).getValueType().isFloatingPoint())
20905    return Cmp;
20906
20907  // The instruction selector will select an FUCOM instruction instead of
20908  // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
20909  // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
20910  // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86any_fcmp ...)), 8))))
20911  SDLoc dl(Cmp);
20912  SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
20913  SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
20914  SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
20915                            DAG.getConstant(8, dl, MVT::i8));
20916  SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
20917
20918  // Some 64-bit targets lack SAHF support, but they do support FCOMI.
20919  assert(Subtarget.hasLAHFSAHF() && "Target doesn't support SAHF or FCOMI?");
20920  return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
20921}
20922
20923/// Check if replacement of SQRT with RSQRT should be disabled.
20924bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
20925  EVT VT = Op.getValueType();
20926
20927  // We never want to use both SQRT and RSQRT instructions for the same input.
20928  if (DAG.getNodeIfExists(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
20929    return false;
20930
20931  if (VT.isVector())
20932    return Subtarget.hasFastVectorFSQRT();
20933  return Subtarget.hasFastScalarFSQRT();
20934}
20935
20936/// The minimum architected relative accuracy is 2^-12. We need one
20937/// Newton-Raphson step to have a good float result (24 bits of precision).
20938SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
20939                                           SelectionDAG &DAG, int Enabled,
20940                                           int &RefinementSteps,
20941                                           bool &UseOneConstNR,
20942                                           bool Reciprocal) const {
20943  EVT VT = Op.getValueType();
20944
20945  // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
20946  // It is likely not profitable to do this for f64 because a double-precision
20947  // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
20948  // instructions: convert to single, rsqrtss, convert back to double, refine
20949  // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
20950  // along with FMA, this could be a throughput win.
20951  // TODO: SQRT requires SSE2 to prevent the introduction of an illegal v4i32
20952  // after legalize types.
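  // For reference, one Newton-Raphson step for rsqrt is commonly written as
  //   Est' = Est * (1.5 - 0.5 * X * Est * Est);
  // the refinement itself is expected to be built by the generic estimate
  // machinery around the estimate node returned here.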
20953  if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
20954      (VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
20955      (VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
20956      (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
20957      (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
20958    if (RefinementSteps == ReciprocalEstimate::Unspecified)
20959      RefinementSteps = 1;
20960
20961    UseOneConstNR = false;
20962    // There is no FRSQRT for 512-bit vectors, but there is RSQRT14.
20963    unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RSQRT14 : X86ISD::FRSQRT;
20964    return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
20965  }
20966  return SDValue();
20967}
20968
20969/// The minimum architected relative accuracy is 2^-12. We need one
20970/// Newton-Raphson step to have a good float result (24 bits of precision).
20971SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
20972                                            int Enabled,
20973                                            int &RefinementSteps) const {
20974  EVT VT = Op.getValueType();
20975
20976  // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
20977  // It is likely not profitable to do this for f64 because a double-precision
20978  // reciprocal estimate with refinement on x86 prior to FMA requires
20979  // 15 instructions: convert to single, rcpss, convert back to double, refine
20980  // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
20981  // along with FMA, this could be a throughput win.
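  // For reference, one Newton-Raphson step for the reciprocal is commonly
  // written as Est' = Est + Est * (1.0 - X * Est); as with rsqrt, the
  // refinement is expected to be built around the estimate node returned here.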
20982
20983  if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
20984      (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
20985      (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
20986      (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
20987    // Enable estimate codegen with 1 refinement step for vector division.
20988    // Scalar division estimates are disabled because they break too much
20989    // real-world code. These defaults are intended to match GCC behavior.
20990    if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
20991      return SDValue();
20992
20993    if (RefinementSteps == ReciprocalEstimate::Unspecified)
20994      RefinementSteps = 1;
20995
20996    // There is no FRCP for 512-bit vectors, but there is RCP14.
20997    unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RCP14 : X86ISD::FRCP;
20998    return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
20999  }
21000  return SDValue();
21001}
21002
21003/// If we have at least two divisions that use the same divisor, convert to
21004/// multiplication by a reciprocal. This may need to be adjusted for a given
21005/// CPU if a division's cost is not at least twice the cost of a multiplication.
21006/// This is because we still need one division to calculate the reciprocal and
21007/// then we need two multiplies by that reciprocal as replacements for the
21008/// original divisions.
21009unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
21010  return 2;
21011}
21012
21013SDValue
21014X86TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
21015                                 SelectionDAG &DAG,
21016                                 SmallVectorImpl<SDNode *> &Created) const {
21017  AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
21018  if (isIntDivCheap(N->getValueType(0), Attr))
21019    return SDValue(N,0); // Lower SDIV as SDIV
21020
21021  assert((Divisor.isPowerOf2() || (-Divisor).isPowerOf2()) &&
21022         "Unexpected divisor!");
21023
21024  // Only perform this transform if CMOV is supported; otherwise the select
21025  // below will become a branch.
21026  if (!Subtarget.hasCMov())
21027    return SDValue();
21028
21029  // fold (sdiv X, pow2)
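  // For example, sdiv X, 8 becomes roughly
  //   sra (X < 0 ? X + 7 : X), 3
  // with the select expected to lower to a CMOV on the targets allowed above.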
21030  EVT VT = N->getValueType(0);
21031  // FIXME: Support i8.
21032  if (VT != MVT::i16 && VT != MVT::i32 &&
21033      !(Subtarget.is64Bit() && VT == MVT::i64))
21034    return SDValue();
21035
21036  unsigned Lg2 = Divisor.countTrailingZeros();
21037
21038  // If the divisor is 2 or -2, the default expansion is better.
21039  if (Lg2 == 1)
21040    return SDValue();
21041
21042  SDLoc DL(N);
21043  SDValue N0 = N->getOperand(0);
21044  SDValue Zero = DAG.getConstant(0, DL, VT);
21045  APInt Lg2Mask = APInt::getLowBitsSet(VT.getSizeInBits(), Lg2);
21046  SDValue Pow2MinusOne = DAG.getConstant(Lg2Mask, DL, VT);
21047
21048  // If N0 is negative, we need to add (Pow2 - 1) to it before shifting right.
21049  SDValue Cmp = DAG.getSetCC(DL, MVT::i8, N0, Zero, ISD::SETLT);
21050  SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
21051  SDValue CMov = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
21052
21053  Created.push_back(Cmp.getNode());
21054  Created.push_back(Add.getNode());
21055  Created.push_back(CMov.getNode());
21056
21057  // Divide by pow2.
21058  SDValue SRA =
21059      DAG.getNode(ISD::SRA, DL, VT, CMov, DAG.getConstant(Lg2, DL, MVT::i8));
21060
21061  // If we're dividing by a positive value, we're done.  Otherwise, we must
21062  // negate the result.
21063  if (Divisor.isNonNegative())
21064    return SRA;
21065
21066  Created.push_back(SRA.getNode());
21067  return DAG.getNode(ISD::SUB, DL, VT, Zero, SRA);
21068}
21069
21070/// Result of 'and' is compared against zero. Change to a BT node if possible.
21071/// Returns the BT node and the condition code needed to use it.
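/// For example, a (seteq (and X, (shl 1, N)), 0) pattern can generally become
/// (BT X, N) with an AE condition, and the corresponding setne form a B
/// condition.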
21072static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC,
21073                            const SDLoc &dl, SelectionDAG &DAG,
21074                            SDValue &X86CC) {
21075  assert(And.getOpcode() == ISD::AND && "Expected AND node!");
21076  SDValue Op0 = And.getOperand(0);
21077  SDValue Op1 = And.getOperand(1);
21078  if (Op0.getOpcode() == ISD::TRUNCATE)
21079    Op0 = Op0.getOperand(0);
21080  if (Op1.getOpcode() == ISD::TRUNCATE)
21081    Op1 = Op1.getOperand(0);
21082
21083  SDValue Src, BitNo;
21084  if (Op1.getOpcode() == ISD::SHL)
21085    std::swap(Op0, Op1);
21086  if (Op0.getOpcode() == ISD::SHL) {
21087    if (isOneConstant(Op0.getOperand(0))) {
21088      // If we looked past a truncate, check that it's only truncating away
21089      // known zeros.
21090      unsigned BitWidth = Op0.getValueSizeInBits();
21091      unsigned AndBitWidth = And.getValueSizeInBits();
21092      if (BitWidth > AndBitWidth) {
21093        KnownBits Known = DAG.computeKnownBits(Op0);
21094        if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
21095          return SDValue();
21096      }
21097      Src = Op1;
21098      BitNo = Op0.getOperand(1);
21099    }
21100  } else if (Op1.getOpcode() == ISD::Constant) {
21101    ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
21102    uint64_t AndRHSVal = AndRHS->getZExtValue();
21103    SDValue AndLHS = Op0;
21104
21105    if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
21106      Src = AndLHS.getOperand(0);
21107      BitNo = AndLHS.getOperand(1);
21108    } else {
21109      // Use BT if the immediate can't be encoded in a TEST instruction or we
21110      // are optimizing for size and the immediate won't fit in a byte.
21111      bool OptForSize = DAG.shouldOptForSize();
21112      if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
21113          isPowerOf2_64(AndRHSVal)) {
21114        Src = AndLHS;
21115        BitNo = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl,
21116                                Src.getValueType());
21117      }
21118    }
21119  }
21120
21121  // No patterns found, give up.
21122  if (!Src.getNode())
21123    return SDValue();
21124
21125  // If Src is i8, promote it to i32 with any_extend.  There is no i8 BT
21126  // instruction.  Since the shift amount is in-range-or-undefined, we know
21127  // that doing a bittest on the i32 value is ok.  We extend to i32 because
21128  // the encoding for the i16 version is larger than the i32 version.
21129  // Also promote i16 to i32 for performance / code size reasons.
21130  if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
21131    Src = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Src);
21132
21133  // See if we can use the 32-bit instruction instead of the 64-bit one for a
21134  // shorter encoding. Since the former takes BitNo modulo 32 and the latter
21135  // takes it modulo 64, this is only valid if the 5th bit of BitNo is known
21136  // to be zero.
21137  if (Src.getValueType() == MVT::i64 &&
21138      DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
21139    Src = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
21140
21141  // If the operand types disagree, extend the shift amount to match.  Since
21142  // BT ignores high bits (like shifts) we can use anyextend.
21143  if (Src.getValueType() != BitNo.getValueType())
21144    BitNo = DAG.getNode(ISD::ANY_EXTEND, dl, Src.getValueType(), BitNo);
21145
21146  X86CC = DAG.getTargetConstant(CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B,
21147                                dl, MVT::i8);
21148  return DAG.getNode(X86ISD::BT, dl, MVT::i32, Src, BitNo);
21149}
21150
21151/// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
21152/// CMPs.
21153static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
21154                                   SDValue &Op1, bool &IsAlwaysSignaling) {
21155  unsigned SSECC;
21156  bool Swap = false;
21157
21158  // SSE Condition code mapping:
21159  //  0 - EQ
21160  //  1 - LT
21161  //  2 - LE
21162  //  3 - UNORD
21163  //  4 - NEQ
21164  //  5 - NLT
21165  //  6 - NLE
21166  //  7 - ORD
21167  switch (SetCCOpcode) {
21168  default: llvm_unreachable("Unexpected SETCC condition");
21169  case ISD::SETOEQ:
21170  case ISD::SETEQ:  SSECC = 0; break;
21171  case ISD::SETOGT:
21172  case ISD::SETGT:  Swap = true; LLVM_FALLTHROUGH;
21173  case ISD::SETLT:
21174  case ISD::SETOLT: SSECC = 1; break;
21175  case ISD::SETOGE:
21176  case ISD::SETGE:  Swap = true; LLVM_FALLTHROUGH;
21177  case ISD::SETLE:
21178  case ISD::SETOLE: SSECC = 2; break;
21179  case ISD::SETUO:  SSECC = 3; break;
21180  case ISD::SETUNE:
21181  case ISD::SETNE:  SSECC = 4; break;
21182  case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
21183  case ISD::SETUGE: SSECC = 5; break;
21184  case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
21185  case ISD::SETUGT: SSECC = 6; break;
21186  case ISD::SETO:   SSECC = 7; break;
21187  case ISD::SETUEQ: SSECC = 8; break;
21188  case ISD::SETONE: SSECC = 12; break;
21189  }
21190  if (Swap)
21191    std::swap(Op0, Op1);
21192
21193  switch (SetCCOpcode) {
21194  default:
21195    IsAlwaysSignaling = true;
21196    break;
21197  case ISD::SETEQ:
21198  case ISD::SETOEQ:
21199  case ISD::SETUEQ:
21200  case ISD::SETNE:
21201  case ISD::SETONE:
21202  case ISD::SETUNE:
21203  case ISD::SETO:
21204  case ISD::SETUO:
21205    IsAlwaysSignaling = false;
21206    break;
21207  }
21208
21209  return SSECC;
21210}
21211
21212/// Break a 256-bit integer VSETCC into two new 128-bit ones and then
21213/// concatenate the result back.
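/// For example (illustrative sketch): a v8i32 SETCC becomes roughly
///   concat_vectors (setcc v4i32 LHS[0..3], RHS[0..3], CC),
///                  (setcc v4i32 LHS[4..7], RHS[4..7], CC)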
21214static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
21215  MVT VT = Op.getSimpleValueType();
21216
21217  assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
21218         "Unsupported value type for operation");
21219
21220  unsigned NumElems = VT.getVectorNumElements();
21221  SDLoc dl(Op);
21222  SDValue CC = Op.getOperand(2);
21223
21224  // Extract the LHS vectors
21225  SDValue LHS = Op.getOperand(0);
21226  SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
21227  SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
21228
21229  // Extract the RHS vectors
21230  SDValue RHS = Op.getOperand(1);
21231  SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
21232  SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
21233
21234  // Issue the operation on the smaller types and concatenate the result back
21235  MVT EltVT = VT.getVectorElementType();
21236  MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
21237  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
21238                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
21239                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
21240}
21241
21242static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
21243
21244  SDValue Op0 = Op.getOperand(0);
21245  SDValue Op1 = Op.getOperand(1);
21246  SDValue CC = Op.getOperand(2);
21247  MVT VT = Op.getSimpleValueType();
21248  SDLoc dl(Op);
21249
21250  assert(VT.getVectorElementType() == MVT::i1 &&
21251         "Cannot set masked compare for this operation");
21252
21253  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
21254
21255  // Prefer SETGT over SETLT.
21256  if (SetCCOpcode == ISD::SETLT) {
21257    SetCCOpcode = ISD::getSetCCSwappedOperands(SetCCOpcode);
21258    std::swap(Op0, Op1);
21259  }
21260
21261  return DAG.getSetCC(dl, VT, Op0, Op1, SetCCOpcode);
21262}
21263
21264/// Given a buildvector constant, return a new vector constant with each element
21265/// incremented or decremented. If incrementing or decrementing would result in
21266/// unsigned overflow or underflow, or if this is not a simple vector constant,
21267/// return an empty value.
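/// For example (illustrative sketch): incrementing <i32 1, i32 2> yields
/// <i32 2, i32 3>, while decrementing <i8 0, i8 7> returns an empty SDValue
/// because the first element would wrap.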
21268static SDValue incDecVectorConstant(SDValue V, SelectionDAG &DAG, bool IsInc) {
21269  auto *BV = dyn_cast<BuildVectorSDNode>(V.getNode());
21270  if (!BV)
21271    return SDValue();
21272
21273  MVT VT = V.getSimpleValueType();
21274  MVT EltVT = VT.getVectorElementType();
21275  unsigned NumElts = VT.getVectorNumElements();
21276  SmallVector<SDValue, 8> NewVecC;
21277  SDLoc DL(V);
21278  for (unsigned i = 0; i < NumElts; ++i) {
21279    auto *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
21280    if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EltVT)
21281      return SDValue();
21282
21283    // Avoid overflow/underflow.
21284    const APInt &EltC = Elt->getAPIntValue();
21285    if ((IsInc && EltC.isMaxValue()) || (!IsInc && EltC.isNullValue()))
21286      return SDValue();
21287
21288    NewVecC.push_back(DAG.getConstant(EltC + (IsInc ? 1 : -1), DL, EltVT));
21289  }
21290
21291  return DAG.getBuildVector(VT, DL, NewVecC);
21292}
21293
21294/// As another special case, use PSUBUS[BW] when it's profitable. E.g. for
21295/// Op0 u<= Op1:
21296///   t = psubus Op0, Op1
21297///   pcmpeq t, <0..0>
21298static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
21299                                    ISD::CondCode Cond, const SDLoc &dl,
21300                                    const X86Subtarget &Subtarget,
21301                                    SelectionDAG &DAG) {
21302  if (!Subtarget.hasSSE2())
21303    return SDValue();
21304
21305  MVT VET = VT.getVectorElementType();
21306  if (VET != MVT::i8 && VET != MVT::i16)
21307    return SDValue();
21308
21309  switch (Cond) {
21310  default:
21311    return SDValue();
21312  case ISD::SETULT: {
21313    // If the comparison is against a constant, we can turn this into a
21314    // setule.  With psubus, setule does not require a swap.  This is
21315    // beneficial because the constant in the register is no longer
21316    // clobbered as the destination, so it can be hoisted out of a loop.
21317    // Only do this pre-AVX, since vpcmp* is no longer destructive.
21318    if (Subtarget.hasAVX())
21319      return SDValue();
21320    SDValue ULEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false);
21321    if (!ULEOp1)
21322      return SDValue();
21323    Op1 = ULEOp1;
21324    break;
21325  }
21326  case ISD::SETUGT: {
21327    // If the comparison is against a constant, we can turn this into a setuge.
21328    // This is beneficial because materializing a constant 0 for the PCMPEQ is
21329    // probably cheaper than XOR+PCMPGT using 2 different vector constants:
21330    // cmpgt (xor X, SignMaskC) CmpC --> cmpeq (usubsat (CmpC+1), X), 0
21331    SDValue UGEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true);
21332    if (!UGEOp1)
21333      return SDValue();
21334    Op1 = Op0;
21335    Op0 = UGEOp1;
21336    break;
21337  }
21338  // Psubus is better than flip-sign because it requires no inversion.
21339  case ISD::SETUGE:
21340    std::swap(Op0, Op1);
21341    break;
21342  case ISD::SETULE:
21343    break;
21344  }
21345
21346  SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
21347  return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
21348                     DAG.getConstant(0, dl, VT));
21349}
21350
21351static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
21352                           SelectionDAG &DAG) {
21353  bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
21354                  Op.getOpcode() == ISD::STRICT_FSETCCS;
21355  SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
21356  SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
21357  SDValue CC = Op.getOperand(IsStrict ? 3 : 2);
21358  MVT VT = Op->getSimpleValueType(0);
21359  ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
21360  bool isFP = Op1.getSimpleValueType().isFloatingPoint();
21361  SDLoc dl(Op);
21362
21363  if (isFP) {
21364#ifndef NDEBUG
21365    MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
21366    assert(EltVT == MVT::f32 || EltVT == MVT::f64);
21367#endif
21368
21369    bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
21370    SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
21371
21372    unsigned Opc;
21373    if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1) {
21374      assert(VT.getVectorNumElements() <= 16);
21375      Opc = IsStrict ? X86ISD::STRICT_CMPM : X86ISD::CMPM;
21376    } else {
21377      Opc = IsStrict ? X86ISD::STRICT_CMPP : X86ISD::CMPP;
21378      // The SSE/AVX packed FP comparison nodes are defined with a
21379      // floating-point vector result that matches the operand type. This allows
21380      // them to work with an SSE1 target (integer vector types are not legal).
21381      VT = Op0.getSimpleValueType();
21382    }
21383
21384    SDValue Cmp;
21385    bool IsAlwaysSignaling;
21386    unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1, IsAlwaysSignaling);
21387    if (!Subtarget.hasAVX()) {
21388      // TODO: We could use the following steps to handle a quiet compare with
21389      // signaling encodings.
21390      // 1. Get ordered masks from a quiet ISD::SETO.
21391      // 2. Use the masks to mask potential unordered elements in operands A and B.
21392      // 3. Get the compare results of the masked A and B.
21393      // 4. Calculate the final result using the mask and the result from 3.
21394      // But currently, we just fall back to scalar operations.
21395      if (IsStrict && IsAlwaysSignaling && !IsSignaling)
21396        return SDValue();
21397
21398      // Insert an extra signaling instruction to raise exception.
21399      if (IsStrict && !IsAlwaysSignaling && IsSignaling) {
21400        SDValue SignalCmp = DAG.getNode(
21401            Opc, dl, {VT, MVT::Other},
21402            {Chain, Op0, Op1, DAG.getTargetConstant(1, dl, MVT::i8)}); // LT_OS
21403        // FIXME: It seems we need to update the flags of all new strict nodes.
21404        // Otherwise, mayRaiseFPException in MI will return false due to
21405        // NoFPExcept = false by default. However, I didn't find it in other
21406        // patches.
21407        SignalCmp->setFlags(Op->getFlags());
21408        Chain = SignalCmp.getValue(1);
21409      }
21410
21411      // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
21412      // emit two comparisons and a logic op to tie them together.
21413      if (SSECC >= 8) {
21414        // LLVM predicate is SETUEQ or SETONE.
21415        unsigned CC0, CC1;
21416        unsigned CombineOpc;
21417        if (Cond == ISD::SETUEQ) {
21418          CC0 = 3; // UNORD
21419          CC1 = 0; // EQ
21420          CombineOpc = X86ISD::FOR;
21421        } else {
21422          assert(Cond == ISD::SETONE);
21423          CC0 = 7; // ORD
21424          CC1 = 4; // NEQ
21425          CombineOpc = X86ISD::FAND;
21426        }
21427
21428        SDValue Cmp0, Cmp1;
21429        if (IsStrict) {
21430          Cmp0 = DAG.getNode(
21431              Opc, dl, {VT, MVT::Other},
21432              {Chain, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8)});
21433          Cmp1 = DAG.getNode(
21434              Opc, dl, {VT, MVT::Other},
21435              {Chain, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8)});
21436          Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Cmp0.getValue(1),
21437                              Cmp1.getValue(1));
21438        } else {
21439          Cmp0 = DAG.getNode(
21440              Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8));
21441          Cmp1 = DAG.getNode(
21442              Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8));
21443        }
21444        Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
21445      } else {
21446        if (IsStrict) {
21447          Cmp = DAG.getNode(
21448              Opc, dl, {VT, MVT::Other},
21449              {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
21450          Chain = Cmp.getValue(1);
21451        } else
21452          Cmp = DAG.getNode(
21453              Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
21454      }
21455    } else {
21456      // Handle all other FP comparisons here.
21457      if (IsStrict) {
21458        // Flip already-signaling CCs before setting bit 4 of the AVX CC.
21459        SSECC |= (IsAlwaysSignaling ^ IsSignaling) << 4;
21460        Cmp = DAG.getNode(
21461            Opc, dl, {VT, MVT::Other},
21462            {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
21463        Chain = Cmp.getValue(1);
21464      } else
21465        Cmp = DAG.getNode(
21466            Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
21467    }
21468
21469    // If this is SSE/AVX CMPP, bitcast the result back to integer to match the
21470    // result type of SETCC. The bitcast is expected to be optimized away
21471    // during combining/isel.
21472    Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
21473
21474    if (IsStrict)
21475      return DAG.getMergeValues({Cmp, Chain}, dl);
21476
21477    return Cmp;
21478  }
21479
21480  assert(!IsStrict && "Strict SETCC only handles FP operands.");
21481
21482  MVT VTOp0 = Op0.getSimpleValueType();
21483  (void)VTOp0;
21484  assert(VTOp0 == Op1.getSimpleValueType() &&
21485         "Expected operands with same type!");
21486  assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
21487         "Invalid number of packed elements for source and destination!");
21488
21489  // The non-AVX512 code below works under the assumption that source and
21490  // destination types are the same.
21491  assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
21492         "Value types for source and destination must be the same!");
21493
21494  // The result is boolean, but operands are int/float
21495  if (VT.getVectorElementType() == MVT::i1) {
21496    // In AVX-512 architecture setcc returns mask with i1 elements,
21497    // But there is no compare instruction for i8 and i16 elements in KNL.
21498    assert((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&
21499           "Unexpected operand type");
21500    return LowerIntVSETCC_AVX512(Op, DAG);
21501  }
21502
21503  // Lower using XOP integer comparisons.
21504  if (VT.is128BitVector() && Subtarget.hasXOP()) {
21505    // Translate compare code to XOP PCOM compare mode.
21506    unsigned CmpMode = 0;
21507    switch (Cond) {
21508    default: llvm_unreachable("Unexpected SETCC condition");
21509    case ISD::SETULT:
21510    case ISD::SETLT: CmpMode = 0x00; break;
21511    case ISD::SETULE:
21512    case ISD::SETLE: CmpMode = 0x01; break;
21513    case ISD::SETUGT:
21514    case ISD::SETGT: CmpMode = 0x02; break;
21515    case ISD::SETUGE:
21516    case ISD::SETGE: CmpMode = 0x03; break;
21517    case ISD::SETEQ: CmpMode = 0x04; break;
21518    case ISD::SETNE: CmpMode = 0x05; break;
21519    }
21520
21521    // Are we comparing unsigned or signed integers?
21522    unsigned Opc =
21523        ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;
21524
21525    return DAG.getNode(Opc, dl, VT, Op0, Op1,
21526                       DAG.getTargetConstant(CmpMode, dl, MVT::i8));
21527  }
21528
21529  // (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
21530  // Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
21531  if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
21532    SDValue BC0 = peekThroughBitcasts(Op0);
21533    if (BC0.getOpcode() == ISD::AND) {
21534      APInt UndefElts;
21535      SmallVector<APInt, 64> EltBits;
21536      if (getTargetConstantBitsFromNode(BC0.getOperand(1),
21537                                        VT.getScalarSizeInBits(), UndefElts,
21538                                        EltBits, false, false)) {
21539        if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
21540          Cond = ISD::SETEQ;
21541          Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
21542        }
21543      }
21544    }
21545  }
21546
21547  // ICMP_EQ(AND(X,C),C) -> SRA(SHL(X,LOG2(C)),BW-1) iff C is power-of-2.
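  // For example (illustrative sketch), with v4i32 and C == 8 (bit 3): SHL by
  // 28 moves bit 3 into the sign position and SRA by 31 splats it, producing
  // an all-ones lane exactly where (X & 8) == 8.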
21548  if (Cond == ISD::SETEQ && Op0.getOpcode() == ISD::AND &&
21549      Op0.getOperand(1) == Op1 && Op0.hasOneUse()) {
21550    ConstantSDNode *C1 = isConstOrConstSplat(Op1);
21551    if (C1 && C1->getAPIntValue().isPowerOf2()) {
21552      unsigned BitWidth = VT.getScalarSizeInBits();
21553      unsigned ShiftAmt = BitWidth - C1->getAPIntValue().logBase2() - 1;
21554
21555      SDValue Result = Op0.getOperand(0);
21556      Result = DAG.getNode(ISD::SHL, dl, VT, Result,
21557                           DAG.getConstant(ShiftAmt, dl, VT));
21558      Result = DAG.getNode(ISD::SRA, dl, VT, Result,
21559                           DAG.getConstant(BitWidth - 1, dl, VT));
21560      return Result;
21561    }
21562  }
21563
21564  // Break 256-bit integer vector compare into smaller ones.
21565  if (VT.is256BitVector() && !Subtarget.hasInt256())
21566    return Lower256IntVSETCC(Op, DAG);
21567
21568  // If this is a SETNE against the signed minimum value, change it to SETGT.
21569  // If this is a SETNE against the signed maximum value, change it to SETLT,
21570  // which will be swapped to SETGT.
21571  // Otherwise we use PCMPEQ+invert.
21572  APInt ConstValue;
21573  if (Cond == ISD::SETNE &&
21574      ISD::isConstantSplatVector(Op1.getNode(), ConstValue)) {
21575    if (ConstValue.isMinSignedValue())
21576      Cond = ISD::SETGT;
21577    else if (ConstValue.isMaxSignedValue())
21578      Cond = ISD::SETLT;
21579  }
21580
21581  // If both operands are known non-negative, then an unsigned compare is the
21582  // same as a signed compare and there's no need to flip signbits.
21583  // TODO: We could check for more general simplifications here since we're
21584  // computing known bits.
21585  bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
21586                   !(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));
21587
21588  // Special case: Use min/max operations for unsigned compares.
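  // For example (illustrative sketch): "X u<= Y" holds iff X == umin(X, Y), so
  // it lowers to PMINU + PCMPEQ; "X u>= Y" uses PMAXU the same way, and the
  // strict forms invert the result afterwards.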
21589  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
21590  if (ISD::isUnsignedIntSetCC(Cond) &&
21591      (FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
21592      TLI.isOperationLegal(ISD::UMIN, VT)) {
21593    // If we have a constant operand, increment/decrement it and change the
21594    // condition to avoid an invert.
21595    if (Cond == ISD::SETUGT) {
21596      // X > C --> X >= (C+1) --> X == umax(X, C+1)
21597      if (SDValue UGTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true)) {
21598        Op1 = UGTOp1;
21599        Cond = ISD::SETUGE;
21600      }
21601    }
21602    if (Cond == ISD::SETULT) {
21603      // X < C --> X <= (C-1) --> X == umin(X, C-1)
21604      if (SDValue ULTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false)) {
21605        Op1 = ULTOp1;
21606        Cond = ISD::SETULE;
21607      }
21608    }
21609    bool Invert = false;
21610    unsigned Opc;
21611    switch (Cond) {
21612    default: llvm_unreachable("Unexpected condition code");
21613    case ISD::SETUGT: Invert = true; LLVM_FALLTHROUGH;
21614    case ISD::SETULE: Opc = ISD::UMIN; break;
21615    case ISD::SETULT: Invert = true; LLVM_FALLTHROUGH;
21616    case ISD::SETUGE: Opc = ISD::UMAX; break;
21617    }
21618
21619    SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
21620    Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
21621
21622    // If the logical-not of the result is required, perform that now.
21623    if (Invert)
21624      Result = DAG.getNOT(dl, Result, VT);
21625
21626    return Result;
21627  }
21628
21629  // Try to use SUBUS and PCMPEQ.
21630  if (SDValue V = LowerVSETCCWithSUBUS(Op0, Op1, VT, Cond, dl, Subtarget, DAG))
21631    return V;
21632
21633  // We are handling one of the integer comparisons here. Since SSE only has
21634  // GT and EQ comparisons for integers, swapping operands and multiple
21635  // operations may be required for some comparisons.
21636  unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
21637                                                            : X86ISD::PCMPGT;
21638  bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
21639              Cond == ISD::SETGE || Cond == ISD::SETUGE;
21640  bool Invert = Cond == ISD::SETNE ||
21641                (Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));
21642
21643  if (Swap)
21644    std::swap(Op0, Op1);
21645
21646  // Check that the operation in question is available (most are plain SSE2,
21647  // but PCMPGTQ and PCMPEQQ have different requirements).
21648  if (VT == MVT::v2i64) {
21649    if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
21650      assert(Subtarget.hasSSE2() && "Don't know how to lower!");
21651
21652      // Special case for sign bit test. We can use a v4i32 PCMPGT and shuffle
21653      // the odd elements over the even elements.
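      // For example (illustrative sketch): "0 s> X" per i64 lane is just the
      // sign of the high dword, so a v4i32 PCMPGT against zero followed by the
      // {1,1,3,3} shuffle splats each high-dword result across its i64 lane.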
21654      if (!FlipSigns && !Invert && ISD::isBuildVectorAllZeros(Op0.getNode())) {
21655        Op0 = DAG.getConstant(0, dl, MVT::v4i32);
21656        Op1 = DAG.getBitcast(MVT::v4i32, Op1);
21657
21658        SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
21659        static const int MaskHi[] = { 1, 1, 3, 3 };
21660        SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
21661
21662        return DAG.getBitcast(VT, Result);
21663      }
21664
21665      if (!FlipSigns && !Invert && ISD::isBuildVectorAllOnes(Op1.getNode())) {
21666        Op0 = DAG.getBitcast(MVT::v4i32, Op0);
21667        Op1 = DAG.getConstant(-1, dl, MVT::v4i32);
21668
21669        SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
21670        static const int MaskHi[] = { 1, 1, 3, 3 };
21671        SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
21672
21673        return DAG.getBitcast(VT, Result);
21674      }
21675
21676      // Since SSE has no unsigned integer comparisons, we need to flip the sign
21677      // bits of the inputs before performing those operations. The lower
21678      // compare is always unsigned.
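      // In other words (sketch): a fully unsigned compare flips the sign bits
      // of both dword halves (0x8000000080000000) so that signed PCMPGTD acts
      // as unsigned, while a signed i64 compare only flips the low halves
      // (0x0000000080000000), keeping the high-dword compare signed.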
21679      SDValue SB;
21680      if (FlipSigns) {
21681        SB = DAG.getConstant(0x8000000080000000ULL, dl, MVT::v2i64);
21682      } else {
21683        SB = DAG.getConstant(0x0000000080000000ULL, dl, MVT::v2i64);
21684      }
21685      Op0 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op0, SB);
21686      Op1 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op1, SB);
21687
21688      // Cast everything to the right type.
21689      Op0 = DAG.getBitcast(MVT::v4i32, Op0);
21690      Op1 = DAG.getBitcast(MVT::v4i32, Op1);
21691
21692      // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
21693      SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
21694      SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
21695
21696      // Create masks for only the low/high parts of the 64-bit integers.
21697      static const int MaskHi[] = { 1, 1, 3, 3 };
21698      static const int MaskLo[] = { 0, 0, 2, 2 };
21699      SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
21700      SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
21701      SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
21702
21703      SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
21704      Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
21705
21706      if (Invert)
21707        Result = DAG.getNOT(dl, Result, MVT::v4i32);
21708
21709      return DAG.getBitcast(VT, Result);
21710    }
21711
21712    if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
21713      // If pcmpeqq is missing but pcmpeqd is available, synthesize pcmpeqq with
21714      // pcmpeqd + pshufd + pand.
21715      assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");
21716
21717      // First cast everything to the right type.
21718      Op0 = DAG.getBitcast(MVT::v4i32, Op0);
21719      Op1 = DAG.getBitcast(MVT::v4i32, Op1);
21720
21721      // Do the compare.
21722      SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
21723
21724      // Make sure the lower and upper halves are both all-ones.
21725      static const int Mask[] = { 1, 0, 3, 2 };
21726      SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
21727      Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
21728
21729      if (Invert)
21730        Result = DAG.getNOT(dl, Result, MVT::v4i32);
21731
21732      return DAG.getBitcast(VT, Result);
21733    }
21734  }
21735
21736  // Since SSE has no unsigned integer comparisons, we need to flip the sign
21737  // bits of the inputs before performing those operations.
21738  if (FlipSigns) {
21739    MVT EltVT = VT.getVectorElementType();
21740    SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
21741                                 VT);
21742    Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
21743    Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
21744  }
21745
21746  SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
21747
21748  // If the logical-not of the result is required, perform that now.
21749  if (Invert)
21750    Result = DAG.getNOT(dl, Result, VT);
21751
21752  return Result;
21753}
21754
21755// Try to select this as a KORTEST+SETCC or KTEST+SETCC if possible.
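// For example (illustrative sketch): (setcc (bitcast v16i1 %k to i16), 0, eq)
// can become KORTESTW %k, %k + SETE, since KORTEST sets ZF when the OR of its
// operands is all zeros (and CF when it is all ones, which covers the
// all-ones compare below).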
21756static SDValue EmitAVX512Test(SDValue Op0, SDValue Op1, ISD::CondCode CC,
21757                              const SDLoc &dl, SelectionDAG &DAG,
21758                              const X86Subtarget &Subtarget,
21759                              SDValue &X86CC) {
21760  // Only support equality comparisons.
21761  if (CC != ISD::SETEQ && CC != ISD::SETNE)
21762    return SDValue();
21763
21764  // Must be a bitcast from vXi1.
21765  if (Op0.getOpcode() != ISD::BITCAST)
21766    return SDValue();
21767
21768  Op0 = Op0.getOperand(0);
21769  MVT VT = Op0.getSimpleValueType();
21770  if (!(Subtarget.hasAVX512() && VT == MVT::v16i1) &&
21771      !(Subtarget.hasDQI() && VT == MVT::v8i1) &&
21772      !(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
21773    return SDValue();
21774
21775  X86::CondCode X86Cond;
21776  if (isNullConstant(Op1)) {
21777    X86Cond = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
21778  } else if (isAllOnesConstant(Op1)) {
21779    // C flag is set for all ones.
21780    X86Cond = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
21781  } else
21782    return SDValue();
21783
21784  // If the input is an AND, we can combine its operands into the KTEST.
21785  bool KTestable = false;
21786  if (Subtarget.hasDQI() && (VT == MVT::v8i1 || VT == MVT::v16i1))
21787    KTestable = true;
21788  if (Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1))
21789    KTestable = true;
21790  if (!isNullConstant(Op1))
21791    KTestable = false;
21792  if (KTestable && Op0.getOpcode() == ISD::AND && Op0.hasOneUse()) {
21793    SDValue LHS = Op0.getOperand(0);
21794    SDValue RHS = Op0.getOperand(1);
21795    X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
21796    return DAG.getNode(X86ISD::KTEST, dl, MVT::i32, LHS, RHS);
21797  }
21798
21799  // If the input is an OR, we can combine its operands into the KORTEST.
21800  SDValue LHS = Op0;
21801  SDValue RHS = Op0;
21802  if (Op0.getOpcode() == ISD::OR && Op0.hasOneUse()) {
21803    LHS = Op0.getOperand(0);
21804    RHS = Op0.getOperand(1);
21805  }
21806
21807  X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
21808  return DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
21809}
21810
21811/// Emit flags for the given setcc condition and operands. Also returns the
21812/// corresponding X86 condition code constant in X86CC.
21813SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
21814                                             ISD::CondCode CC, const SDLoc &dl,
21815                                             SelectionDAG &DAG, SDValue &X86CC,
21816                                             SDValue &Chain,
21817                                             bool IsSignaling) const {
21818  // Optimize to BT if possible.
21819  // Lower (X & (1 << N)) == 0 to BT(X, N).
21820  // Lower ((X >>u N) & 1) != 0 to BT(X, N).
21821  // Lower ((X >>s N) & 1) != 0 to BT(X, N).
21822  if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1) &&
21823      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
21824    if (SDValue BT = LowerAndToBT(Op0, CC, dl, DAG, X86CC))
21825      return BT;
21826  }
21827
21828  // Try to use PTEST for a tree of ORs equality-compared with 0.
21829  // TODO: We could do AND tree with all 1s as well by using the C flag.
21830  if (Op0.getOpcode() == ISD::OR && isNullConstant(Op1) &&
21831      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
21832    if (SDValue PTEST = LowerVectorAllZeroTest(Op0, CC, Subtarget, DAG, X86CC))
21833      return PTEST;
21834  }
21835
21836  // Try to lower using KORTEST or KTEST.
21837  if (SDValue Test = EmitAVX512Test(Op0, Op1, CC, dl, DAG, Subtarget, X86CC))
21838    return Test;
21839
21840  // Look for X == 0, X == 1, X != 0, or X != 1.  We can simplify some forms of
21841  // these.
21842  if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
21843      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
21844    // If the input is a setcc, then reuse the input setcc or use a new one with
21845    // the inverted condition.
21846    if (Op0.getOpcode() == X86ISD::SETCC) {
21847      bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
21848
21849      X86CC = Op0.getOperand(0);
21850      if (Invert) {
21851        X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
21852        CCode = X86::GetOppositeBranchCondition(CCode);
21853        X86CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
21854      }
21855
21856      return Op0.getOperand(1);
21857    }
21858  }
21859
21860  // Try to use the carry flag from the add in place of a separate CMP for:
21861  // (seteq (add X, -1), -1). Similar for setne.
21862  if (isAllOnesConstant(Op1) && Op0.getOpcode() == ISD::ADD &&
21863      Op0.getOperand(1) == Op1 && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
21864    if (isProfitableToUseFlagOp(Op0)) {
21865      SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
21866
21867      SDValue New = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(0),
21868                                Op0.getOperand(1));
21869      DAG.ReplaceAllUsesOfValueWith(SDValue(Op0.getNode(), 0), New);
21870      X86::CondCode CCode = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
21871      X86CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
21872      return SDValue(New.getNode(), 1);
21873    }
21874  }
21875
21876  bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
21877  X86::CondCode CondCode = TranslateX86CC(CC, dl, IsFP, Op0, Op1, DAG);
21878  if (CondCode == X86::COND_INVALID)
21879    return SDValue();
21880
21881  std::pair<SDValue, SDValue> Tmp =
21882      EmitCmp(Op0, Op1, CondCode, dl, DAG, Subtarget, Chain, IsSignaling);
21883  SDValue EFLAGS = Tmp.first;
21884  if (Chain)
21885    Chain = Tmp.second;
21886  EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
21887  X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
21888  return EFLAGS;
21889}
21890
21891SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
21892
21893  bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
21894                  Op.getOpcode() == ISD::STRICT_FSETCCS;
21895  MVT VT = Op->getSimpleValueType(0);
21896
21897  if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
21898
21899  assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
21900  SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
21901  SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
21902  SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
21903  SDLoc dl(Op);
21904  ISD::CondCode CC =
21905      cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();
21906
21907  // Handle f128 first, since one possible outcome is a normal integer
21908  // comparison which gets handled by emitFlagsForSetcc.
21909  if (Op0.getValueType() == MVT::f128) {
21910    softenSetCCOperands(DAG, MVT::f128, Op0, Op1, CC, dl, Op0, Op1, Chain,
21911                        Op.getOpcode() == ISD::STRICT_FSETCCS);
21912
21913    // If softenSetCCOperands returned a scalar, use it.
21914    if (!Op1.getNode()) {
21915      assert(Op0.getValueType() == Op.getValueType() &&
21916             "Unexpected setcc expansion!");
21917      if (IsStrict)
21918        return DAG.getMergeValues({Op0, Chain}, dl);
21919      return Op0;
21920    }
21921  }
21922
21923  SDValue X86CC;
21924  SDValue EFLAGS = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC, Chain,
21925                                     Op.getOpcode() == ISD::STRICT_FSETCCS);
21926  if (!EFLAGS)
21927    return SDValue();
21928
21929  SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
21930
21931  if (IsStrict)
21932    return DAG.getMergeValues({Res, Chain}, dl);
21933
21934  return Res;
21935}
21936
21937SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
21938  SDValue LHS = Op.getOperand(0);
21939  SDValue RHS = Op.getOperand(1);
21940  SDValue Carry = Op.getOperand(2);
21941  SDValue Cond = Op.getOperand(3);
21942  SDLoc DL(Op);
21943
21944  assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
21945  X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
21946
21947  // Recreate the carry if needed.
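  // (Illustrative note: adding all-ones to the materialized carry value sets
  // CF exactly when Carry was nonzero, so the SBB below consumes the original
  // carry bit.)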
21948  EVT CarryVT = Carry.getValueType();
21949  APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
21950  Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
21951                      Carry, DAG.getConstant(NegOne, DL, CarryVT));
21952
21953  SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
21954  SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
21955  return getSETCC(CC, Cmp.getValue(1), DL, DAG);
21956}
21957
21958// This function returns three things: the arithmetic computation itself
21959// (Value), an EFLAGS result (Overflow), and a condition code (Cond).  The
21960// flag and the condition code define the case in which the arithmetic
21961// computation overflows.
21962static std::pair<SDValue, SDValue>
21963getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {
21964  assert(Op.getResNo() == 0 && "Unexpected result number!");
21965  SDValue Value, Overflow;
21966  SDValue LHS = Op.getOperand(0);
21967  SDValue RHS = Op.getOperand(1);
21968  unsigned BaseOp = 0;
21969  SDLoc DL(Op);
21970  switch (Op.getOpcode()) {
21971  default: llvm_unreachable("Unknown ovf instruction!");
21972  case ISD::SADDO:
21973    BaseOp = X86ISD::ADD;
21974    Cond = X86::COND_O;
21975    break;
21976  case ISD::UADDO:
21977    BaseOp = X86ISD::ADD;
21978    Cond = isOneConstant(RHS) ? X86::COND_E : X86::COND_B;
21979    break;
21980  case ISD::SSUBO:
21981    BaseOp = X86ISD::SUB;
21982    Cond = X86::COND_O;
21983    break;
21984  case ISD::USUBO:
21985    BaseOp = X86ISD::SUB;
21986    Cond = X86::COND_B;
21987    break;
21988  case ISD::SMULO:
21989    BaseOp = X86ISD::SMUL;
21990    Cond = X86::COND_O;
21991    break;
21992  case ISD::UMULO:
21993    BaseOp = X86ISD::UMUL;
21994    Cond = X86::COND_O;
21995    break;
21996  }
21997
21998  if (BaseOp) {
21999    // Also sets EFLAGS.
22000    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
22001    Value = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
22002    Overflow = Value.getValue(1);
22003  }
22004
22005  return std::make_pair(Value, Overflow);
22006}
22007
22008static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
22009  // Lower the "add/sub/mul with overflow" instruction into a regular instruction plus
22010  // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
22011  // looks for this combo and may remove the "setcc" instruction if the "setcc"
22012  // has only one use.
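  // For example (illustrative sketch): "{i32, i1} = uaddo a, b" becomes roughly
  //   %sum = X86ISD::ADD a, b        ; second result is EFLAGS
  //   %ovf = X86ISD::SETCC COND_B, %sum:1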
22013  SDLoc DL(Op);
22014  X86::CondCode Cond;
22015  SDValue Value, Overflow;
22016  std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG);
22017
22018  SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG);
22019  assert(Op->getValueType(1) == MVT::i8 && "Unexpected VT!");
22020  return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC);
22021}
22022
22023/// Return true if opcode is a X86 logical comparison.
22024static bool isX86LogicalCmp(SDValue Op) {
22025  unsigned Opc = Op.getOpcode();
22026  if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
22027      Opc == X86ISD::SAHF)
22028    return true;
22029  if (Op.getResNo() == 1 &&
22030      (Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
22031       Opc == X86ISD::SBB || Opc == X86ISD::SMUL || Opc == X86ISD::UMUL ||
22032       Opc == X86ISD::OR || Opc == X86ISD::XOR || Opc == X86ISD::AND))
22033    return true;
22034
22035  return false;
22036}
22037
22038static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
22039  if (V.getOpcode() != ISD::TRUNCATE)
22040    return false;
22041
22042  SDValue VOp0 = V.getOperand(0);
22043  unsigned InBits = VOp0.getValueSizeInBits();
22044  unsigned Bits = V.getValueSizeInBits();
22045  return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
22046}
22047
22048SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
22049  bool AddTest = true;
22050  SDValue Cond  = Op.getOperand(0);
22051  SDValue Op1 = Op.getOperand(1);
22052  SDValue Op2 = Op.getOperand(2);
22053  SDLoc DL(Op);
22054  MVT VT = Op1.getSimpleValueType();
22055  SDValue CC;
22056
22057  // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
22058  // are available, or into a VBLENDV if AVX is available.
22059  // Otherwise FP cmovs get lowered into a less efficient branch sequence later.
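  // For example (illustrative sketch): "a < b ? x : y" on f64 with plain SSE2
  // becomes roughly CMPLTSD (an all-ones/all-zeros mask), then
  // (mask & x) | (~mask & y) via ANDPD/ANDNPD/ORPD; with AVX the whole select
  // can instead be a single VBLENDVPD.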
22060  if (Cond.getOpcode() == ISD::SETCC &&
22061      ((Subtarget.hasSSE2() && VT == MVT::f64) ||
22062       (Subtarget.hasSSE1() && VT == MVT::f32)) &&
22063      VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
22064    SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
22065    bool IsAlwaysSignaling;
22066    unsigned SSECC =
22067        translateX86FSETCC(cast<CondCodeSDNode>(Cond.getOperand(2))->get(),
22068                           CondOp0, CondOp1, IsAlwaysSignaling);
22069
22070    if (Subtarget.hasAVX512()) {
22071      SDValue Cmp =
22072          DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0, CondOp1,
22073                      DAG.getTargetConstant(SSECC, DL, MVT::i8));
22074      assert(!VT.isVector() && "Not a scalar type?");
22075      return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
22076    }
22077
22078    if (SSECC < 8 || Subtarget.hasAVX()) {
22079      SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
22080                                DAG.getTargetConstant(SSECC, DL, MVT::i8));
22081
22082      // If we have AVX, we can use a variable vector select (VBLENDV) instead
22083      // of 3 logic instructions for size savings and potentially speed.
22084      // Unfortunately, there is no scalar form of VBLENDV.
22085
22086      // If either operand is a +0.0 constant, don't try this. We can expect to
22087      // optimize away at least one of the logic instructions later in that
22088      // case, so that sequence would be faster than a variable blend.
22089
22090      // BLENDV was introduced with SSE 4.1, but the 2 register form implicitly
22091      // uses XMM0 as the selection register. That may need just as many
22092      // instructions as the AND/ANDN/OR sequence due to register moves, so
22093      // don't bother.
22094      if (Subtarget.hasAVX() && !isNullFPConstant(Op1) &&
22095          !isNullFPConstant(Op2)) {
22096        // Convert to vectors, do a VSELECT, and convert back to scalar.
22097        // All of the conversions should be optimized away.
22098        MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
22099        SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
22100        SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
22101        SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);
22102
22103        MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
22104        VCmp = DAG.getBitcast(VCmpVT, VCmp);
22105
22106        SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);
22107
22108        return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
22109                           VSel, DAG.getIntPtrConstant(0, DL));
22110      }
22111      SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
22112      SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
22113      return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
22114    }
22115  }
22116
22117  // AVX512 fallback is to lower selects of scalar floats to masked moves.
22118  if ((VT == MVT::f64 || VT == MVT::f32) && Subtarget.hasAVX512()) {
22119    SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
22120    return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
22121  }
22122
22123  // For v64i1 without 64-bit support we need to split and rejoin.
22124  if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
22125    assert(Subtarget.hasBWI() && "Expected BWI to be legal");
22126    SDValue Op1Lo = extractSubVector(Op1, 0, DAG, DL, 32);
22127    SDValue Op2Lo = extractSubVector(Op2, 0, DAG, DL, 32);
22128    SDValue Op1Hi = extractSubVector(Op1, 32, DAG, DL, 32);
22129    SDValue Op2Hi = extractSubVector(Op2, 32, DAG, DL, 32);
22130    SDValue Lo = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Lo, Op2Lo);
22131    SDValue Hi = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Hi, Op2Hi);
22132    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
22133  }
22134
22135  if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
22136    SDValue Op1Scalar;
22137    if (ISD::isBuildVectorOfConstantSDNodes(Op1.getNode()))
22138      Op1Scalar = ConvertI1VectorToInteger(Op1, DAG);
22139    else if (Op1.getOpcode() == ISD::BITCAST && Op1.getOperand(0))
22140      Op1Scalar = Op1.getOperand(0);
22141    SDValue Op2Scalar;
22142    if (ISD::isBuildVectorOfConstantSDNodes(Op2.getNode()))
22143      Op2Scalar = ConvertI1VectorToInteger(Op2, DAG);
22144    else if (Op2.getOpcode() == ISD::BITCAST && Op2.getOperand(0))
22145      Op2Scalar = Op2.getOperand(0);
22146    if (Op1Scalar.getNode() && Op2Scalar.getNode()) {
22147      SDValue newSelect = DAG.getSelect(DL, Op1Scalar.getValueType(), Cond,
22148                                        Op1Scalar, Op2Scalar);
22149      if (newSelect.getValueSizeInBits() == VT.getSizeInBits())
22150        return DAG.getBitcast(VT, newSelect);
22151      SDValue ExtVec = DAG.getBitcast(MVT::v8i1, newSelect);
22152      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, ExtVec,
22153                         DAG.getIntPtrConstant(0, DL));
22154    }
22155  }
22156
22157  if (Cond.getOpcode() == ISD::SETCC) {
22158    if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
22159      Cond = NewCond;
22160      // If the condition was updated, it's possible that the operands of the
22161      // select were also updated (for example, EmitTest has a RAUW). Refresh
22162      // the local references to the select operands in case they got stale.
22163      Op1 = Op.getOperand(1);
22164      Op2 = Op.getOperand(2);
22165    }
22166  }
22167
22168  // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
22169  // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
22170  // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
22171  // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
22172  // (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
22173  // (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
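  // For example (illustrative sketch): "x == 0 ? -1 : y" becomes roughly
  //   cmp $1, %x          ; CF = (x u< 1) = (x == 0)
  //   sbb %r, %r          ; r = CF ? -1 : 0
  //   or  %y, %r
  // and the opposite polarities insert a NOT before the OR.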
22174  if (Cond.getOpcode() == X86ISD::SETCC &&
22175      Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
22176      isNullConstant(Cond.getOperand(1).getOperand(1))) {
22177    SDValue Cmp = Cond.getOperand(1);
22178    unsigned CondCode = Cond.getConstantOperandVal(0);
22179
22180    if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
22181        (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
22182      SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
22183      SDValue CmpOp0 = Cmp.getOperand(0);
22184
22185      // Apply further optimizations for special cases
22186      // (select (x != 0), -1, 0) -> neg & sbb
22187      // (select (x == 0), 0, -1) -> neg & sbb
22188      if (isNullConstant(Y) &&
22189          (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE))) {
22190        SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
22191        SDValue CmpZero = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Zero, CmpOp0);
22192        SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
22193        Zero = DAG.getConstant(0, DL, Op.getValueType());
22194        return DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, CmpZero);
22195      }
22196
22197      Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
22198                        CmpOp0, DAG.getConstant(1, DL, CmpOp0.getValueType()));
22199      Cmp = ConvertCmpIfNecessary(Cmp, DAG);
22200
22201      SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
22202      SDValue Zero = DAG.getConstant(0, DL, Op.getValueType());
22203      SDValue Res =   // Res = 0 or -1.
22204        DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, Cmp);
22205
22206      if (isAllOnesConstant(Op1) != (CondCode == X86::COND_E))
22207        Res = DAG.getNOT(DL, Res, Res.getValueType());
22208
22209      if (!isNullConstant(Op2))
22210        Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
22211      return Res;
22212    } else if (!Subtarget.hasCMov() && CondCode == X86::COND_E &&
22213               Cmp.getOperand(0).getOpcode() == ISD::AND &&
22214               isOneConstant(Cmp.getOperand(0).getOperand(1))) {
22215      SDValue CmpOp0 = Cmp.getOperand(0);
22216      SDValue Src1, Src2;
22217      // Returns true if Op2 is an XOR or OR operator and one of its operands
22218      // is equal to Op1, i.e. it matches
22219      // (a, a op b) or (b, a op b).
22220      auto isOrXorPattern = [&]() {
22221        if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
22222            (Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
22223          Src1 =
22224              Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
22225          Src2 = Op1;
22226          return true;
22227        }
22228        return false;
22229      };
22230
22231      if (isOrXorPattern()) {
22232        SDValue Neg;
22233        unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
22234        // We need a mask of all zeros or all ones with the same size as the
22235        // other operands.
22236        if (CmpSz > VT.getSizeInBits())
22237          Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
22238        else if (CmpSz < VT.getSizeInBits())
22239          Neg = DAG.getNode(ISD::AND, DL, VT,
22240              DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
22241              DAG.getConstant(1, DL, VT));
22242        else
22243          Neg = CmpOp0;
22244        SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
22245                                   Neg); // -(and (x, 0x1))
22246        SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
22247        return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2);  // And Op y
22248      }
22249    }
22250  }
22251
22252  // Look past (and (setcc_carry (cmp ...)), 1).
22253  if (Cond.getOpcode() == ISD::AND &&
22254      Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
22255      isOneConstant(Cond.getOperand(1)))
22256    Cond = Cond.getOperand(0);
22257
22258  // If condition flag is set by a X86ISD::CMP, then use it as the condition
22259  // setting operand in place of the X86ISD::SETCC.
22260  unsigned CondOpcode = Cond.getOpcode();
22261  if (CondOpcode == X86ISD::SETCC ||
22262      CondOpcode == X86ISD::SETCC_CARRY) {
22263    CC = Cond.getOperand(0);
22264
22265    SDValue Cmp = Cond.getOperand(1);
22266    bool IllegalFPCMov = false;
22267    if (VT.isFloatingPoint() && !VT.isVector() &&
22268        !isScalarFPTypeInSSEReg(VT))  // FPStack?
22269      IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
22270
22271    if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
22272        Cmp.getOpcode() == X86ISD::BT) { // FIXME
22273      Cond = Cmp;
22274      AddTest = false;
22275    }
22276  } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
22277             CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
22278             CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
22279    SDValue Value;
22280    X86::CondCode X86Cond;
22281    std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
22282
22283    CC = DAG.getTargetConstant(X86Cond, DL, MVT::i8);
22284    AddTest = false;
22285  }
22286
22287  if (AddTest) {
22288    // Look past the truncate if the high bits are known zero.
22289    if (isTruncWithZeroHighBitsInput(Cond, DAG))
22290      Cond = Cond.getOperand(0);
22291
22292    // We know the result of AND is compared against zero. Try to match
22293    // it to BT.
22294    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
22295      SDValue BTCC;
22296      if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, DL, DAG, BTCC)) {
22297        CC = BTCC;
22298        Cond = BT;
22299        AddTest = false;
22300      }
22301    }
22302  }
22303
22304  if (AddTest) {
22305    CC = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
22306    Cond = EmitTest(Cond, X86::COND_NE, DL, DAG, Subtarget);
22307  }
22308
22309  // a <  b ? -1 :  0 -> RES = ~setcc_carry
22310  // a <  b ?  0 : -1 -> RES = setcc_carry
22311  // a >= b ? -1 :  0 -> RES = setcc_carry
22312  // a >= b ?  0 : -1 -> RES = ~setcc_carry
22313  if (Cond.getOpcode() == X86ISD::SUB) {
22314    Cond = ConvertCmpIfNecessary(Cond, DAG);
22315    unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
22316
22317    if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
22318        (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
22319        (isNullConstant(Op1) || isNullConstant(Op2))) {
22320      SDValue Res =
22321          DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
22322                      DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cond);
22323      if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
22324        return DAG.getNOT(DL, Res, Res.getValueType());
22325      return Res;
22326    }
22327  }
22328
22329  // X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
22330  // widen the cmov and push the truncate through. This avoids introducing a new
22331  // branch during isel and doesn't add any extensions.
22332  if (Op.getValueType() == MVT::i8 &&
22333      Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
22334    SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
22335    if (T1.getValueType() == T2.getValueType() &&
22336        // Blacklist CopyFromReg to avoid partial register stalls.
22337        T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
22338      SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, T1.getValueType(), T2, T1,
22339                                 CC, Cond);
22340      return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
22341    }
22342  }
22343
22344  // Or finally, promote i8 cmovs if we have CMOV,
22345  //                 or i16 cmovs if it won't prevent folding a load.
22346  // FIXME: we should not limit promotion of i8 case to only when the CMOV is
22347  //        legal, but EmitLoweredSelect() cannot deal with these extensions
22348  //        being inserted between two CMOV's. (in i16 case too TBN)
22349  //        https://bugs.llvm.org/show_bug.cgi?id=40974
22350  if ((Op.getValueType() == MVT::i8 && Subtarget.hasCMov()) ||
22351      (Op.getValueType() == MVT::i16 && !MayFoldLoad(Op1) &&
22352       !MayFoldLoad(Op2))) {
22353    Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
22354    Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
22355    SDValue Ops[] = { Op2, Op1, CC, Cond };
22356    SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, MVT::i32, Ops);
22357    return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
22358  }
22359
22360  // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
22361  // condition is true.
22362  SDValue Ops[] = { Op2, Op1, CC, Cond };
22363  return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops);
22364}
22365
22366static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
22367                                     const X86Subtarget &Subtarget,
22368                                     SelectionDAG &DAG) {
22369  MVT VT = Op->getSimpleValueType(0);
22370  SDValue In = Op->getOperand(0);
22371  MVT InVT = In.getSimpleValueType();
22372  assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
22373  MVT VTElt = VT.getVectorElementType();
22374  SDLoc dl(Op);
22375
22376  unsigned NumElts = VT.getVectorNumElements();
22377
22378  // Extend VT if the scalar type is i8/i16 and BWI is not supported.
22379  MVT ExtVT = VT;
22380  if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
22381    // If v16i32 is to be avoided, we'll need to split and concatenate.
22382    if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
22383      return SplitAndExtendv16i1(Op.getOpcode(), VT, In, dl, DAG);
22384
22385    ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
22386  }
22387
22388  // Widen to 512-bits if VLX is not supported.
22389  MVT WideVT = ExtVT;
22390  if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
22391    NumElts *= 512 / ExtVT.getSizeInBits();
22392    InVT = MVT::getVectorVT(MVT::i1, NumElts);
22393    In = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, InVT, DAG.getUNDEF(InVT),
22394                     In, DAG.getIntPtrConstant(0, dl));
22395    WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
22396  }
22397
22398  SDValue V;
22399  MVT WideEltVT = WideVT.getVectorElementType();
22400  if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
22401      (Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
22402    V = DAG.getNode(Op.getOpcode(), dl, WideVT, In);
22403  } else {
22404    SDValue NegOne = DAG.getConstant(-1, dl, WideVT);
22405    SDValue Zero = DAG.getConstant(0, dl, WideVT);
22406    V = DAG.getSelect(dl, WideVT, In, NegOne, Zero);
22407  }
22408
22409  // Truncate if we had to extend i16/i8 above.
22410  if (VT != ExtVT) {
22411    WideVT = MVT::getVectorVT(VTElt, NumElts);
22412    V = DAG.getNode(ISD::TRUNCATE, dl, WideVT, V);
22413  }
22414
22415  // Extract back to 128/256-bit if we widened.
22416  if (WideVT != VT)
22417    V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, V,
22418                    DAG.getIntPtrConstant(0, dl));
22419
22420  return V;
22421}
22422
22423static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
22424                               SelectionDAG &DAG) {
22425  SDValue In = Op->getOperand(0);
22426  MVT InVT = In.getSimpleValueType();
22427
22428  if (InVT.getVectorElementType() == MVT::i1)
22429    return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
22430
22431  assert(Subtarget.hasAVX() && "Expected AVX support");
22432  return LowerAVXExtend(Op, DAG, Subtarget);
22433}
22434
22435// Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
22436// For sign extend this needs to handle all vector sizes and SSE4.1 and
22437// non-SSE4.1 targets. For zero extend this should only handle inputs of
22438// MVT::v64i8 when BWI is not supported, but AVX512 is.
22439static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
22440                                        const X86Subtarget &Subtarget,
22441                                        SelectionDAG &DAG) {
22442  SDValue In = Op->getOperand(0);
22443  MVT VT = Op->getSimpleValueType(0);
22444  MVT InVT = In.getSimpleValueType();
22445
22446  MVT SVT = VT.getVectorElementType();
22447  MVT InSVT = InVT.getVectorElementType();
22448  assert(SVT.getSizeInBits() > InSVT.getSizeInBits());
22449
22450  if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
22451    return SDValue();
22452  if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
22453    return SDValue();
22454  if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
22455      !(VT.is256BitVector() && Subtarget.hasAVX()) &&
22456      !(VT.is512BitVector() && Subtarget.hasAVX512()))
22457    return SDValue();
22458
22459  SDLoc dl(Op);
22460  unsigned Opc = Op.getOpcode();
22461  unsigned NumElts = VT.getVectorNumElements();
22462
22463  // For 256-bit vectors, we only need the lower (128-bit) half of the input.
22464  // For 512-bit vectors, we need 128-bits or 256-bits.
22465  if (InVT.getSizeInBits() > 128) {
22466    // Input needs to be at least the same number of elements as output, and
22467    // at least 128-bits.
22468    int InSize = InSVT.getSizeInBits() * NumElts;
22469    In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
22470    InVT = In.getSimpleValueType();
22471  }
22472
22473  // SSE41 targets can use the pmov[sz]x* instructions directly for 128-bit
22474  // results, so those cases are legal and shouldn't occur here. AVX2/AVX512
22475  // pmovsx* instructions still need to be handled here for 256/512-bit results.
22476  if (Subtarget.hasInt256()) {
22477    assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");
22478
22479    if (InVT.getVectorNumElements() != NumElts)
22480      return DAG.getNode(Op.getOpcode(), dl, VT, In);
22481
22482    // FIXME: Apparently we create inreg operations that could be regular
22483    // extends.
22484    unsigned ExtOpc =
22485        Opc == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SIGN_EXTEND
22486                                             : ISD::ZERO_EXTEND;
22487    return DAG.getNode(ExtOpc, dl, VT, In);
22488  }
22489
22490  // pre-AVX2 256-bit extensions need to be split into 128-bit instructions.
22491  if (Subtarget.hasAVX()) {
22492    assert(VT.is256BitVector() && "256-bit vector expected");
22493    MVT HalfVT = VT.getHalfNumVectorElementsVT();
22494    int HalfNumElts = HalfVT.getVectorNumElements();
22495
22496    unsigned NumSrcElts = InVT.getVectorNumElements();
22497    SmallVector<int, 16> HiMask(NumSrcElts, SM_SentinelUndef);
22498    for (int i = 0; i != HalfNumElts; ++i)
22499      HiMask[i] = HalfNumElts + i;
22500
22501    SDValue Lo = DAG.getNode(Opc, dl, HalfVT, In);
22502    SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, DAG.getUNDEF(InVT), HiMask);
22503    Hi = DAG.getNode(Opc, dl, HalfVT, Hi);
22504    return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
22505  }
22506
22507  // We should only get here for sign extend.
22508  assert(Opc == ISD::SIGN_EXTEND_VECTOR_INREG && "Unexpected opcode!");
22509  assert(VT.is128BitVector() && InVT.is128BitVector() && "Unexpected VTs");
22510
22511  // pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
22512  SDValue Curr = In;
22513  SDValue SignExt = Curr;
22514
22515  // As SRAI is only available on i16/i32 types, we expand only up to i32
22516  // and handle i64 separately.
22517  if (InVT != MVT::v4i32) {
22518    MVT DestVT = VT == MVT::v2i64 ? MVT::v4i32 : VT;
22519
22520    unsigned DestWidth = DestVT.getScalarSizeInBits();
22521    unsigned Scale = DestWidth / InSVT.getSizeInBits();
22522
22523    unsigned InNumElts = InVT.getVectorNumElements();
22524    unsigned DestElts = DestVT.getVectorNumElements();
22525
22526    // Build a shuffle mask that takes each input element and places it in the
22527    // MSBs of the new element size.
22528    SmallVector<int, 16> Mask(InNumElts, SM_SentinelUndef);
22529    for (unsigned i = 0; i != DestElts; ++i)
22530      Mask[i * Scale + (Scale - 1)] = i;
22531
22532    Curr = DAG.getVectorShuffle(InVT, dl, In, In, Mask);
22533    Curr = DAG.getBitcast(DestVT, Curr);
22534
22535    unsigned SignExtShift = DestWidth - InSVT.getSizeInBits();
22536    SignExt = DAG.getNode(X86ISD::VSRAI, dl, DestVT, Curr,
22537                          DAG.getTargetConstant(SignExtShift, dl, MVT::i8));
22538  }
22539
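  // For v2i64 there is no 64-bit SRAI here, so build the sign bits with a
  // (0 > x) compare (all-ones where x is negative) and interleave them with
  // the sign-extended low halves to form each 64-bit lane.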
22540  if (VT == MVT::v2i64) {
22541    assert(Curr.getValueType() == MVT::v4i32 && "Unexpected input VT");
22542    SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
22543    SDValue Sign = DAG.getSetCC(dl, MVT::v4i32, Zero, Curr, ISD::SETGT);
22544    SignExt = DAG.getVectorShuffle(MVT::v4i32, dl, SignExt, Sign, {0, 4, 1, 5});
22545    SignExt = DAG.getBitcast(VT, SignExt);
22546  }
22547
22548  return SignExt;
22549}
22550
22551static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
22552                                SelectionDAG &DAG) {
22553  MVT VT = Op->getSimpleValueType(0);
22554  SDValue In = Op->getOperand(0);
22555  MVT InVT = In.getSimpleValueType();
22556  SDLoc dl(Op);
22557
22558  if (InVT.getVectorElementType() == MVT::i1)
22559    return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
22560
22561  assert(VT.isVector() && InVT.isVector() && "Expected vector type");
22562  assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
22563         "Expected same number of elements");
22564  assert((VT.getVectorElementType() == MVT::i16 ||
22565          VT.getVectorElementType() == MVT::i32 ||
22566          VT.getVectorElementType() == MVT::i64) &&
22567         "Unexpected element type");
22568  assert((InVT.getVectorElementType() == MVT::i8 ||
22569          InVT.getVectorElementType() == MVT::i16 ||
22570          InVT.getVectorElementType() == MVT::i32) &&
22571         "Unexpected element type");
22572
22573  // Custom legalize v8i8->v8i64 on CPUs without avx512bw.
22574  if (InVT == MVT::v8i8) {
22575    if (VT != MVT::v8i64)
22576      return SDValue();
22577
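    // Widen the v8i8 input to v16i8 with undef upper elements and use the
    // in-register sign extension to produce the v8i64 result.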
22578    In = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op),
22579                     MVT::v16i8, In, DAG.getUNDEF(MVT::v8i8));
22580    return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, VT, In);
22581  }
22582
22583  if (Subtarget.hasInt256())
22584    return Op;
22585
22586  // Optimize vectors in AVX mode:
22587  // Sign extend v8i16 to v8i32 and
22588  //             v4i32 to v4i64.
22589  //
22590  // Divide the input vector into two parts;
22591  // for v4i32 the high shuffle mask will be {2, 3, -1, -1}.
22592  // Use the vpmovsx instruction to extend v4i32 -> v2i64 and v8i16 -> v4i32,
22593  // then concat the vectors back to the original VT.
22594  MVT HalfVT = VT.getHalfNumVectorElementsVT();
22595  SDValue OpLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, In);
22596
22597  unsigned NumElems = InVT.getVectorNumElements();
22598  SmallVector<int,8> ShufMask(NumElems, -1);
22599  for (unsigned i = 0; i != NumElems/2; ++i)
22600    ShufMask[i] = i + NumElems/2;
22601
22602  SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
22603  OpHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, OpHi);
22604
22605  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
22606}
22607
22608/// Change a vector store into a pair of half-size vector stores.
22609static SDValue splitVectorStore(StoreSDNode *Store, SelectionDAG &DAG) {
22610  SDValue StoredVal = Store->getValue();
22611  assert((StoredVal.getValueType().is256BitVector() ||
22612          StoredVal.getValueType().is512BitVector()) &&
22613         "Expecting 256/512-bit op");
22614
22615  // Splitting volatile memory ops is not allowed unless the operation was not
22616  // legal to begin with. Assume the input store is legal (this transform is
22617  // only used for targets with AVX). Note: It is possible that we have an
22618  // illegal type like v2i128, and so we could allow splitting a volatile store
22619  // in that case if that is important.
22620  if (!Store->isSimple())
22621    return SDValue();
22622
22623  EVT StoreVT = StoredVal.getValueType();
22624  unsigned NumElems = StoreVT.getVectorNumElements();
22625  unsigned HalfSize = StoredVal.getValueSizeInBits() / 2;
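  // HalfAlign is both the byte offset of the upper half from the base pointer
  // and the alignment bound used for the second store.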
22626  unsigned HalfAlign = (128 == HalfSize ? 16 : 32);
22627
22628  SDLoc DL(Store);
22629  SDValue Value0 = extractSubVector(StoredVal, 0, DAG, DL, HalfSize);
22630  SDValue Value1 = extractSubVector(StoredVal, NumElems / 2, DAG, DL, HalfSize);
22631  SDValue Ptr0 = Store->getBasePtr();
22632  SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, HalfAlign, DL);
22633  unsigned Alignment = Store->getAlignment();
22634  SDValue Ch0 =
22635      DAG.getStore(Store->getChain(), DL, Value0, Ptr0, Store->getPointerInfo(),
22636                   Alignment, Store->getMemOperand()->getFlags());
22637  SDValue Ch1 = DAG.getStore(Store->getChain(), DL, Value1, Ptr1,
22638                             Store->getPointerInfo().getWithOffset(HalfAlign),
22639                             MinAlign(Alignment, HalfAlign),
22640                             Store->getMemOperand()->getFlags());
22641  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Ch0, Ch1);
22642}
22643
22644/// Scalarize a vector store, bitcasting to TargetVT to determine the scalar
22645/// type.
22646static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT,
22647                                    SelectionDAG &DAG) {
22648  SDValue StoredVal = Store->getValue();
22649  assert(StoreVT.is128BitVector() &&
22650         StoredVal.getValueType().is128BitVector() && "Expecting 128-bit op");
22651  StoredVal = DAG.getBitcast(StoreVT, StoredVal);
22652
22653  // Splitting volatile memory ops is not allowed unless the operation was not
22654  // legal to begin with. We are assuming the input op is legal (this transform
22655  // is only used for targets with AVX).
22656  if (!Store->isSimple())
22657    return SDValue();
22658
22659  MVT StoreSVT = StoreVT.getScalarType();
22660  unsigned NumElems = StoreVT.getVectorNumElements();
22661  unsigned ScalarSize = StoreSVT.getStoreSize();
22662  unsigned Alignment = Store->getAlignment();
22663
22664  SDLoc DL(Store);
22665  SmallVector<SDValue, 4> Stores;
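  // Emit one scalar store per vector element, each at its byte offset from the
  // store's base pointer.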
22666  for (unsigned i = 0; i != NumElems; ++i) {
22667    unsigned Offset = i * ScalarSize;
22668    SDValue Ptr = DAG.getMemBasePlusOffset(Store->getBasePtr(), Offset, DL);
22669    SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreSVT, StoredVal,
22670                              DAG.getIntPtrConstant(i, DL));
22671    SDValue Ch = DAG.getStore(Store->getChain(), DL, Scl, Ptr,
22672                              Store->getPointerInfo().getWithOffset(Offset),
22673                              MinAlign(Alignment, Offset),
22674                              Store->getMemOperand()->getFlags());
22675    Stores.push_back(Ch);
22676  }
22677  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
22678}
22679
22680static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
22681                          SelectionDAG &DAG) {
22682  StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
22683  SDLoc dl(St);
22684  SDValue StoredVal = St->getValue();
22685
22686  // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 stores.
22687  if (StoredVal.getValueType().isVector() &&
22688      StoredVal.getValueType().getVectorElementType() == MVT::i1) {
22689    assert(StoredVal.getValueType().getVectorNumElements() <= 8 &&
22690           "Unexpected VT");
22691    assert(!St->isTruncatingStore() && "Expected non-truncating store");
22692    assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
22693           "Expected AVX512F without AVX512DQI");
22694
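    // Widen the mask to v16i1, bitcast it to i16 and truncate to i8 so the
    // v2i1/v4i1/v8i1 value can be stored as a single byte.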
22695    StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
22696                            DAG.getUNDEF(MVT::v16i1), StoredVal,
22697                            DAG.getIntPtrConstant(0, dl));
22698    StoredVal = DAG.getBitcast(MVT::i16, StoredVal);
22699    StoredVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, StoredVal);
22700
22701    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
22702                        St->getPointerInfo(), St->getAlignment(),
22703                        St->getMemOperand()->getFlags());
22704  }
22705
22706  if (St->isTruncatingStore())
22707    return SDValue();
22708
22709  // If this is a 256-bit store of concatenated ops, we are better off splitting
22710  // that store into two 128-bit stores. This avoids spurious use of 256-bit ops
22711  // and each half can execute independently. Some cores would split the op into
22712  // halves anyway, so the concat (vinsertf128) is purely an extra op.
22713  MVT StoreVT = StoredVal.getSimpleValueType();
22714  if (StoreVT.is256BitVector()) {
22715    SmallVector<SDValue, 4> CatOps;
22716    if (StoredVal.hasOneUse() && collectConcatOps(StoredVal.getNode(), CatOps))
22717      return splitVectorStore(St, DAG);
22718    return SDValue();
22719  }
22720
22721  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22722  assert(StoreVT.isVector() && StoreVT.getSizeInBits() == 64 &&
22723         "Unexpected VT");
22724  assert(TLI.getTypeAction(*DAG.getContext(), StoreVT) ==
22725             TargetLowering::TypeWidenVector && "Unexpected type action!");
22726
22727  EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), StoreVT);
22728  StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, StoredVal,
22729                          DAG.getUNDEF(StoreVT));
22730
22731  if (Subtarget.hasSSE2()) {
22732    // Widen the vector, cast to a v2x64 type, extract the single 64-bit element
22733    // and store it.
22734    MVT StVT = Subtarget.is64Bit() && StoreVT.isInteger() ? MVT::i64 : MVT::f64;
22735    MVT CastVT = MVT::getVectorVT(StVT, 2);
22736    StoredVal = DAG.getBitcast(CastVT, StoredVal);
22737    StoredVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, StVT, StoredVal,
22738                            DAG.getIntPtrConstant(0, dl));
22739
22740    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
22741                        St->getPointerInfo(), St->getAlignment(),
22742                        St->getMemOperand()->getFlags());
22743  }
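  // With only SSE1 available, emit a 64-bit extract-store node (selected to an
  // instruction such as movlps) for the low half of the widened vector.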
22744  assert(Subtarget.hasSSE1() && "Expected SSE");
22745  SDVTList Tys = DAG.getVTList(MVT::Other);
22746  SDValue Ops[] = {St->getChain(), StoredVal, St->getBasePtr()};
22747  return DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops, MVT::i64,
22748                                 St->getMemOperand());
22749}
22750
22751// Lower vector extending loads using a shuffle. If SSSE3 is not available we
22752// may emit an illegal shuffle but the expansion is still better than scalar
22753// code. We generate sext/sext_invec for SEXTLOADs if it's available, otherwise
22754// we'll emit a shuffle and an arithmetic shift.
22755// FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
22756// TODO: It is possible to support ZExt by zeroing the undef values during
22757// the shuffle phase or after the shuffle.
22758static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget,
22759                         SelectionDAG &DAG) {
22760  MVT RegVT = Op.getSimpleValueType();
22761  assert(RegVT.isVector() && "We only custom lower vector loads.");
22762  assert(RegVT.isInteger() &&
22763         "We only custom lower integer vector loads.");
22764
22765  LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
22766  SDLoc dl(Ld);
22767
22768  // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads.
22769  if (RegVT.getVectorElementType() == MVT::i1) {
22770    assert(EVT(RegVT) == Ld->getMemoryVT() && "Expected non-extending load");
22771    assert(RegVT.getVectorNumElements() <= 8 && "Unexpected VT");
22772    assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
22773           "Expected AVX512F without AVX512DQI");
22774
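    // Load the mask as a single i8, widen it to i16 so it can be bitcast to
    // v16i1, and then extract the low v2i1/v4i1/v8i1 subvector.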
22775    SDValue NewLd = DAG.getLoad(MVT::i8, dl, Ld->getChain(), Ld->getBasePtr(),
22776                                Ld->getPointerInfo(), Ld->getAlignment(),
22777                                Ld->getMemOperand()->getFlags());
22778
22779    // Replace chain users with the new chain.
22780    assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!");
22781
22782    SDValue Val = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, NewLd);
22783    Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
22784                      DAG.getBitcast(MVT::v16i1, Val),
22785                      DAG.getIntPtrConstant(0, dl));
22786    return DAG.getMergeValues({Val, NewLd.getValue(1)}, dl);
22787  }
22788
22789  return SDValue();
22790}
22791
22792/// Return true if the node is an ISD::AND or ISD::OR of two X86ISD::SETCC
22793/// nodes, each of which has no other use apart from the AND / OR.
22794static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
22795  Opc = Op.getOpcode();
22796  if (Opc != ISD::OR && Opc != ISD::AND)
22797    return false;
22798  return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
22799          Op.getOperand(0).hasOneUse() &&
22800          Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
22801          Op.getOperand(1).hasOneUse());
22802}
22803
22804/// Return true if the node is an ISD::XOR of an X86ISD::SETCC and 1, where the
22805/// SETCC node has a single use.
22806static bool isXor1OfSetCC(SDValue Op) {
22807  if (Op.getOpcode() != ISD::XOR)
22808    return false;
22809  if (isOneConstant(Op.getOperand(1)))
22810    return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
22811           Op.getOperand(0).hasOneUse();
22812  return false;
22813}
22814
22815SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
22816  bool addTest = true;
22817  SDValue Chain = Op.getOperand(0);
22818  SDValue Cond  = Op.getOperand(1);
22819  SDValue Dest  = Op.getOperand(2);
22820  SDLoc dl(Op);
22821  SDValue CC;
22822  bool Inverted = false;
22823
22824  if (Cond.getOpcode() == ISD::SETCC) {
22825    // Check for setcc([su]{add,sub,mul}o == 0).
22826    if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
22827        isNullConstant(Cond.getOperand(1)) &&
22828        Cond.getOperand(0).getResNo() == 1 &&
22829        (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
22830         Cond.getOperand(0).getOpcode() == ISD::UADDO ||
22831         Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
22832         Cond.getOperand(0).getOpcode() == ISD::USUBO ||
22833         Cond.getOperand(0).getOpcode() == ISD::SMULO ||
22834         Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
22835      Inverted = true;
22836      Cond = Cond.getOperand(0);
22837    } else {
22838      if (SDValue NewCond = LowerSETCC(Cond, DAG))
22839        Cond = NewCond;
22840    }
22841  }
22842#if 0
22843  // FIXME: LowerXALUO doesn't handle these!!
22844  else if (Cond.getOpcode() == X86ISD::ADD  ||
22845           Cond.getOpcode() == X86ISD::SUB  ||
22846           Cond.getOpcode() == X86ISD::SMUL ||
22847           Cond.getOpcode() == X86ISD::UMUL)
22848    Cond = LowerXALUO(Cond, DAG);
22849#endif
22850
22851  // Look past (and (setcc_carry (cmp ...)), 1).
22852  if (Cond.getOpcode() == ISD::AND &&
22853      Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
22854      isOneConstant(Cond.getOperand(1)))
22855    Cond = Cond.getOperand(0);
22856
22857  // If condition flag is set by a X86ISD::CMP, then use it as the condition
22858  // setting operand in place of the X86ISD::SETCC.
22859  unsigned CondOpcode = Cond.getOpcode();
22860  if (CondOpcode == X86ISD::SETCC ||
22861      CondOpcode == X86ISD::SETCC_CARRY) {
22862    CC = Cond.getOperand(0);
22863
22864    SDValue Cmp = Cond.getOperand(1);
22865    unsigned Opc = Cmp.getOpcode();
22866    // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
22867    if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
22868      Cond = Cmp;
22869      addTest = false;
22870    } else {
22871      switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
22872      default: break;
22873      case X86::COND_O:
22874      case X86::COND_B:
22875        // These can only come from an arithmetic instruction with overflow,
22876        // e.g. SADDO, UADDO.
22877        Cond = Cond.getOperand(1);
22878        addTest = false;
22879        break;
22880      }
22881    }
22882  }
22883  CondOpcode = Cond.getOpcode();
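  // Branch directly on overflow-producing arithmetic: lower it to the
  // flag-setting X86 node and branch on the resulting condition code.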
22884  if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
22885      CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
22886      CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
22887    SDValue Value;
22888    X86::CondCode X86Cond;
22889    std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
22890
22891    if (Inverted)
22892      X86Cond = X86::GetOppositeBranchCondition(X86Cond);
22893
22894    CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
22895    addTest = false;
22896  } else {
22897    unsigned CondOpc;
22898    if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
22899      SDValue Cmp = Cond.getOperand(0).getOperand(1);
22900      if (CondOpc == ISD::OR) {
22901        // Also, recognize the pattern generated by an FCMP_UNE. We can emit
22902        // two branches instead of an explicit OR instruction with a
22903        // separate test.
22904        if (Cmp == Cond.getOperand(1).getOperand(1) &&
22905            isX86LogicalCmp(Cmp)) {
22906          CC = Cond.getOperand(0).getOperand(0);
22907          Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
22908                              Chain, Dest, CC, Cmp);
22909          CC = Cond.getOperand(1).getOperand(0);
22910          Cond = Cmp;
22911          addTest = false;
22912        }
22913      } else { // ISD::AND
22914        // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
22915        // two branches instead of an explicit AND instruction with a
22916        // separate test. However, we only do this if this block doesn't
22917        // have a fall-through edge, because this requires an explicit
22918        // jmp when the condition is false.
22919        if (Cmp == Cond.getOperand(1).getOperand(1) &&
22920            isX86LogicalCmp(Cmp) &&
22921            Op.getNode()->hasOneUse()) {
22922          X86::CondCode CCode0 =
22923              (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
22924          CCode0 = X86::GetOppositeBranchCondition(CCode0);
22925          CC = DAG.getTargetConstant(CCode0, dl, MVT::i8);
22926          SDNode *User = *Op.getNode()->use_begin();
22927          // Look for an unconditional branch following this conditional branch.
22928          // We need this because we need to reverse the successors in order
22929          // to implement FCMP_OEQ.
22930          if (User->getOpcode() == ISD::BR) {
22931            SDValue FalseBB = User->getOperand(1);
22932            SDNode *NewBR =
22933              DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
22934            assert(NewBR == User);
22935            (void)NewBR;
22936            Dest = FalseBB;
22937
22938            Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), Chain,
22939                                Dest, CC, Cmp);
22940            X86::CondCode CCode1 =
22941                (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
22942            CCode1 = X86::GetOppositeBranchCondition(CCode1);
22943            CC = DAG.getTargetConstant(CCode1, dl, MVT::i8);
22944            Cond = Cmp;
22945            addTest = false;
22946          }
22947        }
22948      }
22949    } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
22950      // Recognize 'xorb (setcc), 1' patterns; the xor inverts the condition.
22951      // It should be transformed by the DAG combiner except when the condition
22952      // is set by an arithmetic-with-overflow node.
22953      X86::CondCode CCode =
22954        (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
22955      CCode = X86::GetOppositeBranchCondition(CCode);
22956      CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
22957      Cond = Cond.getOperand(0).getOperand(1);
22958      addTest = false;
22959    } else if (Cond.getOpcode() == ISD::SETCC &&
22960               cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
22961      // For FCMP_OEQ, we can emit
22962      // two branches instead of an explicit AND instruction with a
22963      // separate test. However, we only do this if this block doesn't
22964      // have a fall-through edge, because this requires an explicit
22965      // jmp when the condition is false.
22966      if (Op.getNode()->hasOneUse()) {
22967        SDNode *User = *Op.getNode()->use_begin();
22968        // Look for an unconditional branch following this conditional branch.
22969        // We need this because we need to reverse the successors in order
22970        // to implement FCMP_OEQ.
22971        if (User->getOpcode() == ISD::BR) {
22972          SDValue FalseBB = User->getOperand(1);
22973          SDNode *NewBR =
22974            DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
22975          assert(NewBR == User);
22976          (void)NewBR;
22977          Dest = FalseBB;
22978
22979          SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
22980                                    Cond.getOperand(0), Cond.getOperand(1));
22981          Cmp = ConvertCmpIfNecessary(Cmp, DAG);
22982          CC = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
22983          Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
22984                              Chain, Dest, CC, Cmp);
22985          CC = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
22986          Cond = Cmp;
22987          addTest = false;
22988        }
22989      }
22990    } else if (Cond.getOpcode() == ISD::SETCC &&
22991               cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
22992      // For FCMP_UNE, we can emit
22993      // two branches instead of an explicit OR instruction with a
22994      // separate test.
22995      SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
22996                                Cond.getOperand(0), Cond.getOperand(1));
22997      Cmp = ConvertCmpIfNecessary(Cmp, DAG);
22998      CC = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
22999      Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
23000                          Chain, Dest, CC, Cmp);
23001      CC = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
23002      Cond = Cmp;
23003      addTest = false;
23004    }
23005  }
23006
23007  if (addTest) {
23008    // Look past the truncate if the high bits are known zero.
23009    if (isTruncWithZeroHighBitsInput(Cond, DAG))
23010        Cond = Cond.getOperand(0);
23011
23012    // We know the result of AND is compared against zero. Try to match
23013    // it to BT.
23014    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
23015      SDValue BTCC;
23016      if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, dl, DAG, BTCC)) {
23017        CC = BTCC;
23018        Cond = BT;
23019        addTest = false;
23020      }
23021    }
23022  }
23023
23024  if (addTest) {
23025    X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
23026    CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
23027    Cond = EmitTest(Cond, X86Cond, dl, DAG, Subtarget);
23028  }
23029  Cond = ConvertCmpIfNecessary(Cond, DAG);
23030  return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
23031                     Chain, Dest, CC, Cond);
23032}
23033
23034// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
23035// Calls to _alloca are needed to probe the stack when allocating more than 4k
23036// bytes in one go. Touching the stack at 4K increments is necessary to ensure
23037// that the guard pages used by the OS virtual memory manager are allocated in
23038// the correct sequence.
23039SDValue
23040X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
23041                                           SelectionDAG &DAG) const {
23042  MachineFunction &MF = DAG.getMachineFunction();
23043  bool SplitStack = MF.shouldSplitStack();
23044  bool EmitStackProbe = !getStackProbeSymbolName(MF).empty();
23045  bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
23046               SplitStack || EmitStackProbe;
23047  SDLoc dl(Op);
23048
23049  // Get the inputs.
23050  SDNode *Node = Op.getNode();
23051  SDValue Chain = Op.getOperand(0);
23052  SDValue Size  = Op.getOperand(1);
23053  MaybeAlign Alignment(Op.getConstantOperandVal(2));
23054  EVT VT = Node->getValueType(0);
23055
23056  // Chain the dynamic stack allocation so that it doesn't modify the stack
23057  // pointer when other instructions are using the stack.
23058  Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
23059
23060  bool Is64Bit = Subtarget.is64Bit();
23061  MVT SPTy = getPointerTy(DAG.getDataLayout());
23062
23063  SDValue Result;
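  // When no stack probing, split-stack or Windows-specific handling is needed,
  // simply subtract Size from the stack pointer (re-aligning if requested).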
23064  if (!Lower) {
23065    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23066    unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
23067    assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
23068                    " not tell us which reg is the stack pointer!");
23069
23070    SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
23071    Chain = SP.getValue(1);
23072    const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
23073    const Align StackAlign(TFI.getStackAlignment());
23074    Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
23075    if (Alignment && Alignment > StackAlign)
23076      Result =
23077          DAG.getNode(ISD::AND, dl, VT, Result,
23078                      DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
23079    Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
23080  } else if (SplitStack) {
23081    MachineRegisterInfo &MRI = MF.getRegInfo();
23082
23083    if (Is64Bit) {
23084      // The 64-bit implementation of segmented stacks needs to clobber both r10
23085      // and r11. This makes it impossible to use along with nested parameters.
23086      const Function &F = MF.getFunction();
23087      for (const auto &A : F.args()) {
23088        if (A.hasNestAttr())
23089          report_fatal_error("Cannot use segmented stacks with functions that "
23090                             "have nested arguments.");
23091      }
23092    }
23093
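    // For segmented stacks, pass the requested size in a virtual register and
    // emit SEG_ALLOCA, which is expanded later (e.g. by the target's custom
    // inserter).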
23094    const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
23095    Register Vreg = MRI.createVirtualRegister(AddrRegClass);
23096    Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
23097    Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
23098                                DAG.getRegister(Vreg, SPTy));
23099  } else {
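    // On Windows, emit WIN_ALLOCA, which is later expanded to probe the stack
    // (e.g. via _chkstk) before the stack pointer is adjusted.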
23100    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
23101    Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Size);
23102    MF.getInfo<X86MachineFunctionInfo>()->setHasWinAlloca(true);
23103
23104    const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
23105    Register SPReg = RegInfo->getStackRegister();
23106    SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
23107    Chain = SP.getValue(1);
23108
23109    if (Alignment) {
23110      SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
23111                       DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
23112      Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
23113    }
23114
23115    Result = SP;
23116  }
23117
23118  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
23119                             DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
23120
23121  SDValue Ops[2] = {Result, Chain};
23122  return DAG.getMergeValues(Ops, dl);
23123}
23124
23125SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
23126  MachineFunction &MF = DAG.getMachineFunction();
23127  auto PtrVT = getPointerTy(MF.getDataLayout());
23128  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
23129
23130  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
23131  SDLoc DL(Op);
23132
23133  if (!Subtarget.is64Bit() ||
23134      Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
23135    // vastart just stores the address of the VarArgsFrameIndex slot into the
23136    // memory location argument.
23137    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
23138    return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
23139                        MachinePointerInfo(SV));
23140  }
23141
23142  // __va_list_tag:
23143  //   gp_offset         (0 - 6 * 8)
23144  //   fp_offset         (48 - 48 + 8 * 16)
23145  //   overflow_arg_area (points to parameters coming in memory).
23146  //   reg_save_area
23147  SmallVector<SDValue, 8> MemOps;
23148  SDValue FIN = Op.getOperand(1);
23149  // Store gp_offset
23150  SDValue Store = DAG.getStore(
23151      Op.getOperand(0), DL,
23152      DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
23153      MachinePointerInfo(SV));
23154  MemOps.push_back(Store);
23155
23156  // Store fp_offset
23157  FIN = DAG.getMemBasePlusOffset(FIN, 4, DL);
23158  Store = DAG.getStore(
23159      Op.getOperand(0), DL,
23160      DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
23161      MachinePointerInfo(SV, 4));
23162  MemOps.push_back(Store);
23163
23164  // Store ptr to overflow_arg_area
23165  FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
23166  SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
23167  Store =
23168      DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
23169  MemOps.push_back(Store);
23170
23171  // Store ptr to reg_save_area.
23172  FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
23173      Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
23174  SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
23175  Store = DAG.getStore(
23176      Op.getOperand(0), DL, RSFIN, FIN,
23177      MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
23178  MemOps.push_back(Store);
23179  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
23180}
23181
23182SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
23183  assert(Subtarget.is64Bit() &&
23184         "LowerVAARG only handles 64-bit va_arg!");
23185  assert(Op.getNumOperands() == 4);
23186
23187  MachineFunction &MF = DAG.getMachineFunction();
23188  if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
23189    // The Win64 ABI uses char* instead of a structure.
23190    return DAG.expandVAArg(Op.getNode());
23191
23192  SDValue Chain = Op.getOperand(0);
23193  SDValue SrcPtr = Op.getOperand(1);
23194  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
23195  unsigned Align = Op.getConstantOperandVal(3);
23196  SDLoc dl(Op);
23197
23198  EVT ArgVT = Op.getNode()->getValueType(0);
23199  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
23200  uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
23201  uint8_t ArgMode;
23202
23203  // Decide which area this value should be read from.
23204  // TODO: Implement the AMD64 ABI in its entirety. This simple
23205  // selection mechanism works only for the basic types.
23206  if (ArgVT == MVT::f80) {
23207    llvm_unreachable("va_arg for f80 not yet implemented");
23208  } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
23209    ArgMode = 2;  // Argument passed in XMM register. Use fp_offset.
23210  } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
23211    ArgMode = 1;  // Argument passed in GPR64 register(s). Use gp_offset.
23212  } else {
23213    llvm_unreachable("Unhandled argument type in LowerVAARG");
23214  }
23215
23216  if (ArgMode == 2) {
23217    // Sanity Check: Make sure using fp_offset makes sense.
23218    assert(!Subtarget.useSoftFloat() &&
23219           !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
23220           Subtarget.hasSSE1());
23221  }
23222
23223  // Insert VAARG_64 node into the DAG
23224  // VAARG_64 returns two values: Variable Argument Address, Chain
23225  SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, dl, MVT::i32),
23226                       DAG.getConstant(ArgMode, dl, MVT::i8),
23227                       DAG.getConstant(Align, dl, MVT::i32)};
23228  SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
23229  SDValue VAARG = DAG.getMemIntrinsicNode(
23230    X86ISD::VAARG_64, dl,
23231    VTs, InstOps, MVT::i64,
23232    MachinePointerInfo(SV),
23233    /*Align=*/0,
23234    MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
23235  Chain = VAARG.getValue(1);
23236
23237  // Load the next argument and return it
23238  return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
23239}
23240
23241static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
23242                           SelectionDAG &DAG) {
23243  // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
23244  // where a va_list is still an i8*.
23245  assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
23246  if (Subtarget.isCallingConvWin64(
23247        DAG.getMachineFunction().getFunction().getCallingConv()))
23248    // Probably a Win64 va_copy.
23249    return DAG.expandVACopy(Op.getNode());
23250
23251  SDValue Chain = Op.getOperand(0);
23252  SDValue DstPtr = Op.getOperand(1);
23253  SDValue SrcPtr = Op.getOperand(2);
23254  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
23255  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
23256  SDLoc DL(Op);
23257
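  // Copy the whole 24-byte __va_list_tag (i32, i32, i8*, i8*) in one memcpy.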
23258  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
23259                       DAG.getIntPtrConstant(24, DL), 8, /*isVolatile*/false,
23260                       false, false,
23261                       MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
23262}
23263
23264// Helper to get immediate/variable SSE shift opcode from other shift opcodes.
23265static unsigned getTargetVShiftUniformOpcode(unsigned Opc, bool IsVariable) {
23266  switch (Opc) {
23267  case ISD::SHL:
23268  case X86ISD::VSHL:
23269  case X86ISD::VSHLI:
23270    return IsVariable ? X86ISD::VSHL : X86ISD::VSHLI;
23271  case ISD::SRL:
23272  case X86ISD::VSRL:
23273  case X86ISD::VSRLI:
23274    return IsVariable ? X86ISD::VSRL : X86ISD::VSRLI;
23275  case ISD::SRA:
23276  case X86ISD::VSRA:
23277  case X86ISD::VSRAI:
23278    return IsVariable ? X86ISD::VSRA : X86ISD::VSRAI;
23279  }
23280  llvm_unreachable("Unknown target vector shift node");
23281}
23282
23283/// Handle vector element shifts where the shift amount is a constant.
23284/// Takes immediate version of shift as input.
23285static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
23286                                          SDValue SrcOp, uint64_t ShiftAmt,
23287                                          SelectionDAG &DAG) {
23288  MVT ElementType = VT.getVectorElementType();
23289
23290  // Bitcast the source vector to the output type; this is mainly necessary for
23291  // vXi8/vXi64 shifts.
23292  if (VT != SrcOp.getSimpleValueType())
23293    SrcOp = DAG.getBitcast(VT, SrcOp);
23294
23295  // Fold this packed shift into its first operand if ShiftAmt is 0.
23296  if (ShiftAmt == 0)
23297    return SrcOp;
23298
23299  // Check for ShiftAmt >= element width
23300  if (ShiftAmt >= ElementType.getSizeInBits()) {
23301    if (Opc == X86ISD::VSRAI)
23302      ShiftAmt = ElementType.getSizeInBits() - 1;
23303    else
23304      return DAG.getConstant(0, dl, VT);
23305  }
23306
23307  assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
23308         && "Unknown target vector shift-by-constant node");
23309
23310  // Fold this packed vector shift into a build vector if SrcOp is a
23311  // vector of Constants or UNDEFs.
23312  if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
23313    SmallVector<SDValue, 8> Elts;
23314    unsigned NumElts = SrcOp->getNumOperands();
23315
23316    switch (Opc) {
23317    default: llvm_unreachable("Unknown opcode!");
23318    case X86ISD::VSHLI:
23319      for (unsigned i = 0; i != NumElts; ++i) {
23320        SDValue CurrentOp = SrcOp->getOperand(i);
23321        if (CurrentOp->isUndef()) {
23322          Elts.push_back(CurrentOp);
23323          continue;
23324        }
23325        auto *ND = cast<ConstantSDNode>(CurrentOp);
23326        const APInt &C = ND->getAPIntValue();
23327        Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), dl, ElementType));
23328      }
23329      break;
23330    case X86ISD::VSRLI:
23331      for (unsigned i = 0; i != NumElts; ++i) {
23332        SDValue CurrentOp = SrcOp->getOperand(i);
23333        if (CurrentOp->isUndef()) {
23334          Elts.push_back(CurrentOp);
23335          continue;
23336        }
23337        auto *ND = cast<ConstantSDNode>(CurrentOp);
23338        const APInt &C = ND->getAPIntValue();
23339        Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), dl, ElementType));
23340      }
23341      break;
23342    case X86ISD::VSRAI:
23343      for (unsigned i = 0; i != NumElts; ++i) {
23344        SDValue CurrentOp = SrcOp->getOperand(i);
23345        if (CurrentOp->isUndef()) {
23346          Elts.push_back(CurrentOp);
23347          continue;
23348        }
23349        auto *ND = cast<ConstantSDNode>(CurrentOp);
23350        const APInt &C = ND->getAPIntValue();
23351        Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), dl, ElementType));
23352      }
23353      break;
23354    }
23355
23356    return DAG.getBuildVector(VT, dl, Elts);
23357  }
23358
23359  return DAG.getNode(Opc, dl, VT, SrcOp,
23360                     DAG.getTargetConstant(ShiftAmt, dl, MVT::i8));
23361}
23362
23363/// Handle vector element shifts where the shift amount may or may not be a
23364/// constant. Takes immediate version of shift as input.
23365static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
23366                                   SDValue SrcOp, SDValue ShAmt,
23367                                   const X86Subtarget &Subtarget,
23368                                   SelectionDAG &DAG) {
23369  MVT SVT = ShAmt.getSimpleValueType();
23370  assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
23371
23372  // Catch shift-by-constant.
23373  if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
23374    return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
23375                                      CShAmt->getZExtValue(), DAG);
23376
23377  // Change opcode to non-immediate version.
23378  Opc = getTargetVShiftUniformOpcode(Opc, true);
23379
23380  // Need to build a vector containing the shift amount.
23381  // SSE/AVX packed shifts only use the lower 64 bits of the shift count.
23382  // +====================+============+=======================================+
23383  // | ShAmt is           | HasSSE4.1? | Construct ShAmt vector as             |
23384  // +====================+============+=======================================+
23385  // | i64                | Yes, No    | Use ShAmt as lowest elt               |
23386  // | i32                | Yes        | zero-extend in-reg                    |
23387  // | (i32 zext(i16/i8)) | Yes        | zero-extend in-reg                    |
23388  // | (i32 zext(i16/i8)) | No         | byte-shift-in-reg                     |
23389  // | i16/i32            | No         | v4i32 build_vector(ShAmt, 0, ud, ud)) |
23390  // +====================+============+=======================================+
23391
23392  if (SVT == MVT::i64)
23393    ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v2i64, ShAmt);
23394  else if (ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
23395           ShAmt.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
23396           (ShAmt.getOperand(0).getSimpleValueType() == MVT::i16 ||
23397            ShAmt.getOperand(0).getSimpleValueType() == MVT::i8)) {
23398    ShAmt = ShAmt.getOperand(0);
23399    MVT AmtTy = ShAmt.getSimpleValueType() == MVT::i8 ? MVT::v16i8 : MVT::v8i16;
23400    ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), AmtTy, ShAmt);
23401    if (Subtarget.hasSSE41())
23402      ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
23403                          MVT::v2i64, ShAmt);
23404    else {
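      // Without SSE4.1, zero-extend the low i8/i16 element by byte-shifting it
      // up to the top of a 128-bit register and back down again.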
23405      SDValue ByteShift = DAG.getTargetConstant(
23406          (128 - AmtTy.getScalarSizeInBits()) / 8, SDLoc(ShAmt), MVT::i8);
23407      ShAmt = DAG.getBitcast(MVT::v16i8, ShAmt);
23408      ShAmt = DAG.getNode(X86ISD::VSHLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
23409                          ByteShift);
23410      ShAmt = DAG.getNode(X86ISD::VSRLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
23411                          ByteShift);
23412    }
23413  } else if (Subtarget.hasSSE41() &&
23414             ShAmt.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
23415    ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v4i32, ShAmt);
23416    ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
23417                        MVT::v2i64, ShAmt);
23418  } else {
23419    SDValue ShOps[4] = {ShAmt, DAG.getConstant(0, dl, SVT), DAG.getUNDEF(SVT),
23420                        DAG.getUNDEF(SVT)};
23421    ShAmt = DAG.getBuildVector(MVT::v4i32, dl, ShOps);
23422  }
23423
23424  // The return type has to be a 128-bit type with the same element
23425  // type as the input type.
23426  MVT EltVT = VT.getVectorElementType();
23427  MVT ShVT = MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits());
23428
23429  ShAmt = DAG.getBitcast(ShVT, ShAmt);
23430  return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
23431}
23432
23433/// Return Mask with the necessary casting or extending
23434/// for \p Mask according to \p MaskVT when lowering masking intrinsics
23435static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
23436                           const X86Subtarget &Subtarget, SelectionDAG &DAG,
23437                           const SDLoc &dl) {
23438
23439  if (isAllOnesConstant(Mask))
23440    return DAG.getConstant(1, dl, MaskVT);
23441  if (X86::isZeroNode(Mask))
23442    return DAG.getConstant(0, dl, MaskVT);
23443
23444  assert(MaskVT.bitsLE(Mask.getSimpleValueType()) && "Unexpected mask size!");
23445
23446  if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
23447    assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!");
23448    assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
23449    // In case 32bit mode, bitcast i64 is illegal, extend/split it.
23450    // In 32-bit mode a bitcast of i64 is illegal; split it into two i32 halves.
23451    Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
23452                        DAG.getConstant(0, dl, MVT::i32));
23453    Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
23454                        DAG.getConstant(1, dl, MVT::i32));
23455
23456    Lo = DAG.getBitcast(MVT::v32i1, Lo);
23457    Hi = DAG.getBitcast(MVT::v32i1, Hi);
23458
23459    return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
23460  } else {
23461    MVT BitcastVT = MVT::getVectorVT(MVT::i1,
23462                                     Mask.getSimpleValueType().getSizeInBits());
23463    // When MaskVT equals v2i1 or v4i1, the low 2 or 4 elements
23464    // are extracted by EXTRACT_SUBVECTOR.
23465    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
23466                       DAG.getBitcast(BitcastVT, Mask),
23467                       DAG.getIntPtrConstant(0, dl));
23468  }
23469}
23470
23471/// Return (and \p Op, \p Mask) for compare instructions or
23472/// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
23473/// necessary casting or extending for \p Mask when lowering masking intrinsics
23474static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
23475                  SDValue PreservedSrc,
23476                  const X86Subtarget &Subtarget,
23477                  SelectionDAG &DAG) {
23478  MVT VT = Op.getSimpleValueType();
23479  MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
23480  unsigned OpcodeSelect = ISD::VSELECT;
23481  SDLoc dl(Op);
23482
23483  if (isAllOnesConstant(Mask))
23484    return Op;
23485
23486  SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
23487
23488  if (PreservedSrc.isUndef())
23489    PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
23490  return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
23491}
23492
23493/// Creates an SDNode for a predicated scalar operation.
23494/// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
23495/// The mask comes in as MVT::i8 and should be transformed
23496/// to MVT::v1i1 while lowering masking intrinsics.
23497/// The main difference between ScalarMaskingNode and VectorMaskingNode is that
23498/// it uses "X86select" instead of "vselect", since a "vselect" node cannot be
23499/// created for a scalar instruction.
23500static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
23501                                    SDValue PreservedSrc,
23502                                    const X86Subtarget &Subtarget,
23503                                    SelectionDAG &DAG) {
23504
23505  if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask))
23506    if (MaskConst->getZExtValue() & 0x1)
23507      return Op;
23508
23509  MVT VT = Op.getSimpleValueType();
23510  SDLoc dl(Op);
23511
23512  assert(Mask.getValueType() == MVT::i8 && "Unexpected type");
23513  SDValue IMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i1,
23514                              DAG.getBitcast(MVT::v8i1, Mask),
23515                              DAG.getIntPtrConstant(0, dl));
23516  if (Op.getOpcode() == X86ISD::FSETCCM ||
23517      Op.getOpcode() == X86ISD::FSETCCM_SAE ||
23518      Op.getOpcode() == X86ISD::VFPCLASSS)
23519    return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
23520
23521  if (PreservedSrc.isUndef())
23522    PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
23523  return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
23524}
23525
23526static int getSEHRegistrationNodeSize(const Function *Fn) {
23527  if (!Fn->hasPersonalityFn())
23528    report_fatal_error(
23529        "querying registration node size for function without personality");
23530  // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
23531  // WinEHStatePass for the full struct definition.
23532  switch (classifyEHPersonality(Fn->getPersonalityFn())) {
23533  case EHPersonality::MSVC_X86SEH: return 24;
23534  case EHPersonality::MSVC_CXX: return 16;
23535  default: break;
23536  }
23537  report_fatal_error(
23538      "can only recover FP for 32-bit MSVC EH personality functions");
23539}
23540
23541/// When the MSVC runtime transfers control to us, either to an outlined
23542/// function or when returning to a parent frame after catching an exception, we
23543/// recover the parent frame pointer by doing arithmetic on the incoming EBP.
23544/// Here's the math:
23545///   RegNodeBase = EntryEBP - RegNodeSize
23546///   ParentFP = RegNodeBase - ParentFrameOffset
23547/// Subtracting RegNodeSize takes us to the offset of the registration node, and
23548/// subtracting the offset (negative on x86) takes us back to the parent FP.
23549static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
23550                                   SDValue EntryEBP) {
23551  MachineFunction &MF = DAG.getMachineFunction();
23552  SDLoc dl;
23553
23554  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23555  MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
23556
23557  // It's possible that the parent function no longer has a personality function
23558  // if the exceptional code was optimized away, in which case we just return
23559  // the incoming EBP.
23560  if (!Fn->hasPersonalityFn())
23561    return EntryEBP;
23562
23563  // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
23564  // registration, or the .set_setframe offset.
23565  MCSymbol *OffsetSym =
23566      MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
23567          GlobalValue::dropLLVMManglingEscape(Fn->getName()));
23568  SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
23569  SDValue ParentFrameOffset =
23570      DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
23571
23572  // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
23573  // prologue to RBP in the parent function.
23574  const X86Subtarget &Subtarget =
23575      static_cast<const X86Subtarget &>(DAG.getSubtarget());
23576  if (Subtarget.is64Bit())
23577    return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
23578
23579  int RegNodeSize = getSEHRegistrationNodeSize(Fn);
23580  // RegNodeBase = EntryEBP - RegNodeSize
23581  // ParentFP = RegNodeBase - ParentFrameOffset
23582  SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
23583                                    DAG.getConstant(RegNodeSize, dl, PtrVT));
23584  return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
23585}
23586
23587SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
23588                                                   SelectionDAG &DAG) const {
23589  // Helper to detect if the operand is CUR_DIRECTION rounding mode.
23590  auto isRoundModeCurDirection = [](SDValue Rnd) {
23591    if (auto *C = dyn_cast<ConstantSDNode>(Rnd))
23592      return C->getAPIntValue() == X86::STATIC_ROUNDING::CUR_DIRECTION;
23593
23594    return false;
23595  };
23596  auto isRoundModeSAE = [](SDValue Rnd) {
23597    if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
23598      unsigned RC = C->getZExtValue();
23599      if (RC & X86::STATIC_ROUNDING::NO_EXC) {
23600        // Clear the NO_EXC bit and check remaining bits.
23601        RC ^= X86::STATIC_ROUNDING::NO_EXC;
23602        // As a convenience we allow either no other bits set or, explicitly,
23603        // the current-direction value.
23604        return RC == 0 || RC == X86::STATIC_ROUNDING::CUR_DIRECTION;
23605      }
23606    }
23607
23608    return false;
23609  };
23610  auto isRoundModeSAEToX = [](SDValue Rnd, unsigned &RC) {
23611    if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
23612      RC = C->getZExtValue();
23613      if (RC & X86::STATIC_ROUNDING::NO_EXC) {
23614        // Clear the NO_EXC bit and check remaining bits.
23615        RC ^= X86::STATIC_ROUNDING::NO_EXC;
23616        return RC == X86::STATIC_ROUNDING::TO_NEAREST_INT ||
23617               RC == X86::STATIC_ROUNDING::TO_NEG_INF ||
23618               RC == X86::STATIC_ROUNDING::TO_POS_INF ||
23619               RC == X86::STATIC_ROUNDING::TO_ZERO;
23620      }
23621    }
23622
23623    return false;
23624  };
23625
23626  SDLoc dl(Op);
23627  unsigned IntNo = Op.getConstantOperandVal(0);
23628  MVT VT = Op.getSimpleValueType();
23629  const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
23630
23631  if (IntrData) {
23632    switch(IntrData->Type) {
23633    case INTR_TYPE_1OP: {
23634      // We specify 2 possible opcodes for intrinsics with rounding modes.
23635      // First, we check if the intrinsic may have non-default rounding mode,
23636      // (IntrData->Opc1 != 0), then we check the rounding mode operand.
23637      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
23638      if (IntrWithRoundingModeOpcode != 0) {
23639        SDValue Rnd = Op.getOperand(2);
23640        unsigned RC = 0;
23641        if (isRoundModeSAEToX(Rnd, RC))
23642          return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
23643                             Op.getOperand(1),
23644                             DAG.getTargetConstant(RC, dl, MVT::i32));
23645        if (!isRoundModeCurDirection(Rnd))
23646          return SDValue();
23647      }
23648      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
23649                         Op.getOperand(1));
23650    }
23651    case INTR_TYPE_1OP_SAE: {
23652      SDValue Sae = Op.getOperand(2);
23653
23654      unsigned Opc;
23655      if (isRoundModeCurDirection(Sae))
23656        Opc = IntrData->Opc0;
23657      else if (isRoundModeSAE(Sae))
23658        Opc = IntrData->Opc1;
23659      else
23660        return SDValue();
23661
23662      return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1));
23663    }
23664    case INTR_TYPE_2OP: {
23665      SDValue Src2 = Op.getOperand(2);
23666
23667      // We specify 2 possible opcodes for intrinsics with rounding modes.
23668      // First, we check if the intrinsic may have non-default rounding mode,
23669      // (IntrData->Opc1 != 0), then we check the rounding mode operand.
23670      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
23671      if (IntrWithRoundingModeOpcode != 0) {
23672        SDValue Rnd = Op.getOperand(3);
23673        unsigned RC = 0;
23674        if (isRoundModeSAEToX(Rnd, RC))
23675          return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
23676                             Op.getOperand(1), Src2,
23677                             DAG.getTargetConstant(RC, dl, MVT::i32));
23678        if (!isRoundModeCurDirection(Rnd))
23679          return SDValue();
23680      }
23681
23682      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
23683                         Op.getOperand(1), Src2);
23684    }
23685    case INTR_TYPE_2OP_SAE: {
23686      SDValue Sae = Op.getOperand(3);
23687
23688      unsigned Opc;
23689      if (isRoundModeCurDirection(Sae))
23690        Opc = IntrData->Opc0;
23691      else if (isRoundModeSAE(Sae))
23692        Opc = IntrData->Opc1;
23693      else
23694        return SDValue();
23695
23696      return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1),
23697                         Op.getOperand(2));
23698    }
23699    case INTR_TYPE_3OP:
23700    case INTR_TYPE_3OP_IMM8: {
23701      SDValue Src1 = Op.getOperand(1);
23702      SDValue Src2 = Op.getOperand(2);
23703      SDValue Src3 = Op.getOperand(3);
23704
23705      // We specify 2 possible opcodes for intrinsics with rounding modes.
23706      // First, we check if the intrinsic may have a non-default rounding mode
23707      // (IntrData->Opc1 != 0), then we check the rounding mode operand.
23708      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
23709      if (IntrWithRoundingModeOpcode != 0) {
23710        SDValue Rnd = Op.getOperand(4);
23711        unsigned RC = 0;
23712        if (isRoundModeSAEToX(Rnd, RC))
23713          return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
23714                             Src1, Src2, Src3,
23715                             DAG.getTargetConstant(RC, dl, MVT::i32));
23716        if (!isRoundModeCurDirection(Rnd))
23717          return SDValue();
23718      }
23719
23720      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
23721                         {Src1, Src2, Src3});
23722    }
23723    case INTR_TYPE_4OP:
23724      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
23725        Op.getOperand(2), Op.getOperand(3), Op.getOperand(4));
23726    case INTR_TYPE_1OP_MASK: {
23727      SDValue Src = Op.getOperand(1);
23728      SDValue PassThru = Op.getOperand(2);
23729      SDValue Mask = Op.getOperand(3);
23730      // We add rounding mode to the Node when
23731      //   - RC Opcode is specified and
23732      //   - RC is not "current direction".
23733      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
23734      if (IntrWithRoundingModeOpcode != 0) {
23735        SDValue Rnd = Op.getOperand(4);
23736        unsigned RC = 0;
23737        if (isRoundModeSAEToX(Rnd, RC))
23738          return getVectorMaskingNode(
23739              DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
23740                          Src, DAG.getTargetConstant(RC, dl, MVT::i32)),
23741              Mask, PassThru, Subtarget, DAG);
23742        if (!isRoundModeCurDirection(Rnd))
23743          return SDValue();
23744      }
23745      return getVectorMaskingNode(
23746          DAG.getNode(IntrData->Opc0, dl, VT, Src), Mask, PassThru,
23747          Subtarget, DAG);
23748    }
23749    case INTR_TYPE_1OP_MASK_SAE: {
23750      SDValue Src = Op.getOperand(1);
23751      SDValue PassThru = Op.getOperand(2);
23752      SDValue Mask = Op.getOperand(3);
23753      SDValue Rnd = Op.getOperand(4);
23754
23755      unsigned Opc;
23756      if (isRoundModeCurDirection(Rnd))
23757        Opc = IntrData->Opc0;
23758      else if (isRoundModeSAE(Rnd))
23759        Opc = IntrData->Opc1;
23760      else
23761        return SDValue();
23762
23763      return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src), Mask, PassThru,
23764                                  Subtarget, DAG);
23765    }
23766    case INTR_TYPE_SCALAR_MASK: {
23767      SDValue Src1 = Op.getOperand(1);
23768      SDValue Src2 = Op.getOperand(2);
23769      SDValue passThru = Op.getOperand(3);
23770      SDValue Mask = Op.getOperand(4);
23771      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
23772      // There are 2 kinds of intrinsics in this group:
23773      // (1) With suppress-all-exceptions (SAE) or rounding mode - 6 operands
23774      // (2) With rounding mode and SAE - 7 operands.
23775      bool HasRounding = IntrWithRoundingModeOpcode != 0;
23776      if (Op.getNumOperands() == (5U + HasRounding)) {
23777        if (HasRounding) {
23778          SDValue Rnd = Op.getOperand(5);
23779          unsigned RC = 0;
23780          if (isRoundModeSAEToX(Rnd, RC))
23781            return getScalarMaskingNode(
23782                DAG.getNode(IntrWithRoundingModeOpcode, dl, VT, Src1, Src2,
23783                            DAG.getTargetConstant(RC, dl, MVT::i32)),
23784                Mask, passThru, Subtarget, DAG);
23785          if (!isRoundModeCurDirection(Rnd))
23786            return SDValue();
23787        }
23788        return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
23789                                                Src2),
23790                                    Mask, passThru, Subtarget, DAG);
23791      }
23792
23793      assert(Op.getNumOperands() == (6U + HasRounding) &&
23794             "Unexpected intrinsic form");
23795      SDValue RoundingMode = Op.getOperand(5);
23796      unsigned Opc = IntrData->Opc0;
23797      if (HasRounding) {
23798        SDValue Sae = Op.getOperand(6);
23799        if (isRoundModeSAE(Sae))
23800          Opc = IntrWithRoundingModeOpcode;
23801        else if (!isRoundModeCurDirection(Sae))
23802          return SDValue();
23803      }
23804      return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1,
23805                                              Src2, RoundingMode),
23806                                  Mask, passThru, Subtarget, DAG);
23807    }
23808    case INTR_TYPE_SCALAR_MASK_RND: {
23809      SDValue Src1 = Op.getOperand(1);
23810      SDValue Src2 = Op.getOperand(2);
23811      SDValue passThru = Op.getOperand(3);
23812      SDValue Mask = Op.getOperand(4);
23813      SDValue Rnd = Op.getOperand(5);
23814
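      // Opc0 handles the current-direction rounding mode; Opc1 takes an
      // explicit static rounding-control immediate.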
23815      SDValue NewOp;
23816      unsigned RC = 0;
23817      if (isRoundModeCurDirection(Rnd))
23818        NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
23819      else if (isRoundModeSAEToX(Rnd, RC))
23820        NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
23821                            DAG.getTargetConstant(RC, dl, MVT::i32));
23822      else
23823        return SDValue();
23824
23825      return getScalarMaskingNode(NewOp, Mask, passThru, Subtarget, DAG);
23826    }
23827    case INTR_TYPE_SCALAR_MASK_SAE: {
23828      SDValue Src1 = Op.getOperand(1);
23829      SDValue Src2 = Op.getOperand(2);
23830      SDValue passThru = Op.getOperand(3);
23831      SDValue Mask = Op.getOperand(4);
23832      SDValue Sae = Op.getOperand(5);
23833      unsigned Opc;
23834      if (isRoundModeCurDirection(Sae))
23835        Opc = IntrData->Opc0;
23836      else if (isRoundModeSAE(Sae))
23837        Opc = IntrData->Opc1;
23838      else
23839        return SDValue();
23840
23841      return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
23842                                  Mask, passThru, Subtarget, DAG);
23843    }
23844    case INTR_TYPE_2OP_MASK: {
23845      SDValue Src1 = Op.getOperand(1);
23846      SDValue Src2 = Op.getOperand(2);
23847      SDValue PassThru = Op.getOperand(3);
23848      SDValue Mask = Op.getOperand(4);
23849      SDValue NewOp;
23850      if (IntrData->Opc1 != 0) {
23851        SDValue Rnd = Op.getOperand(5);
23852        unsigned RC = 0;
23853        if (isRoundModeSAEToX(Rnd, RC))
23854          NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
23855                              DAG.getTargetConstant(RC, dl, MVT::i32));
23856        else if (!isRoundModeCurDirection(Rnd))
23857          return SDValue();
23858      }
23859      if (!NewOp)
23860        NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
23861      return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
23862    }
23863    case INTR_TYPE_2OP_MASK_SAE: {
23864      SDValue Src1 = Op.getOperand(1);
23865      SDValue Src2 = Op.getOperand(2);
23866      SDValue PassThru = Op.getOperand(3);
23867      SDValue Mask = Op.getOperand(4);
23868
23869      unsigned Opc = IntrData->Opc0;
23870      if (IntrData->Opc1 != 0) {
23871        SDValue Sae = Op.getOperand(5);
23872        if (isRoundModeSAE(Sae))
23873          Opc = IntrData->Opc1;
23874        else if (!isRoundModeCurDirection(Sae))
23875          return SDValue();
23876      }
23877
23878      return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
23879                                  Mask, PassThru, Subtarget, DAG);
23880    }
23881    case INTR_TYPE_3OP_SCALAR_MASK_SAE: {
23882      SDValue Src1 = Op.getOperand(1);
23883      SDValue Src2 = Op.getOperand(2);
23884      SDValue Src3 = Op.getOperand(3);
23885      SDValue PassThru = Op.getOperand(4);
23886      SDValue Mask = Op.getOperand(5);
23887      SDValue Sae = Op.getOperand(6);
23888      unsigned Opc;
23889      if (isRoundModeCurDirection(Sae))
23890        Opc = IntrData->Opc0;
23891      else if (isRoundModeSAE(Sae))
23892        Opc = IntrData->Opc1;
23893      else
23894        return SDValue();
23895
23896      return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
23897                                  Mask, PassThru, Subtarget, DAG);
23898    }
23899    case INTR_TYPE_3OP_MASK_SAE: {
23900      SDValue Src1 = Op.getOperand(1);
23901      SDValue Src2 = Op.getOperand(2);
23902      SDValue Src3 = Op.getOperand(3);
23903      SDValue PassThru = Op.getOperand(4);
23904      SDValue Mask = Op.getOperand(5);
23905
23906      unsigned Opc = IntrData->Opc0;
23907      if (IntrData->Opc1 != 0) {
23908        SDValue Sae = Op.getOperand(6);
23909        if (isRoundModeSAE(Sae))
23910          Opc = IntrData->Opc1;
23911        else if (!isRoundModeCurDirection(Sae))
23912          return SDValue();
23913      }
23914      return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
23915                                  Mask, PassThru, Subtarget, DAG);
23916    }
23917    case BLENDV: {
23918      SDValue Src1 = Op.getOperand(1);
23919      SDValue Src2 = Op.getOperand(2);
23920      SDValue Src3 = Op.getOperand(3);
23921
23922      EVT MaskVT = Src3.getValueType().changeVectorElementTypeToInteger();
23923      Src3 = DAG.getBitcast(MaskVT, Src3);
23924
23925      // Reverse the operands to match VSELECT order.
23926      return DAG.getNode(IntrData->Opc0, dl, VT, Src3, Src2, Src1);
23927    }
23928    case VPERM_2OP : {
23929      SDValue Src1 = Op.getOperand(1);
23930      SDValue Src2 = Op.getOperand(2);
23931
23932      // Swap Src1 and Src2 in the node creation.
23933      return DAG.getNode(IntrData->Opc0, dl, VT, Src2, Src1);
23934    }
23935    case IFMA_OP:
23936      // NOTE: We need to swizzle the operands to pass the multiply operands
23937      // first.
23938      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
23939                         Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
23940    case FPCLASSS: {
23941      SDValue Src1 = Op.getOperand(1);
23942      SDValue Imm = Op.getOperand(2);
23943      SDValue Mask = Op.getOperand(3);
23944      SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
23945      SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(),
23946                                                 Subtarget, DAG);
23947      // Need to fill with zeros to ensure the bitcast will produce zeroes
23948      // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
23949      SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
23950                                DAG.getConstant(0, dl, MVT::v8i1),
23951                                FPclassMask, DAG.getIntPtrConstant(0, dl));
23952      return DAG.getBitcast(MVT::i8, Ins);
23953    }
23954
23955    case CMP_MASK_CC: {
23956      MVT MaskVT = Op.getSimpleValueType();
23957      SDValue CC = Op.getOperand(3);
23958      // We specify 2 possible opcodes for intrinsics with rounding modes.
23959      // First, we check if the intrinsic may have a non-default rounding mode
23960      // (IntrData->Opc1 != 0), then we check the rounding mode operand.
23961      if (IntrData->Opc1 != 0) {
23962        SDValue Sae = Op.getOperand(4);
23963        if (isRoundModeSAE(Sae))
23964          return DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
23965                             Op.getOperand(2), CC, Sae);
23966        if (!isRoundModeCurDirection(Sae))
23967          return SDValue();
23968      }
23969      // Default rounding mode.
23970      return DAG.getNode(IntrData->Opc0, dl, MaskVT,
23971                         {Op.getOperand(1), Op.getOperand(2), CC});
23972    }
23973    case CMP_MASK_SCALAR_CC: {
23974      SDValue Src1 = Op.getOperand(1);
23975      SDValue Src2 = Op.getOperand(2);
23976      SDValue CC = Op.getOperand(3);
23977      SDValue Mask = Op.getOperand(4);
23978
23979      SDValue Cmp;
23980      if (IntrData->Opc1 != 0) {
23981        SDValue Sae = Op.getOperand(5);
23982        if (isRoundModeSAE(Sae))
23983          Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Sae);
23984        else if (!isRoundModeCurDirection(Sae))
23985          return SDValue();
23986      }
23987      // Default rounding mode.
23988      if (!Cmp.getNode())
23989        Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);
23990
23991      SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(),
23992                                             Subtarget, DAG);
23993      // Need to fill with zeros to ensure the bitcast will produce zeroes
23994      // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
23995      SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
23996                                DAG.getConstant(0, dl, MVT::v8i1),
23997                                CmpMask, DAG.getIntPtrConstant(0, dl));
23998      return DAG.getBitcast(MVT::i8, Ins);
23999    }
24000    case COMI: { // Comparison intrinsics
24001      ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
24002      SDValue LHS = Op.getOperand(1);
24003      SDValue RHS = Op.getOperand(2);
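      // Evaluate the compare in both operand orders; the LT/LE cases below
      // reuse the A/AE conditions on the swapped compare.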
24004      SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
24005      SDValue InvComi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, RHS, LHS);
24006      SDValue SetCC;
24007      switch (CC) {
24008      case ISD::SETEQ: { // (ZF = 0 and PF = 0)
24009        SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
24010        SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
24011        SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
24012        break;
24013      }
24014      case ISD::SETNE: { // (ZF = 1 or PF = 1)
24015        SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
24016        SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
24017        SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
24018        break;
24019      }
24020      case ISD::SETGT: // (CF = 0 and ZF = 0)
24021        SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
24022        break;
24023      case ISD::SETLT: { // The condition is opposite to GT. Swap the operands.
24024        SetCC = getSETCC(X86::COND_A, InvComi, dl, DAG);
24025        break;
24026      }
24027      case ISD::SETGE: // CF = 0
24028        SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
24029        break;
24030      case ISD::SETLE: // The condition is opposite to GE. Swap the operands.
24031        SetCC = getSETCC(X86::COND_AE, InvComi, dl, DAG);
24032        break;
24033      default:
24034        llvm_unreachable("Unexpected illegal condition!");
24035      }
24036      return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
24037    }
24038    case COMI_RM: { // Comparison intrinsics with Sae
24039      SDValue LHS = Op.getOperand(1);
24040      SDValue RHS = Op.getOperand(2);
24041      unsigned CondVal = Op.getConstantOperandVal(3);
24042      SDValue Sae = Op.getOperand(4);
24043
24044      SDValue FCmp;
24045      if (isRoundModeCurDirection(Sae))
24046        FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
24047                           DAG.getTargetConstant(CondVal, dl, MVT::i8));
24048      else if (isRoundModeSAE(Sae))
24049        FCmp = DAG.getNode(X86ISD::FSETCCM_SAE, dl, MVT::v1i1, LHS, RHS,
24050                           DAG.getTargetConstant(CondVal, dl, MVT::i8), Sae);
24051      else
24052        return SDValue();
24053      // Need to fill with zeros to ensure the bitcast will produce zeroes
24054      // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
24055      SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
24056                                DAG.getConstant(0, dl, MVT::v16i1),
24057                                FCmp, DAG.getIntPtrConstant(0, dl));
24058      return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
24059                         DAG.getBitcast(MVT::i16, Ins));
24060    }
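    // Shift intrinsics: getTargetVShiftNode normalizes the shift-amount
    // operand into the form the target shift node expects.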
24061    case VSHIFT:
24062      return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
24063                                 Op.getOperand(1), Op.getOperand(2), Subtarget,
24064                                 DAG);
24065    case COMPRESS_EXPAND_IN_REG: {
24066      SDValue Mask = Op.getOperand(3);
24067      SDValue DataToCompress = Op.getOperand(1);
24068      SDValue PassThru = Op.getOperand(2);
24069      if (ISD::isBuildVectorAllOnes(Mask.getNode())) // return data as is
24070        return Op.getOperand(1);
24071
24072      // Avoid false dependency.
24073      if (PassThru.isUndef())
24074        PassThru = DAG.getConstant(0, dl, VT);
24075
24076      return DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress, PassThru,
24077                         Mask);
24078    }
24079    case FIXUPIMM:
24080    case FIXUPIMM_MASKZ: {
24081      SDValue Src1 = Op.getOperand(1);
24082      SDValue Src2 = Op.getOperand(2);
24083      SDValue Src3 = Op.getOperand(3);
24084      SDValue Imm = Op.getOperand(4);
24085      SDValue Mask = Op.getOperand(5);
24086      SDValue Passthru = (IntrData->Type == FIXUPIMM)
24087                             ? Src1
24088                             : getZeroVector(VT, Subtarget, DAG, dl);
24089
24090      unsigned Opc = IntrData->Opc0;
24091      if (IntrData->Opc1 != 0) {
24092        SDValue Sae = Op.getOperand(6);
24093        if (isRoundModeSAE(Sae))
24094          Opc = IntrData->Opc1;
24095        else if (!isRoundModeCurDirection(Sae))
24096          return SDValue();
24097      }
24098
24099      SDValue FixupImm = DAG.getNode(Opc, dl, VT, Src1, Src2, Src3, Imm);
24100
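      // The packed fixupimm forms use vector masking; the remaining (scalar)
      // forms use scalar masking.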
24101      if (Opc == X86ISD::VFIXUPIMM || Opc == X86ISD::VFIXUPIMM_SAE)
24102        return getVectorMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
24103
24104      return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
24105    }
24106    case ROUNDP: {
24107      assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
24108      // Clear the upper bits of the rounding immediate so that the legacy
24109      // intrinsic can't trigger the scaling behavior of VRNDSCALE.
24110      auto Round = cast<ConstantSDNode>(Op.getOperand(2));
24111      SDValue RoundingMode =
24112          DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
24113      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
24114                         Op.getOperand(1), RoundingMode);
24115    }
24116    case ROUNDS: {
24117      assert(IntrData->Opc0 == X86ISD::VRNDSCALES && "Unexpected opcode");
24118      // Clear the upper bits of the rounding immediate so that the legacy
24119      // intrinsic can't trigger the scaling behavior of VRNDSCALE.
24120      auto Round = cast<ConstantSDNode>(Op.getOperand(3));
24121      SDValue RoundingMode =
24122          DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
24123      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
24124                         Op.getOperand(1), Op.getOperand(2), RoundingMode);
24125    }
24126    case BEXTRI: {
24127      assert(IntrData->Opc0 == X86ISD::BEXTR && "Unexpected opcode");
24128
24129      // The control is a TargetConstant, but we need to convert it to a
24130      // ConstantSDNode.
24131      uint64_t Imm = Op.getConstantOperandVal(2);
24132      SDValue Control = DAG.getConstant(Imm, dl, Op.getValueType());
24133      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
24134                         Op.getOperand(1), Control);
24135    }
24136    // ADC/ADCX/SBB
24137    case ADX: {
24138      SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
24139      SDVTList VTs = DAG.getVTList(Op.getOperand(2).getValueType(), MVT::i32);
24140
24141      SDValue Res;
24142      // If the carry in is zero, then we should just use ADD/SUB instead of
24143      // ADC/SBB.
24144      if (isNullConstant(Op.getOperand(1))) {
24145        Res = DAG.getNode(IntrData->Opc1, dl, VTs, Op.getOperand(2),
24146                          Op.getOperand(3));
24147      } else {
24148        SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(1),
24149                                    DAG.getConstant(-1, dl, MVT::i8));
24150        Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(2),
24151                          Op.getOperand(3), GenCF.getValue(1));
24152      }
24153      SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
24154      SDValue Results[] = { SetCC, Res };
24155      return DAG.getMergeValues(Results, dl);
24156    }
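    // Conversion/truncation intrinsics with a pass-through value and a mask:
    // use the unmasked Opc0 form when the mask is all-ones, otherwise Opc1,
    // which also takes the pass-through value and a vXi1 mask.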
24157    case CVTPD2PS_MASK:
24158    case CVTPD2DQ_MASK:
24159    case CVTQQ2PS_MASK:
24160    case TRUNCATE_TO_REG: {
24161      SDValue Src = Op.getOperand(1);
24162      SDValue PassThru = Op.getOperand(2);
24163      SDValue Mask = Op.getOperand(3);
24164
24165      if (isAllOnesConstant(Mask))
24166        return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
24167
24168      MVT SrcVT = Src.getSimpleValueType();
24169      MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
24170      Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24171      return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(),
24172                         {Src, PassThru, Mask});
24173    }
24174    case CVTPS2PH_MASK: {
24175      SDValue Src = Op.getOperand(1);
24176      SDValue Rnd = Op.getOperand(2);
24177      SDValue PassThru = Op.getOperand(3);
24178      SDValue Mask = Op.getOperand(4);
24179
24180      if (isAllOnesConstant(Mask))
24181        return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src, Rnd);
24182
24183      MVT SrcVT = Src.getSimpleValueType();
24184      MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
24185      Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24186      return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, Rnd,
24187                         PassThru, Mask);
24188
24189    }
24190    case CVTNEPS2BF16_MASK: {
24191      SDValue Src = Op.getOperand(1);
24192      SDValue PassThru = Op.getOperand(2);
24193      SDValue Mask = Op.getOperand(3);
24194
24195      if (ISD::isBuildVectorAllOnes(Mask.getNode()))
24196        return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
24197
24198      // Break false dependency.
24199      if (PassThru.isUndef())
24200        PassThru = DAG.getConstant(0, dl, PassThru.getValueType());
24201
24202      return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
24203                         Mask);
24204    }
24205    default:
24206      break;
24207    }
24208  }
24209
24210  switch (IntNo) {
24211  default: return SDValue();    // Don't custom lower most intrinsics.
24212
24213  // ptest and testp intrinsics. The intrinsics these come from are designed to
24214  // return an integer value, not just an instruction, so lower them to the
24215  // ptest or testp pattern and a setcc for the result.
24216  case Intrinsic::x86_avx512_ktestc_b:
24217  case Intrinsic::x86_avx512_ktestc_w:
24218  case Intrinsic::x86_avx512_ktestc_d:
24219  case Intrinsic::x86_avx512_ktestc_q:
24220  case Intrinsic::x86_avx512_ktestz_b:
24221  case Intrinsic::x86_avx512_ktestz_w:
24222  case Intrinsic::x86_avx512_ktestz_d:
24223  case Intrinsic::x86_avx512_ktestz_q:
24224  case Intrinsic::x86_sse41_ptestz:
24225  case Intrinsic::x86_sse41_ptestc:
24226  case Intrinsic::x86_sse41_ptestnzc:
24227  case Intrinsic::x86_avx_ptestz_256:
24228  case Intrinsic::x86_avx_ptestc_256:
24229  case Intrinsic::x86_avx_ptestnzc_256:
24230  case Intrinsic::x86_avx_vtestz_ps:
24231  case Intrinsic::x86_avx_vtestc_ps:
24232  case Intrinsic::x86_avx_vtestnzc_ps:
24233  case Intrinsic::x86_avx_vtestz_pd:
24234  case Intrinsic::x86_avx_vtestc_pd:
24235  case Intrinsic::x86_avx_vtestnzc_pd:
24236  case Intrinsic::x86_avx_vtestz_ps_256:
24237  case Intrinsic::x86_avx_vtestc_ps_256:
24238  case Intrinsic::x86_avx_vtestnzc_ps_256:
24239  case Intrinsic::x86_avx_vtestz_pd_256:
24240  case Intrinsic::x86_avx_vtestc_pd_256:
24241  case Intrinsic::x86_avx_vtestnzc_pd_256: {
24242    unsigned TestOpc = X86ISD::PTEST;
24243    X86::CondCode X86CC;
24244    switch (IntNo) {
24245    default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
24246    case Intrinsic::x86_avx512_ktestc_b:
24247    case Intrinsic::x86_avx512_ktestc_w:
24248    case Intrinsic::x86_avx512_ktestc_d:
24249    case Intrinsic::x86_avx512_ktestc_q:
24250      // CF = 1
24251      TestOpc = X86ISD::KTEST;
24252      X86CC = X86::COND_B;
24253      break;
24254    case Intrinsic::x86_avx512_ktestz_b:
24255    case Intrinsic::x86_avx512_ktestz_w:
24256    case Intrinsic::x86_avx512_ktestz_d:
24257    case Intrinsic::x86_avx512_ktestz_q:
24258      TestOpc = X86ISD::KTEST;
24259      X86CC = X86::COND_E;
24260      break;
24261    case Intrinsic::x86_avx_vtestz_ps:
24262    case Intrinsic::x86_avx_vtestz_pd:
24263    case Intrinsic::x86_avx_vtestz_ps_256:
24264    case Intrinsic::x86_avx_vtestz_pd_256:
24265      TestOpc = X86ISD::TESTP;
24266      LLVM_FALLTHROUGH;
24267    case Intrinsic::x86_sse41_ptestz:
24268    case Intrinsic::x86_avx_ptestz_256:
24269      // ZF = 1
24270      X86CC = X86::COND_E;
24271      break;
24272    case Intrinsic::x86_avx_vtestc_ps:
24273    case Intrinsic::x86_avx_vtestc_pd:
24274    case Intrinsic::x86_avx_vtestc_ps_256:
24275    case Intrinsic::x86_avx_vtestc_pd_256:
24276      TestOpc = X86ISD::TESTP;
24277      LLVM_FALLTHROUGH;
24278    case Intrinsic::x86_sse41_ptestc:
24279    case Intrinsic::x86_avx_ptestc_256:
24280      // CF = 1
24281      X86CC = X86::COND_B;
24282      break;
24283    case Intrinsic::x86_avx_vtestnzc_ps:
24284    case Intrinsic::x86_avx_vtestnzc_pd:
24285    case Intrinsic::x86_avx_vtestnzc_ps_256:
24286    case Intrinsic::x86_avx_vtestnzc_pd_256:
24287      TestOpc = X86ISD::TESTP;
24288      LLVM_FALLTHROUGH;
24289    case Intrinsic::x86_sse41_ptestnzc:
24290    case Intrinsic::x86_avx_ptestnzc_256:
24291      // ZF and CF = 0
24292      X86CC = X86::COND_A;
24293      break;
24294    }
24295
24296    SDValue LHS = Op.getOperand(1);
24297    SDValue RHS = Op.getOperand(2);
24298    SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
24299    SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
24300    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
24301  }
24302
24303  case Intrinsic::x86_sse42_pcmpistria128:
24304  case Intrinsic::x86_sse42_pcmpestria128:
24305  case Intrinsic::x86_sse42_pcmpistric128:
24306  case Intrinsic::x86_sse42_pcmpestric128:
24307  case Intrinsic::x86_sse42_pcmpistrio128:
24308  case Intrinsic::x86_sse42_pcmpestrio128:
24309  case Intrinsic::x86_sse42_pcmpistris128:
24310  case Intrinsic::x86_sse42_pcmpestris128:
24311  case Intrinsic::x86_sse42_pcmpistriz128:
24312  case Intrinsic::x86_sse42_pcmpestriz128: {
24313    unsigned Opcode;
24314    X86::CondCode X86CC;
24315    switch (IntNo) {
24316    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
24317    case Intrinsic::x86_sse42_pcmpistria128:
24318      Opcode = X86ISD::PCMPISTR;
24319      X86CC = X86::COND_A;
24320      break;
24321    case Intrinsic::x86_sse42_pcmpestria128:
24322      Opcode = X86ISD::PCMPESTR;
24323      X86CC = X86::COND_A;
24324      break;
24325    case Intrinsic::x86_sse42_pcmpistric128:
24326      Opcode = X86ISD::PCMPISTR;
24327      X86CC = X86::COND_B;
24328      break;
24329    case Intrinsic::x86_sse42_pcmpestric128:
24330      Opcode = X86ISD::PCMPESTR;
24331      X86CC = X86::COND_B;
24332      break;
24333    case Intrinsic::x86_sse42_pcmpistrio128:
24334      Opcode = X86ISD::PCMPISTR;
24335      X86CC = X86::COND_O;
24336      break;
24337    case Intrinsic::x86_sse42_pcmpestrio128:
24338      Opcode = X86ISD::PCMPESTR;
24339      X86CC = X86::COND_O;
24340      break;
24341    case Intrinsic::x86_sse42_pcmpistris128:
24342      Opcode = X86ISD::PCMPISTR;
24343      X86CC = X86::COND_S;
24344      break;
24345    case Intrinsic::x86_sse42_pcmpestris128:
24346      Opcode = X86ISD::PCMPESTR;
24347      X86CC = X86::COND_S;
24348      break;
24349    case Intrinsic::x86_sse42_pcmpistriz128:
24350      Opcode = X86ISD::PCMPISTR;
24351      X86CC = X86::COND_E;
24352      break;
24353    case Intrinsic::x86_sse42_pcmpestriz128:
24354      Opcode = X86ISD::PCMPESTR;
24355      X86CC = X86::COND_E;
24356      break;
24357    }
24358    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
24359    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
24360    SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps).getValue(2);
24361    SDValue SetCC = getSETCC(X86CC, PCMP, dl, DAG);
24362    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
24363  }
24364
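  // pcmpistri/pcmpestri return the index (result 0 of the PCMPxSTR node).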
24365  case Intrinsic::x86_sse42_pcmpistri128:
24366  case Intrinsic::x86_sse42_pcmpestri128: {
24367    unsigned Opcode;
24368    if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
24369      Opcode = X86ISD::PCMPISTR;
24370    else
24371      Opcode = X86ISD::PCMPESTR;
24372
24373    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
24374    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
24375    return DAG.getNode(Opcode, dl, VTs, NewOps);
24376  }
24377
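  // pcmpistrm/pcmpestrm return the v16i8 mask (result 1 of the node).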
24378  case Intrinsic::x86_sse42_pcmpistrm128:
24379  case Intrinsic::x86_sse42_pcmpestrm128: {
24380    unsigned Opcode;
24381    if (IntNo == Intrinsic::x86_sse42_pcmpistrm128)
24382      Opcode = X86ISD::PCMPISTR;
24383    else
24384      Opcode = X86ISD::PCMPESTR;
24385
24386    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
24387    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
24388    return DAG.getNode(Opcode, dl, VTs, NewOps).getValue(1);
24389  }
24390
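  // Return the address of this function's language-specific data area, i.e.
  // the "GCC_except_table<N>" symbol, through the usual global wrapper node.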
24391  case Intrinsic::eh_sjlj_lsda: {
24392    MachineFunction &MF = DAG.getMachineFunction();
24393    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24394    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
24395    auto &Context = MF.getMMI().getContext();
24396    MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
24397                                            Twine(MF.getFunctionNumber()));
24398    return DAG.getNode(getGlobalWrapperKind(), dl, VT,
24399                       DAG.getMCSymbol(S, PtrVT));
24400  }
24401
24402  case Intrinsic::x86_seh_lsda: {
24403    // Compute the symbol for the LSDA. We know it'll get emitted later.
24404    MachineFunction &MF = DAG.getMachineFunction();
24405    SDValue Op1 = Op.getOperand(1);
24406    auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
24407    MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
24408        GlobalValue::dropLLVMManglingEscape(Fn->getName()));
24409
24410    // Generate a simple absolute symbol reference. This intrinsic is only
24411    // supported on 32-bit Windows, which isn't PIC.
24412    SDValue Result = DAG.getMCSymbol(LSDASym, VT);
24413    return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
24414  }
24415
24416  case Intrinsic::eh_recoverfp: {
24417    SDValue FnOp = Op.getOperand(1);
24418    SDValue IncomingFPOp = Op.getOperand(2);
24419    GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
24420    auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
24421    if (!Fn)
24422      report_fatal_error(
24423          "llvm.eh.recoverfp must take a function as the first argument");
24424    return recoverFramePointer(DAG, Fn, IncomingFPOp);
24425  }
24426
24427  case Intrinsic::localaddress: {
24428    // Returns one of the stack, base, or frame pointer registers, depending on
24429    // which is used to reference local variables.
24430    MachineFunction &MF = DAG.getMachineFunction();
24431    const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
24432    unsigned Reg;
24433    if (RegInfo->hasBasePointer(MF))
24434      Reg = RegInfo->getBaseRegister();
24435    else { // Handles the SP or FP case.
24436      bool CantUseFP = RegInfo->needsStackRealignment(MF);
24437      if (CantUseFP)
24438        Reg = RegInfo->getPtrSizedStackRegister(MF);
24439      else
24440        Reg = RegInfo->getPtrSizedFrameRegister(MF);
24441    }
24442    return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
24443  }
24444
24445  case Intrinsic::x86_avx512_vp2intersect_q_512:
24446  case Intrinsic::x86_avx512_vp2intersect_q_256:
24447  case Intrinsic::x86_avx512_vp2intersect_q_128:
24448  case Intrinsic::x86_avx512_vp2intersect_d_512:
24449  case Intrinsic::x86_avx512_vp2intersect_d_256:
24450  case Intrinsic::x86_avx512_vp2intersect_d_128: {
24451    MVT MaskVT = Op.getSimpleValueType();
24452
24453    SDVTList VTs = DAG.getVTList(MVT::Untyped, MVT::Other);
24454    SDLoc DL(Op);
24455
24456    SDValue Operation =
24457        DAG.getNode(X86ISD::VP2INTERSECT, DL, VTs,
24458                    Op->getOperand(1), Op->getOperand(2));
24459
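    // VP2INTERSECT defines a pair of mask registers; extract each half of the
    // untyped result through the mask subregister indices.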
24460    SDValue Result0 = DAG.getTargetExtractSubreg(X86::sub_mask_0, DL,
24461                                                 MaskVT, Operation);
24462    SDValue Result1 = DAG.getTargetExtractSubreg(X86::sub_mask_1, DL,
24463                                                 MaskVT, Operation);
24464    return DAG.getMergeValues({Result0, Result1}, DL);
24465  }
24466  case Intrinsic::x86_mmx_pslli_w:
24467  case Intrinsic::x86_mmx_pslli_d:
24468  case Intrinsic::x86_mmx_pslli_q:
24469  case Intrinsic::x86_mmx_psrli_w:
24470  case Intrinsic::x86_mmx_psrli_d:
24471  case Intrinsic::x86_mmx_psrli_q:
24472  case Intrinsic::x86_mmx_psrai_w:
24473  case Intrinsic::x86_mmx_psrai_d: {
24474    SDLoc DL(Op);
24475    SDValue ShAmt = Op.getOperand(2);
24476    // If the argument is a constant, convert it to a target constant.
24477    if (auto *C = dyn_cast<ConstantSDNode>(ShAmt)) {
24478      // Clamp out-of-bounds shift amounts since they will otherwise be masked
24479      // to 8 bits, which may make them no longer out of bounds.
24480      unsigned ShiftAmount = C->getAPIntValue().getLimitedValue(255);
24481      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
24482                         Op.getOperand(0), Op.getOperand(1),
24483                         DAG.getTargetConstant(ShiftAmount, DL, MVT::i32));
24484    }
24485
24486    unsigned NewIntrinsic;
24487    switch (IntNo) {
24488    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
24489    case Intrinsic::x86_mmx_pslli_w:
24490      NewIntrinsic = Intrinsic::x86_mmx_psll_w;
24491      break;
24492    case Intrinsic::x86_mmx_pslli_d:
24493      NewIntrinsic = Intrinsic::x86_mmx_psll_d;
24494      break;
24495    case Intrinsic::x86_mmx_pslli_q:
24496      NewIntrinsic = Intrinsic::x86_mmx_psll_q;
24497      break;
24498    case Intrinsic::x86_mmx_psrli_w:
24499      NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
24500      break;
24501    case Intrinsic::x86_mmx_psrli_d:
24502      NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
24503      break;
24504    case Intrinsic::x86_mmx_psrli_q:
24505      NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
24506      break;
24507    case Intrinsic::x86_mmx_psrai_w:
24508      NewIntrinsic = Intrinsic::x86_mmx_psra_w;
24509      break;
24510    case Intrinsic::x86_mmx_psrai_d:
24511      NewIntrinsic = Intrinsic::x86_mmx_psra_d;
24512      break;
24513    }
24514
24515    // The vector shift intrinsics with scalars use 32-bit shift amounts, but
24516    // the SSE2/MMX shift instructions read 64 bits. Copy the 32 bits to an
24517    // MMX register.
24518    ShAmt = DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, ShAmt);
24519    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
24520                       DAG.getConstant(NewIntrinsic, DL, MVT::i32),
24521                       Op.getOperand(1), ShAmt);
24522
24523  }
24524  }
24525}
24526
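/// Lower an AVX2 gather intrinsic. The scale operand must be a constant, and
/// an undef (or provably unused) source is replaced with zero to break a
/// false register dependency.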
24527static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
24528                                 SDValue Src, SDValue Mask, SDValue Base,
24529                                 SDValue Index, SDValue ScaleOp, SDValue Chain,
24530                                 const X86Subtarget &Subtarget) {
24531  SDLoc dl(Op);
24532  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
24533  // Scale must be constant.
24534  if (!C)
24535    return SDValue();
24536  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24537  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
24538                                        TLI.getPointerTy(DAG.getDataLayout()));
24539  EVT MaskVT = Mask.getValueType().changeVectorElementTypeToInteger();
24540  SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
24541  // If source is undef or we know it won't be used, use a zero vector
24542  // to break register dependency.
24543  // TODO: use undef instead and let BreakFalseDeps deal with it?
24544  if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
24545    Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
24546
24547  MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
24548
24549  SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
24550  SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
24551    VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
24552  return DAG.getMergeValues({ Res, Res.getValue(2) }, dl);
24553}
24554
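/// Lower an AVX-512 gather intrinsic. The mask may be a scalar integer; it is
/// converted to a vXi1 vector sized to the narrower of the index and result
/// vectors.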
24555static SDValue getGatherNode(SDValue Op, SelectionDAG &DAG,
24556                             SDValue Src, SDValue Mask, SDValue Base,
24557                             SDValue Index, SDValue ScaleOp, SDValue Chain,
24558                             const X86Subtarget &Subtarget) {
24559  MVT VT = Op.getSimpleValueType();
24560  SDLoc dl(Op);
24561  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
24562  // Scale must be constant.
24563  if (!C)
24564    return SDValue();
24565  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24566  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
24567                                        TLI.getPointerTy(DAG.getDataLayout()));
24568  unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
24569                              VT.getVectorNumElements());
24570  MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
24571
24572  // We support two versions of the gather intrinsics. One with scalar mask and
24573  // one with vXi1 mask. Convert scalar to vXi1 if necessary.
24574  if (Mask.getValueType() != MaskVT)
24575    Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24576
24577  SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
24578  // If source is undef or we know it won't be used, use a zero vector
24579  // to break register dependency.
24580  // TODO: use undef instead and let BreakFalseDeps deal with it?
24581  if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
24582    Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
24583
24584  MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
24585
24586  SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
24587  SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
24588    VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
24589  return DAG.getMergeValues({ Res, Res.getValue(2) }, dl);
24590}
24591
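/// Lower an AVX-512 scatter intrinsic. Only the chain result is returned,
/// since a scatter produces no value.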
24592static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
24593                               SDValue Src, SDValue Mask, SDValue Base,
24594                               SDValue Index, SDValue ScaleOp, SDValue Chain,
24595                               const X86Subtarget &Subtarget) {
24596  SDLoc dl(Op);
24597  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
24598  // Scale must be constant.
24599  if (!C)
24600    return SDValue();
24601  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24602  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
24603                                        TLI.getPointerTy(DAG.getDataLayout()));
24604  unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
24605                              Src.getSimpleValueType().getVectorNumElements());
24606  MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
24607
24608  // We support two versions of the scatter intrinsics. One with scalar mask and
24609  // one with vXi1 mask. Convert scalar to vXi1 if necessary.
24610  if (Mask.getValueType() != MaskVT)
24611    Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24612
24613  MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
24614
24615  SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
24616  SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
24617  SDValue Res = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
24618      VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
24619  return Res.getValue(1);
24620}
24621
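/// Lower a gather/scatter prefetch intrinsic to a machine node using the full
/// base/scale/index/disp/segment addressing operand list.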
24622static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
24623                               SDValue Mask, SDValue Base, SDValue Index,
24624                               SDValue ScaleOp, SDValue Chain,
24625                               const X86Subtarget &Subtarget) {
24626  SDLoc dl(Op);
24627  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
24628  // Scale must be constant.
24629  if (!C)
24630    return SDValue();
24631  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24632  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
24633                                        TLI.getPointerTy(DAG.getDataLayout()));
24634  SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
24635  SDValue Segment = DAG.getRegister(0, MVT::i32);
24636  MVT MaskVT =
24637    MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
24638  SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24639  SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
24640  SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
24641  return SDValue(Res, 0);
24642}
24643
24644/// Handles the lowering of builtin intrinsics with chain that return their
24645/// value into registers EDX:EAX.
24646/// If operand SrcReg is a valid register identifier, then operand 2 of N is
24647/// copied to SrcReg. The assumption is that SrcReg is an implicit input to
24648/// TargetOpcode.
24649/// Returns a Glue value which can be used to add extra copy-from-reg if the
24650/// expanded intrinsic implicitly defines extra registers (i.e. not just
24651/// EDX:EAX).
24652static SDValue expandIntrinsicWChainHelper(SDNode *N, const SDLoc &DL,
24653                                        SelectionDAG &DAG,
24654                                        unsigned TargetOpcode,
24655                                        unsigned SrcReg,
24656                                        const X86Subtarget &Subtarget,
24657                                        SmallVectorImpl<SDValue> &Results) {
24658  SDValue Chain = N->getOperand(0);
24659  SDValue Glue;
24660
24661  if (SrcReg) {
24662    assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
24663    Chain = DAG.getCopyToReg(Chain, DL, SrcReg, N->getOperand(2), Glue);
24664    Glue = Chain.getValue(1);
24665  }
24666
24667  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
24668  SDValue N1Ops[] = {Chain, Glue};
24669  SDNode *N1 = DAG.getMachineNode(
24670      TargetOpcode, DL, Tys, ArrayRef<SDValue>(N1Ops, Glue.getNode() ? 2 : 1));
24671  Chain = SDValue(N1, 0);
24672
24673  // Read the result out of registers EDX:EAX.
24674  SDValue LO, HI;
24675  if (Subtarget.is64Bit()) {
24676    LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
24677    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
24678                            LO.getValue(2));
24679  } else {
24680    LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
24681    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
24682                            LO.getValue(2));
24683  }
24684  Chain = HI.getValue(1);
24685  Glue = HI.getValue(2);
24686
24687  if (Subtarget.is64Bit()) {
24688    // Merge the two 32-bit values into a 64-bit one.
24689    SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
24690                              DAG.getConstant(32, DL, MVT::i8));
24691    Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
24692    Results.push_back(Chain);
24693    return Glue;
24694  }
24695
24696  // Use a buildpair to merge the two 32-bit values into a 64-bit one.
24697  SDValue Ops[] = { LO, HI };
24698  SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
24699  Results.push_back(Pair);
24700  Results.push_back(Chain);
24701  return Glue;
24702}
24703
24704/// Handles the lowering of builtin intrinsics that read the time stamp counter
24705/// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
24706/// READCYCLECOUNTER nodes.
24707static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
24708                                    SelectionDAG &DAG,
24709                                    const X86Subtarget &Subtarget,
24710                                    SmallVectorImpl<SDValue> &Results) {
24711  // The processor's time-stamp counter (a 64-bit MSR) is stored into the
24712  // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
24713  // and the EAX register is loaded with the low-order 32 bits.
24714  SDValue Glue = expandIntrinsicWChainHelper(N, DL, DAG, Opcode,
24715                                             /* NoRegister */0, Subtarget,
24716                                             Results);
24717  if (Opcode != X86::RDTSCP)
24718    return;
24719
24720  SDValue Chain = Results[1];
24721  // Instruction RDTSCP loads the IA32_TSC_AUX MSR (address C000_0103H) into
24722  // the ECX register. Add 'ecx' explicitly to the chain.
24723  SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32, Glue);
24724  Results[1] = ecx;
24725  Results.push_back(ecx.getValue(1));
24726}
24727
24728static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
24729                                     SelectionDAG &DAG) {
24730  SmallVector<SDValue, 3> Results;
24731  SDLoc DL(Op);
24732  getReadTimeStampCounter(Op.getNode(), DL, X86::RDTSC, DAG, Subtarget,
24733                          Results);
24734  return DAG.getMergeValues(Results, DL);
24735}
24736
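/// Record the frame index of the EH registration node's alloca in
/// WinEHFuncInfo; no new DAG nodes are created.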
24737static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
24738  MachineFunction &MF = DAG.getMachineFunction();
24739  SDValue Chain = Op.getOperand(0);
24740  SDValue RegNode = Op.getOperand(2);
24741  WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
24742  if (!EHInfo)
24743    report_fatal_error("EH registrations only live in functions using WinEH");
24744
24745  // Cast the operand to an alloca, and remember the frame index.
24746  auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
24747  if (!FINode)
24748    report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
24749  EHInfo->EHRegNodeFrameIndex = FINode->getIndex();
24750
24751  // Return the chain operand without making any DAG nodes.
24752  return Chain;
24753}
24754
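/// Record the frame index of the EH guard slot's alloca in WinEHFuncInfo; no
/// new DAG nodes are created.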
24755static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
24756  MachineFunction &MF = DAG.getMachineFunction();
24757  SDValue Chain = Op.getOperand(0);
24758  SDValue EHGuard = Op.getOperand(2);
24759  WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
24760  if (!EHInfo)
24761    report_fatal_error("EHGuard only lives in functions using WinEH");
24762
24763  // Cast the operand to an alloca, and remember the frame index.
24764  auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
24765  if (!FINode)
24766    report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
24767  EHInfo->EHGuardFrameIndex = FINode->getIndex();
24768
24769  // Return the chain operand without making any DAG nodes.
24770  return Chain;
24771}
24772
24773/// Emit Truncating Store with signed or unsigned saturation.
24774static SDValue
24775EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl, SDValue Val,
24776                SDValue Ptr, EVT MemVT, MachineMemOperand *MMO,
24777                SelectionDAG &DAG) {
24778
24779  SDVTList VTs = DAG.getVTList(MVT::Other);
24780  SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
24781  SDValue Ops[] = { Chain, Val, Ptr, Undef };
24782  return SignedSat ?
24783    DAG.getTargetMemSDNode<TruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
24784    DAG.getTargetMemSDNode<TruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
24785}
24786
24787/// Emit Masked Truncating Store with signed or unsigned saturation.
24788static SDValue
24789EmitMaskedTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl,
24790                      SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT,
24791                      MachineMemOperand *MMO, SelectionDAG &DAG) {
24792
24793  SDVTList VTs = DAG.getVTList(MVT::Other);
24794  SDValue Ops[] = { Chain, Val, Ptr, Mask };
24795  return SignedSat ?
24796    DAG.getTargetMemSDNode<MaskedTruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
24797    DAG.getTargetMemSDNode<MaskedTruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
24798}
24799
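/// Custom lower chained intrinsics (gathers, scatters, RDRAND/RDSEED,
/// truncating stores, etc.). Intrinsics without an IntrinsicData entry are
/// handled by the explicit switch at the top.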
24800static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
24801                                      SelectionDAG &DAG) {
24802  unsigned IntNo = Op.getConstantOperandVal(1);
24803  const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
24804  if (!IntrData) {
24805    switch (IntNo) {
24806    case llvm::Intrinsic::x86_seh_ehregnode:
24807      return MarkEHRegistrationNode(Op, DAG);
24808    case llvm::Intrinsic::x86_seh_ehguard:
24809      return MarkEHGuard(Op, DAG);
24810    case llvm::Intrinsic::x86_rdpkru: {
24811      SDLoc dl(Op);
24812      SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
24813      // Create a RDPKRU node and pass 0 to the ECX parameter.
24814      return DAG.getNode(X86ISD::RDPKRU, dl, VTs, Op.getOperand(0),
24815                         DAG.getConstant(0, dl, MVT::i32));
24816    }
24817    case llvm::Intrinsic::x86_wrpkru: {
24818      SDLoc dl(Op);
24819      // Create a WRPKRU node, pass the input to the EAX parameter, and pass 0
24820      // to the EDX and ECX parameters.
24821      return DAG.getNode(X86ISD::WRPKRU, dl, MVT::Other,
24822                         Op.getOperand(0), Op.getOperand(2),
24823                         DAG.getConstant(0, dl, MVT::i32),
24824                         DAG.getConstant(0, dl, MVT::i32));
24825    }
24826    case llvm::Intrinsic::x86_flags_read_u32:
24827    case llvm::Intrinsic::x86_flags_read_u64:
24828    case llvm::Intrinsic::x86_flags_write_u32:
24829    case llvm::Intrinsic::x86_flags_write_u64: {
24830      // We need a frame pointer because this will get lowered to a PUSH/POP
24831      // sequence.
24832      MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
24833      MFI.setHasCopyImplyingStackAdjustment(true);
24834      // Don't do anything here; we will expand these intrinsics out later
24835      // during FinalizeISel in EmitInstrWithCustomInserter.
24836      return Op;
24837    }
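    // These intrinsics produce an i32 EFLAGS-like result; the carry flag of
    // that result becomes the intrinsic's boolean return value.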
24838    case Intrinsic::x86_lwpins32:
24839    case Intrinsic::x86_lwpins64:
24840    case Intrinsic::x86_umwait:
24841    case Intrinsic::x86_tpause: {
24842      SDLoc dl(Op);
24843      SDValue Chain = Op->getOperand(0);
24844      SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
24845      unsigned Opcode;
24846
24847      switch (IntNo) {
24848      default: llvm_unreachable("Impossible intrinsic");
24849      case Intrinsic::x86_umwait:
24850        Opcode = X86ISD::UMWAIT;
24851        break;
24852      case Intrinsic::x86_tpause:
24853        Opcode = X86ISD::TPAUSE;
24854        break;
24855      case Intrinsic::x86_lwpins32:
24856      case Intrinsic::x86_lwpins64:
24857        Opcode = X86ISD::LWPINS;
24858        break;
24859      }
24860
24861      SDValue Operation =
24862          DAG.getNode(Opcode, dl, VTs, Chain, Op->getOperand(2),
24863                      Op->getOperand(3), Op->getOperand(4));
24864      SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
24865      return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
24866                         Operation.getValue(1));
24867    }
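    // ENQCMD/ENQCMDS report their status in ZF; the intrinsic returns ZF as
    // its boolean result.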
24868    case Intrinsic::x86_enqcmd:
24869    case Intrinsic::x86_enqcmds: {
24870      SDLoc dl(Op);
24871      SDValue Chain = Op.getOperand(0);
24872      SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
24873      unsigned Opcode;
24874      switch (IntNo) {
24875      default: llvm_unreachable("Impossible intrinsic!");
24876      case Intrinsic::x86_enqcmd:
24877        Opcode = X86ISD::ENQCMD;
24878        break;
24879      case Intrinsic::x86_enqcmds:
24880        Opcode = X86ISD::ENQCMDS;
24881        break;
24882      }
24883      SDValue Operation = DAG.getNode(Opcode, dl, VTs, Chain, Op.getOperand(2),
24884                                      Op.getOperand(3));
24885      SDValue SetCC = getSETCC(X86::COND_E, Operation.getValue(0), dl, DAG);
24886      return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
24887                         Operation.getValue(1));
24888    }
24889    }
24890    return SDValue();
24891  }
24892
24893  SDLoc dl(Op);
24894  switch(IntrData->Type) {
24895  default: llvm_unreachable("Unknown Intrinsic Type");
24896  case RDSEED:
24897  case RDRAND: {
24898    // Emit the node with the right value type.
24899    SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32, MVT::Other);
24900    SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
24901
24902    // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
24903    // Otherwise return the value from Rand, which is always 0, cast to i32.
24904    SDValue Ops[] = {DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
24905                     DAG.getConstant(1, dl, Op->getValueType(1)),
24906                     DAG.getTargetConstant(X86::COND_B, dl, MVT::i8),
24907                     SDValue(Result.getNode(), 1)};
24908    SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops);
24909
24910    // Return { result, isValid, chain }.
24911    return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
24912                       SDValue(Result.getNode(), 2));
24913  }
24914  case GATHER_AVX2: {
24915    SDValue Chain = Op.getOperand(0);
24916    SDValue Src   = Op.getOperand(2);
24917    SDValue Base  = Op.getOperand(3);
24918    SDValue Index = Op.getOperand(4);
24919    SDValue Mask  = Op.getOperand(5);
24920    SDValue Scale = Op.getOperand(6);
24921    return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
24922                             Scale, Chain, Subtarget);
24923  }
24924  case GATHER: {
24925    // gather(v1, mask, index, base, scale);
24926    SDValue Chain = Op.getOperand(0);
24927    SDValue Src   = Op.getOperand(2);
24928    SDValue Base  = Op.getOperand(3);
24929    SDValue Index = Op.getOperand(4);
24930    SDValue Mask  = Op.getOperand(5);
24931    SDValue Scale = Op.getOperand(6);
24932    return getGatherNode(Op, DAG, Src, Mask, Base, Index, Scale,
24933                         Chain, Subtarget);
24934  }
24935  case SCATTER: {
24936    // scatter(base, mask, index, v1, scale);
24937    SDValue Chain = Op.getOperand(0);
24938    SDValue Base  = Op.getOperand(2);
24939    SDValue Mask  = Op.getOperand(3);
24940    SDValue Index = Op.getOperand(4);
24941    SDValue Src   = Op.getOperand(5);
24942    SDValue Scale = Op.getOperand(6);
24943    return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
24944                          Scale, Chain, Subtarget);
24945  }
24946  case PREFETCH: {
24947    const APInt &HintVal = Op.getConstantOperandAPInt(6);
24948    assert((HintVal == 2 || HintVal == 3) &&
24949           "Wrong prefetch hint in intrinsic: should be 2 or 3");
24950    unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0);
24951    SDValue Chain = Op.getOperand(0);
24952    SDValue Mask  = Op.getOperand(2);
24953    SDValue Index = Op.getOperand(3);
24954    SDValue Base  = Op.getOperand(4);
24955    SDValue Scale = Op.getOperand(5);
24956    return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
24957                           Subtarget);
24958  }
24959  // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
24960  case RDTSC: {
24961    SmallVector<SDValue, 2> Results;
24962    getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
24963                            Results);
24964    return DAG.getMergeValues(Results, dl);
24965  }
24966  // Read Performance Monitoring Counters.
24967  case RDPMC:
24968  // GetExtended Control Register.
24969  case XGETBV: {
24970    SmallVector<SDValue, 2> Results;
24971
24972    // RDPMC uses ECX to select the index of the performance counter to read.
24973    // XGETBV uses ECX to select the index of the XCR register to return.
24974    // The result is stored into registers EDX:EAX.
24975    expandIntrinsicWChainHelper(Op.getNode(), dl, DAG, IntrData->Opc0, X86::ECX,
24976                                Subtarget, Results);
24977    return DAG.getMergeValues(Results, dl);
24978  }
24979  // XTEST intrinsics.
24980  case XTEST: {
24981    SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
24982    SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
24983
24984    SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
24985    SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
24986    return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
24987                       Ret, SDValue(InTrans.getNode(), 1));
24988  }
24989  case TRUNCATE_TO_MEM_VI8:
24990  case TRUNCATE_TO_MEM_VI16:
24991  case TRUNCATE_TO_MEM_VI32: {
24992    SDValue Mask = Op.getOperand(4);
24993    SDValue DataToTruncate = Op.getOperand(3);
24994    SDValue Addr = Op.getOperand(2);
24995    SDValue Chain = Op.getOperand(0);
24996
24997    MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
24998    assert(MemIntr && "Expected MemIntrinsicSDNode!");
24999
25000    EVT MemVT  = MemIntr->getMemoryVT();
25001
25002    uint16_t TruncationOp = IntrData->Opc0;
25003    switch (TruncationOp) {
25004    case X86ISD::VTRUNC: {
25005      if (isAllOnesConstant(Mask)) // return just a truncate store
25006        return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT,
25007                                 MemIntr->getMemOperand());
25008
25009      MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
25010      SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
25011      SDValue Offset = DAG.getUNDEF(VMask.getValueType());
25012
25013      return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, Offset, VMask,
25014                                MemVT, MemIntr->getMemOperand(), ISD::UNINDEXED,
25015                                true /* truncating */);
25016    }
25017    case X86ISD::VTRUNCUS:
25018    case X86ISD::VTRUNCS: {
25019      bool IsSigned = (TruncationOp == X86ISD::VTRUNCS);
25020      if (isAllOnesConstant(Mask))
25021        return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT,
25022                               MemIntr->getMemOperand(), DAG);
25023
25024      MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
25025      SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
25026
25027      return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr,
25028                                   VMask, MemVT, MemIntr->getMemOperand(), DAG);
25029    }
25030    default:
25031      llvm_unreachable("Unsupported truncstore intrinsic");
25032    }
25033  }
25034  }
25035}
25036
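// A brief usage sketch for the lowering below (illustrative only): for
//   %ra = call i8* @llvm.returnaddress(i32 1)
// on x86-64, the depth-1 frame address is computed first and the result is
// loaded from that address plus SlotSize (8), i.e. the caller's saved return
// address sitting just above its saved frame pointer.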
25037SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
25038                                           SelectionDAG &DAG) const {
25039  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
25040  MFI.setReturnAddressIsTaken(true);
25041
25042  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
25043    return SDValue();
25044
25045  unsigned Depth = Op.getConstantOperandVal(0);
25046  SDLoc dl(Op);
25047  EVT PtrVT = getPointerTy(DAG.getDataLayout());
25048
25049  if (Depth > 0) {
25050    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
25051    const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25052    SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
25053    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
25054                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
25055                       MachinePointerInfo());
25056  }
25057
25058  // Just load the return address.
25059  SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
25060  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
25061                     MachinePointerInfo());
25062}
25063
25064SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
25065                                                 SelectionDAG &DAG) const {
25066  DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true);
25067  return getReturnAddressFrameIndex(DAG);
25068}
25069
25070SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
25071  MachineFunction &MF = DAG.getMachineFunction();
25072  MachineFrameInfo &MFI = MF.getFrameInfo();
25073  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
25074  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25075  EVT VT = Op.getValueType();
25076
25077  MFI.setFrameAddressIsTaken(true);
25078
25079  if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
25080    // Depth > 0 makes no sense on targets which use Windows unwind codes.  It
25081    // is not possible to crawl up the stack without looking at the unwind codes
25082    // simultaneously.
25083    int FrameAddrIndex = FuncInfo->getFAIndex();
25084    if (!FrameAddrIndex) {
25085      // Set up a frame object for the return address.
25086      unsigned SlotSize = RegInfo->getSlotSize();
25087      FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
25088          SlotSize, /*SPOffset=*/0, /*IsImmutable=*/false);
25089      FuncInfo->setFAIndex(FrameAddrIndex);
25090    }
25091    return DAG.getFrameIndex(FrameAddrIndex, VT);
25092  }
25093
25094  unsigned FrameReg =
25095      RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
25096  SDLoc dl(Op);  // FIXME probably not meaningful
25097  unsigned Depth = Op.getConstantOperandVal(0);
25098  assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
25099          (FrameReg == X86::EBP && VT == MVT::i32)) &&
25100         "Invalid Frame Register!");
25101  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
25102  while (Depth--)
25103    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
25104                            MachinePointerInfo());
25105  return FrameAddr;
25106}
25107
25108// FIXME? Maybe this could be a TableGen attribute on some registers and
25109// this table could be generated automatically from RegInfo.
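// A minimal usage sketch, assuming the generic named-register intrinsics: IR
// such as
//   %sp = call i64 @llvm.read_register.i64(metadata !"rsp")
// reaches this hook with RegName == "rsp". The stack-pointer names are always
// accepted, while "ebp"/"rbp" hit the fatal error below unless the function
// keeps a frame pointer.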
25110Register X86TargetLowering::getRegisterByName(const char* RegName, LLT VT,
25111                                              const MachineFunction &MF) const {
25112  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
25113
25114  Register Reg = StringSwitch<unsigned>(RegName)
25115                       .Case("esp", X86::ESP)
25116                       .Case("rsp", X86::RSP)
25117                       .Case("ebp", X86::EBP)
25118                       .Case("rbp", X86::RBP)
25119                       .Default(0);
25120
25121  if (Reg == X86::EBP || Reg == X86::RBP) {
25122    if (!TFI.hasFP(MF))
25123      report_fatal_error("register " + StringRef(RegName) +
25124                         " is allocatable: function has no frame pointer");
25125#ifndef NDEBUG
25126    else {
25127      const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25128      Register FrameReg = RegInfo->getPtrSizedFrameRegister(MF);
25129      assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
25130             "Invalid Frame Register!");
25131    }
25132#endif
25133  }
25134
25135  if (Reg)
25136    return Reg;
25137
25138  report_fatal_error("Invalid register name global variable");
25139}
25140
25141SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
25142                                                     SelectionDAG &DAG) const {
25143  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25144  return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
25145}
25146
25147unsigned X86TargetLowering::getExceptionPointerRegister(
25148    const Constant *PersonalityFn) const {
25149  if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
25150    return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
25151
25152  return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
25153}
25154
25155unsigned X86TargetLowering::getExceptionSelectorRegister(
25156    const Constant *PersonalityFn) const {
25157  // Funclet personalities don't use selectors (the runtime does the selection).
25158  assert(!isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)));
25159  return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
25160}
25161
25162bool X86TargetLowering::needsFixedCatchObjects() const {
25163  return Subtarget.isTargetWin64();
25164}
25165
25166SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
25167  SDValue Chain     = Op.getOperand(0);
25168  SDValue Offset    = Op.getOperand(1);
25169  SDValue Handler   = Op.getOperand(2);
25170  SDLoc dl      (Op);
25171
25172  EVT PtrVT = getPointerTy(DAG.getDataLayout());
25173  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
25174  Register FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
25175  assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
25176          (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
25177         "Invalid Frame Register!");
25178  SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
25179  unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
25180
25181  SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
25182                                 DAG.getIntPtrConstant(RegInfo->getSlotSize(),
25183                                                       dl));
25184  StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
25185  Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
25186  Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
25187
25188  return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
25189                     DAG.getRegister(StoreAddrReg, PtrVT));
25190}
25191
25192SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
25193                                               SelectionDAG &DAG) const {
25194  SDLoc DL(Op);
25195  // If the subtarget is not 64-bit, we may need the global base register
25196  // after isel expands the pseudo, i.e., after the CGBR pass has run.
25197  // Therefore, ask for the GlobalBaseReg now, so that the pass
25198  // inserts the code for us in case we need it.
25199  // Otherwise, we would end up referencing a virtual register that is
25200  // never defined!
25201  if (!Subtarget.is64Bit()) {
25202    const X86InstrInfo *TII = Subtarget.getInstrInfo();
25203    (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
25204  }
25205  return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
25206                     DAG.getVTList(MVT::i32, MVT::Other),
25207                     Op.getOperand(0), Op.getOperand(1));
25208}
25209
25210SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
25211                                                SelectionDAG &DAG) const {
25212  SDLoc DL(Op);
25213  return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
25214                     Op.getOperand(0), Op.getOperand(1));
25215}
25216
25217SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
25218                                                       SelectionDAG &DAG) const {
25219  SDLoc DL(Op);
25220  return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
25221                     Op.getOperand(0));
25222}
25223
25224static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
25225  return Op.getOperand(0);
25226}
25227
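// Illustrative layout of the 23-byte trampoline that the 64-bit path below
// emits (offsets relative to Trmp; the i16 opcode stores are little-endian):
//    0: 49 BB <FPtr, 8 bytes>   movabsq $<nested function>, %r11
//   10: 49 BA <Nest, 8 bytes>   movabsq $<nest value>, %r10
//   20: 49 FF E3                jmpq *%r11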
25228SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
25229                                                SelectionDAG &DAG) const {
25230  SDValue Root = Op.getOperand(0);
25231  SDValue Trmp = Op.getOperand(1); // trampoline
25232  SDValue FPtr = Op.getOperand(2); // nested function
25233  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
25234  SDLoc dl (Op);
25235
25236  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
25237  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
25238
25239  if (Subtarget.is64Bit()) {
25240    SDValue OutChains[6];
25241
25242    // Large code-model.
25243    const unsigned char JMP64r  = 0xFF; // 64-bit jmp through register opcode.
25244    const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
25245
25246    const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
25247    const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
25248
25249    const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
25250
25251    // Load the pointer to the nested function into R11.
25252    unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
25253    SDValue Addr = Trmp;
25254    OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
25255                                Addr, MachinePointerInfo(TrmpAddr));
25256
25257    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
25258                       DAG.getConstant(2, dl, MVT::i64));
25259    OutChains[1] =
25260        DAG.getStore(Root, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 2),
25261                     /* Alignment = */ 2);
25262
25263    // Load the 'nest' parameter value into R10.
25264    // R10 is specified in X86CallingConv.td
25265    OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
25266    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
25267                       DAG.getConstant(10, dl, MVT::i64));
25268    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
25269                                Addr, MachinePointerInfo(TrmpAddr, 10));
25270
25271    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
25272                       DAG.getConstant(12, dl, MVT::i64));
25273    OutChains[3] =
25274        DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12),
25275                     /* Alignment = */ 2);
25276
25277    // Jump to the nested function.
25278    OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
25279    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
25280                       DAG.getConstant(20, dl, MVT::i64));
25281    OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
25282                                Addr, MachinePointerInfo(TrmpAddr, 20));
25283
25284    unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
25285    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
25286                       DAG.getConstant(22, dl, MVT::i64));
25287    OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
25288                                Addr, MachinePointerInfo(TrmpAddr, 22));
25289
25290    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
25291  } else {
25292    const Function *Func =
25293      cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
25294    CallingConv::ID CC = Func->getCallingConv();
25295    unsigned NestReg;
25296
25297    switch (CC) {
25298    default:
25299      llvm_unreachable("Unsupported calling convention");
25300    case CallingConv::C:
25301    case CallingConv::X86_StdCall: {
25302      // Pass 'nest' parameter in ECX.
25303      // Must be kept in sync with X86CallingConv.td
25304      NestReg = X86::ECX;
25305
25306      // Check that ECX wasn't needed by an 'inreg' parameter.
25307      FunctionType *FTy = Func->getFunctionType();
25308      const AttributeList &Attrs = Func->getAttributes();
25309
25310      if (!Attrs.isEmpty() && !Func->isVarArg()) {
25311        unsigned InRegCount = 0;
25312        unsigned Idx = 1;
25313
25314        for (FunctionType::param_iterator I = FTy->param_begin(),
25315             E = FTy->param_end(); I != E; ++I, ++Idx)
25316          if (Attrs.hasAttribute(Idx, Attribute::InReg)) {
25317            auto &DL = DAG.getDataLayout();
25318            // FIXME: should only count parameters that are lowered to integers.
25319            InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
25320          }
25321
25322        if (InRegCount > 2) {
25323          report_fatal_error("Nest register in use - reduce number of inreg"
25324                             " parameters!");
25325        }
25326      }
25327      break;
25328    }
25329    case CallingConv::X86_FastCall:
25330    case CallingConv::X86_ThisCall:
25331    case CallingConv::Fast:
25332    case CallingConv::Tail:
25333      // Pass 'nest' parameter in EAX.
25334      // Must be kept in sync with X86CallingConv.td
25335      NestReg = X86::EAX;
25336      break;
25337    }
25338
25339    SDValue OutChains[4];
25340    SDValue Addr, Disp;
25341
25342    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
25343                       DAG.getConstant(10, dl, MVT::i32));
25344    Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
25345
25346    // This is storing the opcode for MOV32ri.
25347    const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
25348    const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
25349    OutChains[0] =
25350        DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
25351                     Trmp, MachinePointerInfo(TrmpAddr));
25352
25353    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
25354                       DAG.getConstant(1, dl, MVT::i32));
25355    OutChains[1] =
25356        DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 1),
25357                     /* Alignment = */ 1);
25358
25359    const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
25360    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
25361                       DAG.getConstant(5, dl, MVT::i32));
25362    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8),
25363                                Addr, MachinePointerInfo(TrmpAddr, 5),
25364                                /* Alignment = */ 1);
25365
25366    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
25367                       DAG.getConstant(6, dl, MVT::i32));
25368    OutChains[3] =
25369        DAG.getStore(Root, dl, Disp, Addr, MachinePointerInfo(TrmpAddr, 6),
25370                     /* Alignment = */ 1);
25371
25372    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
25373  }
25374}
25375
25376SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
25377                                            SelectionDAG &DAG) const {
25378  /*
25379   The rounding mode is in bits 11:10 of the x87 FP Control Word (FPCW,
25380   stored below with FNSTCW), and has the following settings:
25381     00 Round to nearest
25382     01 Round to -inf
25383     10 Round to +inf
25384     11 Round to 0
25385
25386  FLT_ROUNDS, on the other hand, expects the following:
25387    -1 Undefined
25388     0 Round to 0
25389     1 Round to nearest
25390     2 Round to +inf
25391     3 Round to -inf
25392
25393  To perform the conversion, we do:
25394    (((((FPCW & 0x800) >> 11) | ((FPCW & 0x400) >> 9)) + 1) & 3)
25395  */
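  // Spot-checking the conversion for each rounding-control setting
  // (bit 11, bit 10): 00 -> (0|0)+1 = 1 (nearest), 01 -> (0|2)+1 = 3 (-inf),
  // 10 -> (1|0)+1 = 2 (+inf), 11 -> ((1|2)+1) & 3 = 0 (toward zero), which
  // matches the FLT_ROUNDS encoding above.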
25396
25397  MachineFunction &MF = DAG.getMachineFunction();
25398  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
25399  const Align StackAlignment(TFI.getStackAlignment());
25400  MVT VT = Op.getSimpleValueType();
25401  SDLoc DL(Op);
25402
25403  // Save FP Control Word to stack slot
25404  int SSFI =
25405      MF.getFrameInfo().CreateStackObject(2, StackAlignment.value(), false);
25406  SDValue StackSlot =
25407      DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));
25408
25409  MachineMemOperand *MMO =
25410      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
25411                              MachineMemOperand::MOStore, 2, 2);
25412
25413  SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
25414  SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
25415                                          DAG.getVTList(MVT::Other),
25416                                          Ops, MVT::i16, MMO);
25417
25418  // Load FP Control Word from stack slot
25419  SDValue CWD =
25420      DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MachinePointerInfo());
25421
25422  // Transform as necessary
25423  SDValue CWD1 =
25424    DAG.getNode(ISD::SRL, DL, MVT::i16,
25425                DAG.getNode(ISD::AND, DL, MVT::i16,
25426                            CWD, DAG.getConstant(0x800, DL, MVT::i16)),
25427                DAG.getConstant(11, DL, MVT::i8));
25428  SDValue CWD2 =
25429    DAG.getNode(ISD::SRL, DL, MVT::i16,
25430                DAG.getNode(ISD::AND, DL, MVT::i16,
25431                            CWD, DAG.getConstant(0x400, DL, MVT::i16)),
25432                DAG.getConstant(9, DL, MVT::i8));
25433
25434  SDValue RetVal =
25435    DAG.getNode(ISD::AND, DL, MVT::i16,
25436                DAG.getNode(ISD::ADD, DL, MVT::i16,
25437                            DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
25438                            DAG.getConstant(1, DL, MVT::i16)),
25439                DAG.getConstant(3, DL, MVT::i16));
25440
25441  return DAG.getNode((VT.getSizeInBits() < 16 ?
25442                      ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
25443}
25444
25445// Split a unary integer op into 2 half-sized ops.
25446static SDValue LowerVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
25447  MVT VT = Op.getSimpleValueType();
25448  unsigned NumElems = VT.getVectorNumElements();
25449  unsigned SizeInBits = VT.getSizeInBits();
25450  MVT EltVT = VT.getVectorElementType();
25451  SDValue Src = Op.getOperand(0);
25452  assert(EltVT == Src.getSimpleValueType().getVectorElementType() &&
25453         "Src and Op should have the same element type!");
25454
25455  // Extract the Lo/Hi vectors
25456  SDLoc dl(Op);
25457  SDValue Lo = extractSubVector(Src, 0, DAG, dl, SizeInBits / 2);
25458  SDValue Hi = extractSubVector(Src, NumElems / 2, DAG, dl, SizeInBits / 2);
25459
25460  MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2);
25461  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
25462                     DAG.getNode(Op.getOpcode(), dl, NewVT, Lo),
25463                     DAG.getNode(Op.getOpcode(), dl, NewVT, Hi));
25464}
25465
25466// Decompose 256-bit ops into smaller 128-bit ops.
25467static SDValue Lower256IntUnary(SDValue Op, SelectionDAG &DAG) {
25468  assert(Op.getSimpleValueType().is256BitVector() &&
25469         Op.getSimpleValueType().isInteger() &&
25470         "Only handle AVX 256-bit vector integer operation");
25471  return LowerVectorIntUnary(Op, DAG);
25472}
25473
25474// Decompose 512-bit ops into smaller 256-bit ops.
25475static SDValue Lower512IntUnary(SDValue Op, SelectionDAG &DAG) {
25476  assert(Op.getSimpleValueType().is512BitVector() &&
25477         Op.getSimpleValueType().isInteger() &&
25478         "Only handle AVX 512-bit vector integer operation");
25479  return LowerVectorIntUnary(Op, DAG);
25480}
25481
25482/// Lower a vector CTLZ using native supported vector CTLZ instruction.
25483//
25484// i8/i16 vector implemented using dword LZCNT vector instruction
25485// ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
25486// split the vector, perform the operation on its Lo and Hi parts and
25487// concatenate the results.
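//
// For example, for a non-zero i8 element x, lzcnt32(zext32(x)) == 24 + ctlz8(x),
// so subtracting the delta (32 - 8 == 24) recovers the i8 result; a zero
// element yields 32 - 24 == 8, as ISD::CTLZ requires.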
25488static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
25489                                         const X86Subtarget &Subtarget) {
25490  assert(Op.getOpcode() == ISD::CTLZ);
25491  SDLoc dl(Op);
25492  MVT VT = Op.getSimpleValueType();
25493  MVT EltVT = VT.getVectorElementType();
25494  unsigned NumElems = VT.getVectorNumElements();
25495
25496  assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
25497          "Unsupported element type");
25498
25499  // Split the vector; its Lo and Hi parts will be handled in the next iteration.
25500  if (NumElems > 16 ||
25501      (NumElems == 16 && !Subtarget.canExtendTo512DQ()))
25502    return LowerVectorIntUnary(Op, DAG);
25503
25504  MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
25505  assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
25506          "Unsupported value type for operation");
25507
25508  // Use native supported vector instruction vplzcntd.
25509  Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
25510  SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
25511  SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
25512  SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
25513
25514  return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
25515}
25516
25517// Lower CTLZ using a PSHUFB lookup table implementation.
25518static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
25519                                       const X86Subtarget &Subtarget,
25520                                       SelectionDAG &DAG) {
25521  MVT VT = Op.getSimpleValueType();
25522  int NumElts = VT.getVectorNumElements();
25523  int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
25524  MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
25525
25526  // Per-nibble leading zero PSHUFB lookup table.
25527  const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
25528                       /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
25529                       /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
25530                       /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
25531
25532  SmallVector<SDValue, 64> LUTVec;
25533  for (int i = 0; i < NumBytes; ++i)
25534    LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
25535  SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);
25536
25537  // Begin by bitcasting the input to byte vector, then split those bytes
25538  // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
25539  // If the hi input nibble is zero then we add both results together, otherwise
25540  // we just take the hi result (by masking the lo result to zero before the
25541  // add).
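  //
  // Worked example for one byte b = 0x1A (hi nibble 1, lo nibble 0xA): the hi
  // nibble is non-zero, so the lo result is masked to zero and the sum is
  // LUT[1] == 3 == ctlz8(0x1A). For b = 0x05 the hi nibble is zero, so the
  // result is LUT[0] + LUT[5] == 4 + 1 == 5 == ctlz8(0x05).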
25542  SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
25543  SDValue Zero = DAG.getConstant(0, DL, CurrVT);
25544
25545  SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
25546  SDValue Lo = Op0;
25547  SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
25548  SDValue HiZ;
25549  if (CurrVT.is512BitVector()) {
25550    MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
25551    HiZ = DAG.getSetCC(DL, MaskVT, Hi, Zero, ISD::SETEQ);
25552    HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
25553  } else {
25554    HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
25555  }
25556
25557  Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
25558  Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
25559  Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
25560  SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
25561
25562  // Merge result back from vXi8 back to VT, working on the lo/hi halves
25563  // of the current vector width in the same way we did for the nibbles.
25564  // If the upper half of the input element is zero then add the halves'
25565  // leading zero counts together, otherwise just use the upper half's.
25566  // Double the width of the result until we are at target width.
25567  while (CurrVT != VT) {
25568    int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
25569    int CurrNumElts = CurrVT.getVectorNumElements();
25570    MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
25571    MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
25572    SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
25573
25574    // Check if the upper half of the input element is zero.
25575    if (CurrVT.is512BitVector()) {
25576      MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
25577      HiZ = DAG.getSetCC(DL, MaskVT, DAG.getBitcast(CurrVT, Op0),
25578                         DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
25579      HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
25580    } else {
25581      HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
25582                         DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
25583    }
25584    HiZ = DAG.getBitcast(NextVT, HiZ);
25585
25586    // Move the upper/lower halves to the lower bits as we'll be extending to
25587    // NextVT. Mask the lower result to zero if HiZ is true and add the results
25588    // together.
25589    SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
25590    SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
25591    SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
25592    R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
25593    Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
25594    CurrVT = NextVT;
25595  }
25596
25597  return Res;
25598}
25599
25600static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
25601                               const X86Subtarget &Subtarget,
25602                               SelectionDAG &DAG) {
25603  MVT VT = Op.getSimpleValueType();
25604
25605  if (Subtarget.hasCDI() &&
25606      // vXi8 vectors need to be promoted to 512-bits for vXi32.
25607      (Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
25608    return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);
25609
25610  // Decompose 256-bit ops into smaller 128-bit ops.
25611  if (VT.is256BitVector() && !Subtarget.hasInt256())
25612    return Lower256IntUnary(Op, DAG);
25613
25614  // Decompose 512-bit ops into smaller 256-bit ops.
25615  if (VT.is512BitVector() && !Subtarget.hasBWI())
25616    return Lower512IntUnary(Op, DAG);
25617
25618  assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
25619  return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
25620}
25621
25622static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
25623                         SelectionDAG &DAG) {
25624  MVT VT = Op.getSimpleValueType();
25625  MVT OpVT = VT;
25626  unsigned NumBits = VT.getSizeInBits();
25627  SDLoc dl(Op);
25628  unsigned Opc = Op.getOpcode();
25629
25630  if (VT.isVector())
25631    return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
25632
25633  Op = Op.getOperand(0);
25634  if (VT == MVT::i8) {
25635    // Zero extend to i32 since there is not an i8 bsr.
25636    OpVT = MVT::i32;
25637    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
25638  }
25639
25640  // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
25641  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
25642  Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
25643
25644  if (Opc == ISD::CTLZ) {
25645    // If src is zero (i.e. bsr sets ZF), returns NumBits.
25646    SDValue Ops[] = {Op, DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
25647                     DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
25648                     Op.getValue(1)};
25649    Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
25650  }
25651
25652  // Finally xor with NumBits-1.
25653  Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
25654                   DAG.getConstant(NumBits - 1, dl, OpVT));
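  // (BSR returns the index of the highest set bit, so XOR-ing with NumBits - 1
  // converts it to a leading-zero count; e.g. for i32 x == 0x000000F0, BSR
  // yields 7 and 7 ^ 31 == 24 == ctlz(x).)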
25655
25656  if (VT == MVT::i8)
25657    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
25658  return Op;
25659}
25660
25661static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
25662                         SelectionDAG &DAG) {
25663  MVT VT = Op.getSimpleValueType();
25664  unsigned NumBits = VT.getScalarSizeInBits();
25665  SDValue N0 = Op.getOperand(0);
25666  SDLoc dl(Op);
25667
25668  assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
25669         "Only scalar CTTZ requires custom lowering");
25670
25671  // Issue a bsf (scan bits forward) which also sets EFLAGS.
25672  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
25673  Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);
25674
25675  // If src is zero (i.e. bsf sets ZF), returns NumBits.
25676  SDValue Ops[] = {Op, DAG.getConstant(NumBits, dl, VT),
25677                   DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
25678                   Op.getValue(1)};
25679  return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
25680}
25681
25682/// Break a 256-bit integer operation into two new 128-bit ones and then
25683/// concatenate the result back.
25684static SDValue split256IntArith(SDValue Op, SelectionDAG &DAG) {
25685  MVT VT = Op.getSimpleValueType();
25686
25687  assert(VT.is256BitVector() && VT.isInteger() &&
25688         "Unsupported value type for operation");
25689
25690  unsigned NumElems = VT.getVectorNumElements();
25691  SDLoc dl(Op);
25692
25693  // Extract the LHS vectors
25694  SDValue LHS = Op.getOperand(0);
25695  SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
25696  SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
25697
25698  // Extract the RHS vectors
25699  SDValue RHS = Op.getOperand(1);
25700  SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
25701  SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
25702
25703  MVT EltVT = VT.getVectorElementType();
25704  MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
25705
25706  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
25707                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
25708                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
25709}
25710
25711/// Break a 512-bit integer operation into two new 256-bit ones and then
25712/// concatenate the result back.
25713static SDValue split512IntArith(SDValue Op, SelectionDAG &DAG) {
25714  MVT VT = Op.getSimpleValueType();
25715
25716  assert(VT.is512BitVector() && VT.isInteger() &&
25717         "Unsupported value type for operation");
25718
25719  unsigned NumElems = VT.getVectorNumElements();
25720  SDLoc dl(Op);
25721
25722  // Extract the LHS vectors
25723  SDValue LHS = Op.getOperand(0);
25724  SDValue LHS1 = extract256BitVector(LHS, 0, DAG, dl);
25725  SDValue LHS2 = extract256BitVector(LHS, NumElems / 2, DAG, dl);
25726
25727  // Extract the RHS vectors
25728  SDValue RHS = Op.getOperand(1);
25729  SDValue RHS1 = extract256BitVector(RHS, 0, DAG, dl);
25730  SDValue RHS2 = extract256BitVector(RHS, NumElems / 2, DAG, dl);
25731
25732  MVT EltVT = VT.getVectorElementType();
25733  MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
25734
25735  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
25736                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
25737                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
25738}
25739
25740static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
25741                           const X86Subtarget &Subtarget) {
25742  MVT VT = Op.getSimpleValueType();
25743  if (VT == MVT::i16 || VT == MVT::i32)
25744    return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
25745
25746  if (VT.getScalarType() == MVT::i1)
25747    return DAG.getNode(ISD::XOR, SDLoc(Op), VT,
25748                       Op.getOperand(0), Op.getOperand(1));
25749
25750  assert(Op.getSimpleValueType().is256BitVector() &&
25751         Op.getSimpleValueType().isInteger() &&
25752         "Only handle AVX 256-bit vector integer operation");
25753  return split256IntArith(Op, DAG);
25754}
25755
25756static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG,
25757                                  const X86Subtarget &Subtarget) {
25758  MVT VT = Op.getSimpleValueType();
25759  SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
25760  unsigned Opcode = Op.getOpcode();
25761  if (VT.getScalarType() == MVT::i1) {
25762    SDLoc dl(Op);
25763    switch (Opcode) {
25764    default: llvm_unreachable("Expected saturated arithmetic opcode");
25765    case ISD::UADDSAT:
25766    case ISD::SADDSAT:
25767      // *addsat i1 X, Y --> X | Y
25768      return DAG.getNode(ISD::OR, dl, VT, X, Y);
25769    case ISD::USUBSAT:
25770    case ISD::SSUBSAT:
25771      // *subsat i1 X, Y --> X & ~Y
25772      return DAG.getNode(ISD::AND, dl, VT, X, DAG.getNOT(dl, Y, VT));
25773    }
25774  }
25775
25776  if (VT.is128BitVector()) {
25777    // Avoid the generic expansion with min/max if we don't have pminu*/pmaxu*.
25778    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25779    EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
25780                                                 *DAG.getContext(), VT);
25781    SDLoc DL(Op);
25782    if (Opcode == ISD::UADDSAT && !TLI.isOperationLegal(ISD::UMIN, VT)) {
25783      // uaddsat X, Y --> (X >u (X + Y)) ? -1 : X + Y
25784      SDValue Add = DAG.getNode(ISD::ADD, DL, VT, X, Y);
25785      SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Add, ISD::SETUGT);
25786      return DAG.getSelect(DL, VT, Cmp, DAG.getAllOnesConstant(DL, VT), Add);
25787    }
25788    if (Opcode == ISD::USUBSAT && !TLI.isOperationLegal(ISD::UMAX, VT)) {
25789      // usubsat X, Y --> (X >u Y) ? X - Y : 0
25790      SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, X, Y);
25791      SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Y, ISD::SETUGT);
25792      return DAG.getSelect(DL, VT, Cmp, Sub, DAG.getConstant(0, DL, VT));
25793    }
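    // Sanity check of the two expansions above on i8 elements: uaddsat 250, 10
    // wraps to 4 and 250 >u 4 selects the all-ones result (255); usubsat 5, 10
    // fails 5 >u 10 and selects 0, as expected.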
25794    // Use default expansion.
25795    return SDValue();
25796  }
25797
25798  assert(Op.getSimpleValueType().is256BitVector() &&
25799         Op.getSimpleValueType().isInteger() &&
25800         "Only handle AVX 256-bit vector integer operation");
25801  return split256IntArith(Op, DAG);
25802}
25803
25804static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
25805                        SelectionDAG &DAG) {
25806  MVT VT = Op.getSimpleValueType();
25807  if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
25808    // Since X86 does not have CMOV for 8-bit integers, we don't convert
25809    // 8-bit integer abs to NEG and CMOV.
25810    SDLoc DL(Op);
25811    SDValue N0 = Op.getOperand(0);
25812    SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
25813                              DAG.getConstant(0, DL, VT), N0);
25814    SDValue Ops[] = {N0, Neg, DAG.getTargetConstant(X86::COND_GE, DL, MVT::i8),
25815                     SDValue(Neg.getNode(), 1)};
25816    return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
25817  }
25818
25819  // ABS(vXi64 X) --> VPBLENDVPD(X, 0-X, X).
25820  if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
25821    SDLoc DL(Op);
25822    SDValue Src = Op.getOperand(0);
25823    SDValue Sub =
25824        DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
25825    return DAG.getNode(X86ISD::BLENDV, DL, VT, Src, Sub, Src);
25826  }
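  // (BLENDV keys off the sign bit of the per-element selector, here Src
  // itself: negative elements pick Sub == 0 - X and non-negative elements keep
  // X, which is exactly |X|; e.g. X == -7 selects 7, X == 7 keeps 7.)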
25827
25828  if (VT.is256BitVector() && !Subtarget.hasInt256()) {
25829    assert(VT.isInteger() &&
25830           "Only handle AVX 256-bit vector integer operation");
25831    return Lower256IntUnary(Op, DAG);
25832  }
25833
25834  // Default to expand.
25835  return SDValue();
25836}
25837
25838static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
25839  MVT VT = Op.getSimpleValueType();
25840
25841  // For AVX1 cases, split to use legal ops (everything but v4i64).
25842  if (VT.getScalarType() != MVT::i64 && VT.is256BitVector())
25843    return split256IntArith(Op, DAG);
25844
25845  SDLoc DL(Op);
25846  unsigned Opcode = Op.getOpcode();
25847  SDValue N0 = Op.getOperand(0);
25848  SDValue N1 = Op.getOperand(1);
25849
25850  // For pre-SSE41, we can perform UMIN/UMAX v8i16 by flipping the signbit,
25851  // using the SMIN/SMAX instructions and flipping the signbit back.
25852  if (VT == MVT::v8i16) {
25853    assert((Opcode == ISD::UMIN || Opcode == ISD::UMAX) &&
25854           "Unexpected MIN/MAX opcode");
25855    SDValue Sign = DAG.getConstant(APInt::getSignedMinValue(16), DL, VT);
25856    N0 = DAG.getNode(ISD::XOR, DL, VT, N0, Sign);
25857    N1 = DAG.getNode(ISD::XOR, DL, VT, N1, Sign);
25858    Opcode = (Opcode == ISD::UMIN ? ISD::SMIN : ISD::SMAX);
25859    SDValue Result = DAG.getNode(Opcode, DL, VT, N0, N1);
25860    return DAG.getNode(ISD::XOR, DL, VT, Result, Sign);
25861  }
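  // (Example of the signbit trick: umin(0x0001, 0xFFFF) becomes
  // smin(0x8001, 0x7FFF) == 0x8001, and flipping the sign bit back gives
  // 0x0001, the unsigned minimum.)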
25862
25863  // Else, expand to a compare/select.
25864  ISD::CondCode CC;
25865  switch (Opcode) {
25866  case ISD::SMIN: CC = ISD::CondCode::SETLT;  break;
25867  case ISD::SMAX: CC = ISD::CondCode::SETGT;  break;
25868  case ISD::UMIN: CC = ISD::CondCode::SETULT; break;
25869  case ISD::UMAX: CC = ISD::CondCode::SETUGT; break;
25870  default: llvm_unreachable("Unknown MINMAX opcode");
25871  }
25872
25873  SDValue Cond = DAG.getSetCC(DL, VT, N0, N1, CC);
25874  return DAG.getSelect(DL, VT, Cond, N0, N1);
25875}
25876
25877static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
25878                        SelectionDAG &DAG) {
25879  SDLoc dl(Op);
25880  MVT VT = Op.getSimpleValueType();
25881
25882  if (VT.getScalarType() == MVT::i1)
25883    return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), Op.getOperand(1));
25884
25885  // Decompose 256-bit ops into 128-bit ops.
25886  if (VT.is256BitVector() && !Subtarget.hasInt256())
25887    return split256IntArith(Op, DAG);
25888
25889  SDValue A = Op.getOperand(0);
25890  SDValue B = Op.getOperand(1);
25891
25892  // Lower v16i8/v32i8/v64i8 mul by extending to v8i16/v16i16/v32i16
25893  // vector pairs, multiplying, and truncating.
25894  if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
25895    unsigned NumElts = VT.getVectorNumElements();
25896
25897    if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
25898        (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
25899      MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
25900      return DAG.getNode(
25901          ISD::TRUNCATE, dl, VT,
25902          DAG.getNode(ISD::MUL, dl, ExVT,
25903                      DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, A),
25904                      DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, B)));
25905    }
25906
25907    MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
25908
25909    // Extract the lo/hi parts and any-extend them to i16.
25910    // We're going to mask off the low byte of each result element of the
25911    // pmullw, so it doesn't matter what's in the high byte of each 16-bit
25912    // element.
25913    SDValue Undef = DAG.getUNDEF(VT);
25914    SDValue ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Undef));
25915    SDValue AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Undef));
25916
25917    SDValue BLo, BHi;
25918    if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
25919      // If the RHS (B) is a constant build vector, manually unpackl/unpackh.
25920      SmallVector<SDValue, 16> LoOps, HiOps;
25921      for (unsigned i = 0; i != NumElts; i += 16) {
25922        for (unsigned j = 0; j != 8; ++j) {
25923          LoOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j), dl,
25924                                               MVT::i16));
25925          HiOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j + 8), dl,
25926                                               MVT::i16));
25927        }
25928      }
25929
25930      BLo = DAG.getBuildVector(ExVT, dl, LoOps);
25931      BHi = DAG.getBuildVector(ExVT, dl, HiOps);
25932    } else {
25933      BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef));
25934      BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef));
25935    }
25936
25937    // Multiply, mask the lower 8bits of the lo/hi results and pack.
25938    SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
25939    SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
25940    RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, dl, ExVT));
25941    RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, dl, ExVT));
25942    return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
25943  }
25944
25945  // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
25946  if (VT == MVT::v4i32) {
25947    assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
25948           "Should not custom lower when pmulld is available!");
25949
25950    // Extract the odd parts.
25951    static const int UnpackMask[] = { 1, -1, 3, -1 };
25952    SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
25953    SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
25954
25955    // Multiply the even parts.
25956    SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
25957                                DAG.getBitcast(MVT::v2i64, A),
25958                                DAG.getBitcast(MVT::v2i64, B));
25959    // Now multiply odd parts.
25960    SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
25961                               DAG.getBitcast(MVT::v2i64, Aodds),
25962                               DAG.getBitcast(MVT::v2i64, Bodds));
25963
25964    Evens = DAG.getBitcast(VT, Evens);
25965    Odds = DAG.getBitcast(VT, Odds);
25966
25967    // Merge the two vectors back together with a shuffle. This expands into 2
25968    // shuffles.
25969    static const int ShufMask[] = { 0, 4, 2, 6 };
25970    return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
25971  }
25972
25973  assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
25974         "Only know how to lower V2I64/V4I64/V8I64 multiply");
25975  assert(!Subtarget.hasDQI() && "DQI should use MULLQ");
25976
25977  //  Ahi = psrlqi(a, 32);
25978  //  Bhi = psrlqi(b, 32);
25979  //
25980  //  AloBlo = pmuludq(a, b);
25981  //  AloBhi = pmuludq(a, Bhi);
25982  //  AhiBlo = pmuludq(Ahi, b);
25983  //
25984  //  Hi = psllqi(AloBhi + AhiBlo, 32);
25985  //  return AloBlo + Hi;
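  //
  // This is schoolbook multiplication on 32-bit digits: with
  // a == Ahi * 2^32 + Alo and b == Bhi * 2^32 + Blo,
  //   a * b mod 2^64 == Alo*Blo + ((Alo*Bhi + Ahi*Blo) << 32),
  // where each partial product is a 32x32->64 PMULUDQ.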
25986  KnownBits AKnown = DAG.computeKnownBits(A);
25987  KnownBits BKnown = DAG.computeKnownBits(B);
25988
25989  APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
25990  bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
25991  bool BLoIsZero = LowerBitsMask.isSubsetOf(BKnown.Zero);
25992
25993  APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
25994  bool AHiIsZero = UpperBitsMask.isSubsetOf(AKnown.Zero);
25995  bool BHiIsZero = UpperBitsMask.isSubsetOf(BKnown.Zero);
25996
25997  SDValue Zero = DAG.getConstant(0, dl, VT);
25998
25999  // Only multiply lo/hi halves that aren't known to be zero.
26000  SDValue AloBlo = Zero;
26001  if (!ALoIsZero && !BLoIsZero)
26002    AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
26003
26004  SDValue AloBhi = Zero;
26005  if (!ALoIsZero && !BHiIsZero) {
26006    SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
26007    AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
26008  }
26009
26010  SDValue AhiBlo = Zero;
26011  if (!AHiIsZero && !BLoIsZero) {
26012    SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
26013    AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
26014  }
26015
26016  SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
26017  Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);
26018
26019  return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
26020}
26021
26022static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
26023                         SelectionDAG &DAG) {
26024  SDLoc dl(Op);
26025  MVT VT = Op.getSimpleValueType();
26026  bool IsSigned = Op->getOpcode() == ISD::MULHS;
26027  unsigned NumElts = VT.getVectorNumElements();
26028  SDValue A = Op.getOperand(0);
26029  SDValue B = Op.getOperand(1);
26030
26031  // Decompose 256-bit ops into 128-bit ops.
26032  if (VT.is256BitVector() && !Subtarget.hasInt256())
26033    return split256IntArith(Op, DAG);
26034
26035  if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) {
26036    assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
26037           (VT == MVT::v8i32 && Subtarget.hasInt256()) ||
26038           (VT == MVT::v16i32 && Subtarget.hasAVX512()));
26039
26040    // PMULxD operations multiply each even value (starting at 0) of LHS with
26041    // the related value of RHS and produce a widened result.
26042    // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
26043    // => <2 x i64> <ae|cg>
26044    //
26045    // In other words, to have all the results, we need to perform two PMULxD:
26046    // 1. one with the even values.
26047    // 2. one with the odd values.
26048    // To achieve #2, we need to place the odd values at even positions.
26049    //
26050    // Place the odd value at an even position (basically, shift all values 1
26051    // step to the left):
26052    const int Mask[] = {1, -1,  3, -1,  5, -1,  7, -1,
26053                        9, -1, 11, -1, 13, -1, 15, -1};
26054    // <a|b|c|d> => <b|undef|d|undef>
26055    SDValue Odd0 = DAG.getVectorShuffle(VT, dl, A, A,
26056                                        makeArrayRef(&Mask[0], NumElts));
26057    // <e|f|g|h> => <f|undef|h|undef>
26058    SDValue Odd1 = DAG.getVectorShuffle(VT, dl, B, B,
26059                                        makeArrayRef(&Mask[0], NumElts));
26060
26061    // Emit two multiplies, one for the lower 2 ints and one for the higher 2
26062    // ints.
26063    MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
26064    unsigned Opcode =
26065        (IsSigned && Subtarget.hasSSE41()) ? X86ISD::PMULDQ : X86ISD::PMULUDQ;
26066    // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
26067    // => <2 x i64> <ae|cg>
26068    SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
26069                                                  DAG.getBitcast(MulVT, A),
26070                                                  DAG.getBitcast(MulVT, B)));
26071    // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
26072    // => <2 x i64> <bf|dh>
26073    SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
26074                                                  DAG.getBitcast(MulVT, Odd0),
26075                                                  DAG.getBitcast(MulVT, Odd1)));
26076
26077    // Shuffle it back into the right order.
26078    SmallVector<int, 16> ShufMask(NumElts);
26079    for (int i = 0; i != (int)NumElts; ++i)
26080      ShufMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;
26081
26082    SDValue Res = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, ShufMask);
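    // For v4i32 the mask is {1, 5, 3, 7}: viewed as i32 elements, Mul1 is
    // <lo(ae), hi(ae), lo(cg), hi(cg)> and Mul2 is
    // <lo(bf), hi(bf), lo(dh), hi(dh)>, so the shuffle collects the four high
    // halves <hi(ae), hi(bf), hi(cg), hi(dh)>, i.e. the high-multiply results
    // in the original element order.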
26083
26084    // If we have a signed multiply but no PMULDQ fix up the result of an
26085    // unsigned multiply.
26086    if (IsSigned && !Subtarget.hasSSE41()) {
26087      SDValue Zero = DAG.getConstant(0, dl, VT);
26088      SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
26089                               DAG.getSetCC(dl, VT, Zero, A, ISD::SETGT), B);
26090      SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
26091                               DAG.getSetCC(dl, VT, Zero, B, ISD::SETGT), A);
26092
26093      SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
26094      Res = DAG.getNode(ISD::SUB, dl, VT, Res, Fixup);
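      // (This applies the identity
      //   mulhs(a, b) == mulhu(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0),
      // with T1/T2 materializing the two conditional subtrahends.)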
26095    }
26096
26097    return Res;
26098  }
26099
26100  // Only i8 vectors should need custom lowering after this.
26101  assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
26102         (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
26103         "Unsupported vector type");
26104
26105  // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
26106  // logical shift down the upper half and pack back to i8.
26107
26108  // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
26109  // and then ashr/lshr the upper bits down to the lower bits before multiply.
26110  unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
26111
26112  if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
26113      (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
26114    MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
26115    SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
26116    SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
26117    SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
26118    Mul = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
26119    return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
26120  }
26121
26122  // For signed 512-bit vectors, split into 256-bit vectors to allow the
26123  // sign-extension to occur.
26124  if (VT == MVT::v64i8 && IsSigned)
26125    return split512IntArith(Op, DAG);
26126
26127  // Signed AVX2 implementation - extend xmm subvectors to ymm.
26128  if (VT == MVT::v32i8 && IsSigned) {
26129    MVT ExVT = MVT::v16i16;
26130    SDValue ALo = extract128BitVector(A, 0, DAG, dl);
26131    SDValue BLo = extract128BitVector(B, 0, DAG, dl);
26132    SDValue AHi = extract128BitVector(A, NumElts / 2, DAG, dl);
26133    SDValue BHi = extract128BitVector(B, NumElts / 2, DAG, dl);
26134    ALo = DAG.getNode(ExAVX, dl, ExVT, ALo);
26135    BLo = DAG.getNode(ExAVX, dl, ExVT, BLo);
26136    AHi = DAG.getNode(ExAVX, dl, ExVT, AHi);
26137    BHi = DAG.getNode(ExAVX, dl, ExVT, BHi);
26138    SDValue Lo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
26139    SDValue Hi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
26140    Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Lo, 8, DAG);
26141    Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Hi, 8, DAG);
26142
26143    // Bitcast back to VT and then pack all the even elements from Lo and Hi.
26144    // Shuffle lowering should turn this into PACKUS+PERMQ
26145    Lo = DAG.getBitcast(VT, Lo);
26146    Hi = DAG.getBitcast(VT, Hi);
26147    return DAG.getVectorShuffle(VT, dl, Lo, Hi,
26148                                { 0,  2,  4,  6,  8, 10, 12, 14,
26149                                 16, 18, 20, 22, 24, 26, 28, 30,
26150                                 32, 34, 36, 38, 40, 42, 44, 46,
26151                                 48, 50, 52, 54, 56, 58, 60, 62});
26152  }
26153
26154  // For signed v16i8 and all unsigned vXi8 we will unpack the low and high
26155  // half of each 128 bit lane to widen to a vXi16 type. Do the multiplies,
26156  // shift the results and pack the half lane results back together.
26157
26158  MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
26159
26160  static const int PSHUFDMask[] = { 8,  9, 10, 11, 12, 13, 14, 15,
26161                                   -1, -1, -1, -1, -1, -1, -1, -1};
26162
26163  // Extract the lo parts and zero/sign extend to i16.
26164  // Only use SSE4.1 instructions for signed v16i8 where using unpack requires
26165  // shifts to sign extend. Using unpack for unsigned only requires an xor to
26166  // create zeros and a copy due to tied register constraints pre-AVX. But using
26167  // zero_extend_vector_inreg would require an additional pshufd for the high
26168  // part.
26169
26170  SDValue ALo, AHi;
26171  if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
26172    ALo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, A);
26173
26174    AHi = DAG.getVectorShuffle(VT, dl, A, A, PSHUFDMask);
26175    AHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, AHi);
26176  } else if (IsSigned) {
26177    ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), A));
26178    AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), A));
26179
26180    ALo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, ALo, 8, DAG);
26181    AHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, AHi, 8, DAG);
26182  } else {
26183    ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A,
26184                                          DAG.getConstant(0, dl, VT)));
26185    AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A,
26186                                          DAG.getConstant(0, dl, VT)));
26187  }
26188
26189  SDValue BLo, BHi;
26190  if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
26191    // If the RHS (B) is a constant build vector, manually unpackl/unpackh and extend.
26192    SmallVector<SDValue, 16> LoOps, HiOps;
26193    for (unsigned i = 0; i != NumElts; i += 16) {
26194      for (unsigned j = 0; j != 8; ++j) {
26195        SDValue LoOp = B.getOperand(i + j);
26196        SDValue HiOp = B.getOperand(i + j + 8);
26197
26198        if (IsSigned) {
26199          LoOp = DAG.getSExtOrTrunc(LoOp, dl, MVT::i16);
26200          HiOp = DAG.getSExtOrTrunc(HiOp, dl, MVT::i16);
26201        } else {
26202          LoOp = DAG.getZExtOrTrunc(LoOp, dl, MVT::i16);
26203          HiOp = DAG.getZExtOrTrunc(HiOp, dl, MVT::i16);
26204        }
26205
26206        LoOps.push_back(LoOp);
26207        HiOps.push_back(HiOp);
26208      }
26209    }
26210
26211    BLo = DAG.getBuildVector(ExVT, dl, LoOps);
26212    BHi = DAG.getBuildVector(ExVT, dl, HiOps);
26213  } else if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
26214    BLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, B);
26215
26216    BHi = DAG.getVectorShuffle(VT, dl, B, B, PSHUFDMask);
26217    BHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, BHi);
26218  } else if (IsSigned) {
26219    BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), B));
26220    BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), B));
26221
26222    BLo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BLo, 8, DAG);
26223    BHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BHi, 8, DAG);
26224  } else {
26225    BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B,
26226                                          DAG.getConstant(0, dl, VT)));
26227    BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B,
26228                                          DAG.getConstant(0, dl, VT)));
26229  }
26230
26231  // Multiply, lshr the upper 8 bits into the lower 8 bits of the lo/hi results
26232  // and pack back to vXi8.
26233  SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
26234  SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
26235  RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RLo, 8, DAG);
26236  RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RHi, 8, DAG);
26237
26238  // Bitcast back to VT and then pack all the even elements from Lo and Hi.
26239  return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
26240}
26241
26242SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
26243  assert(Subtarget.isTargetWin64() && "Unexpected target");
26244  EVT VT = Op.getValueType();
26245  assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
26246         "Unexpected return type for lowering");
26247
26248  RTLIB::Libcall LC;
26249  bool isSigned;
26250  switch (Op->getOpcode()) {
26251  default: llvm_unreachable("Unexpected request for libcall!");
26252  case ISD::SDIV:      isSigned = true;  LC = RTLIB::SDIV_I128;    break;
26253  case ISD::UDIV:      isSigned = false; LC = RTLIB::UDIV_I128;    break;
26254  case ISD::SREM:      isSigned = true;  LC = RTLIB::SREM_I128;    break;
26255  case ISD::UREM:      isSigned = false; LC = RTLIB::UREM_I128;    break;
26256  case ISD::SDIVREM:   isSigned = true;  LC = RTLIB::SDIVREM_I128; break;
26257  case ISD::UDIVREM:   isSigned = false; LC = RTLIB::UDIVREM_I128; break;
26258  }
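  // Illustrative sketch (assumption, not from the original source): an
  // i128 sdiv reaching here becomes roughly a call such as
  //   __divti3(i128* %lhs.slot, i128* %rhs.slot)
  // where each operand is first spilled to a 16-byte aligned stack slot and
  // passed by pointer (the loop below), and the 128-bit result is modelled
  // as v2i64 before being bitcast back to the original type.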
26259
26260  SDLoc dl(Op);
26261  SDValue InChain = DAG.getEntryNode();
26262
26263  TargetLowering::ArgListTy Args;
26264  TargetLowering::ArgListEntry Entry;
26265  for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
26266    EVT ArgVT = Op->getOperand(i).getValueType();
26267    assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
26268           "Unexpected argument type for lowering");
26269    SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
26270    Entry.Node = StackPtr;
26271    InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
26272                           MachinePointerInfo(), /* Alignment = */ 16);
26273    Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
26274    Entry.Ty = PointerType::get(ArgTy, 0);
26275    Entry.IsSExt = false;
26276    Entry.IsZExt = false;
26277    Args.push_back(Entry);
26278  }
26279
26280  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
26281                                         getPointerTy(DAG.getDataLayout()));
26282
26283  TargetLowering::CallLoweringInfo CLI(DAG);
26284  CLI.setDebugLoc(dl)
26285      .setChain(InChain)
26286      .setLibCallee(
26287          getLibcallCallingConv(LC),
26288          static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
26289          std::move(Args))
26290      .setInRegister()
26291      .setSExtResult(isSigned)
26292      .setZExtResult(!isSigned);
26293
26294  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
26295  return DAG.getBitcast(VT, CallInfo.first);
26296}
26297
26298// Return true if the required (according to Opcode) shift-imm form is natively
26299// supported by the Subtarget
26300static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget,
26301                                        unsigned Opcode) {
26302  if (VT.getScalarSizeInBits() < 16)
26303    return false;
26304
26305  if (VT.is512BitVector() && Subtarget.hasAVX512() &&
26306      (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
26307    return true;
26308
26309  bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
26310                (VT.is256BitVector() && Subtarget.hasInt256());
26311
26312  bool AShift = LShift && (Subtarget.hasAVX512() ||
26313                           (VT != MVT::v2i64 && VT != MVT::v4i64));
26314  return (Opcode == ISD::SRA) ? AShift : LShift;
26315}
26316
26317// The shift amount is a variable, but it is the same for all vector lanes.
26318// These instructions are defined together with shift-immediate.
26319static
26320bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget &Subtarget,
26321                                      unsigned Opcode) {
26322  return SupportedVectorShiftWithImm(VT, Subtarget, Opcode);
26323}
26324
26325// Return true if the required (according to Opcode) variable-shift form is
26326// natively supported by the Subtarget
26327static bool SupportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget,
26328                                    unsigned Opcode) {
26329
26330  if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
26331    return false;
26332
26333  // vXi16 supported only on AVX-512 with BWI
26334  if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
26335    return false;
26336
26337  if (Subtarget.hasAVX512())
26338    return true;
26339
26340  bool LShift = VT.is128BitVector() || VT.is256BitVector();
26341  bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
26342  return (Opcode == ISD::SRA) ? AShift : LShift;
26343}
26344
26345static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
26346                                         const X86Subtarget &Subtarget) {
26347  MVT VT = Op.getSimpleValueType();
26348  SDLoc dl(Op);
26349  SDValue R = Op.getOperand(0);
26350  SDValue Amt = Op.getOperand(1);
26351  unsigned X86Opc = getTargetVShiftUniformOpcode(Op.getOpcode(), false);
26352
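  // Illustrative example (not from the original source): for ShiftAmt == 40
  // on v2i64 the lambda below emits VSRAI(Ex, 31) to splat the sign into what
  // becomes the high i32 of each result lane, VSRAI(Ex, 8) to move the
  // original high i32 into the low i32 position, and a shuffle to interleave
  // the two - an i64 arithmetic shift built from two i32 arithmetic shifts.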
26353  auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
26354    assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
26355    MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
26356    SDValue Ex = DAG.getBitcast(ExVT, R);
26357
26358    // ashr(R, 63) === cmp_slt(R, 0)
26359    if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
26360      assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
26361             "Unsupported PCMPGT op");
26362      return DAG.getNode(X86ISD::PCMPGT, dl, VT, DAG.getConstant(0, dl, VT), R);
26363    }
26364
26365    if (ShiftAmt >= 32) {
26366      // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
26367      SDValue Upper =
26368          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
26369      SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
26370                                                 ShiftAmt - 32, DAG);
26371      if (VT == MVT::v2i64)
26372        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
26373      if (VT == MVT::v4i64)
26374        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
26375                                  {9, 1, 11, 3, 13, 5, 15, 7});
26376    } else {
26377      // SRA upper i32, SRL whole i64 and select lower i32.
26378      SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
26379                                                 ShiftAmt, DAG);
26380      SDValue Lower =
26381          getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
26382      Lower = DAG.getBitcast(ExVT, Lower);
26383      if (VT == MVT::v2i64)
26384        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
26385      if (VT == MVT::v4i64)
26386        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
26387                                  {8, 1, 10, 3, 12, 5, 14, 7});
26388    }
26389    return DAG.getBitcast(VT, Ex);
26390  };
26391
26392  // Optimize shl/srl/sra with constant shift amount.
26393  APInt APIntShiftAmt;
26394  if (!X86::isConstantSplat(Amt, APIntShiftAmt))
26395    return SDValue();
26396
26397  // If the shift amount is out of range, return undef.
26398  if (APIntShiftAmt.uge(VT.getScalarSizeInBits()))
26399    return DAG.getUNDEF(VT);
26400
26401  uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();
26402
26403  if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
26404    return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
26405
26406  // i64 SRA needs to be performed as partial shifts.
26407  if (((!Subtarget.hasXOP() && VT == MVT::v2i64) ||
26408       (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
26409      Op.getOpcode() == ISD::SRA)
26410    return ArithmeticShiftRight64(ShiftAmt);
26411
26412  if (VT == MVT::v16i8 || (Subtarget.hasInt256() && VT == MVT::v32i8) ||
26413      VT == MVT::v64i8) {
26414    unsigned NumElts = VT.getVectorNumElements();
26415    MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
26416
26417    // Simple i8 add case
26418    if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1)
26419      return DAG.getNode(ISD::ADD, dl, VT, R, R);
26420
26421    // ashr(R, 7)  === cmp_slt(R, 0)
26422    if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
26423      SDValue Zeros = DAG.getConstant(0, dl, VT);
26424      if (VT.is512BitVector()) {
26425        assert(VT == MVT::v64i8 && "Unexpected element type!");
26426        SDValue CMP = DAG.getSetCC(dl, MVT::v64i1, Zeros, R, ISD::SETGT);
26427        return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
26428      }
26429      return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
26430    }
26431
26432    // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
26433    if (VT == MVT::v16i8 && Subtarget.hasXOP())
26434      return SDValue();
26435
26436    if (Op.getOpcode() == ISD::SHL) {
26437      // Make a large shift.
26438      SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT, R,
26439                                               ShiftAmt, DAG);
26440      SHL = DAG.getBitcast(VT, SHL);
26441      // Zero out the rightmost bits.
26442      APInt Mask = APInt::getHighBitsSet(8, 8 - ShiftAmt);
26443      return DAG.getNode(ISD::AND, dl, VT, SHL, DAG.getConstant(Mask, dl, VT));
26444    }
26445    if (Op.getOpcode() == ISD::SRL) {
26446      // Make a large shift.
26447      SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT, R,
26448                                               ShiftAmt, DAG);
26449      SRL = DAG.getBitcast(VT, SRL);
26450      // Zero out the leftmost bits.
26451      return DAG.getNode(ISD::AND, dl, VT, SRL,
26452                         DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, VT));
26453    }
26454    if (Op.getOpcode() == ISD::SRA) {
26455      // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
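      // Worked example (illustrative, not from the original source): for
      // ShiftAmt == 3, Mask == 128 >> 3 == 0x10. With R == 0xF0 (-16) the
      // lshr gives 0x1E, the xor gives 0x0E and the sub gives 0xFE (-2),
      // which is ashr(-16, 3); for non-negative inputs the xor/sub pair
      // cancels and the logical shift result is returned unchanged.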
26456      SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
26457
26458      SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
26459      Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
26460      Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
26461      return Res;
26462    }
26463    llvm_unreachable("Unknown shift opcode.");
26464  }
26465
26466  return SDValue();
26467}
26468
26469static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
26470                                        const X86Subtarget &Subtarget) {
26471  MVT VT = Op.getSimpleValueType();
26472  SDLoc dl(Op);
26473  SDValue R = Op.getOperand(0);
26474  SDValue Amt = Op.getOperand(1);
26475  unsigned Opcode = Op.getOpcode();
26476  unsigned X86OpcI = getTargetVShiftUniformOpcode(Opcode, false);
26477  unsigned X86OpcV = getTargetVShiftUniformOpcode(Opcode, true);
26478
26479  if (SDValue BaseShAmt = DAG.getSplatValue(Amt)) {
26480    if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode)) {
26481      MVT EltVT = VT.getVectorElementType();
26482      assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
26483      if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
26484        BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
26485      else if (EltVT.bitsLT(MVT::i32))
26486        BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
26487
26488      return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, Subtarget, DAG);
26489    }
26490
26491    // vXi8 shifts - shift as v8i16 + mask result.
26492    if (((VT == MVT::v16i8 && !Subtarget.canExtendTo512DQ()) ||
26493         (VT == MVT::v32i8 && !Subtarget.canExtendTo512BW()) ||
26494         VT == MVT::v64i8) &&
26495        !Subtarget.hasXOP()) {
26496      unsigned NumElts = VT.getVectorNumElements();
26497      MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
26498      if (SupportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, Opcode)) {
26499        unsigned LogicalOp = (Opcode == ISD::SHL ? ISD::SHL : ISD::SRL);
26500        unsigned LogicalX86Op = getTargetVShiftUniformOpcode(LogicalOp, false);
26501        BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
26502
26503        // Create the mask using vXi16 shifts. For shift-rights we need to move
26504        // the upper byte down before splatting the vXi8 mask.
26505        SDValue BitMask = DAG.getConstant(-1, dl, ExtVT);
26506        BitMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, BitMask,
26507                                      BaseShAmt, Subtarget, DAG);
26508        if (Opcode != ISD::SHL)
26509          BitMask = getTargetVShiftByConstNode(LogicalX86Op, dl, ExtVT, BitMask,
26510                                               8, DAG);
26511        BitMask = DAG.getBitcast(VT, BitMask);
26512        BitMask = DAG.getVectorShuffle(VT, dl, BitMask, BitMask,
26513                                       SmallVector<int, 64>(NumElts, 0));
26514
26515        SDValue Res = getTargetVShiftNode(LogicalX86Op, dl, ExtVT,
26516                                          DAG.getBitcast(ExtVT, R), BaseShAmt,
26517                                          Subtarget, DAG);
26518        Res = DAG.getBitcast(VT, Res);
26519        Res = DAG.getNode(ISD::AND, dl, VT, Res, BitMask);
26520
26521        if (Opcode == ISD::SRA) {
26522          // ashr(R, Amt) === sub(xor(lshr(R, Amt), SignMask), SignMask)
26523          // SignMask = lshr(SignBit, Amt) - safe to do this with PSRLW.
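          // (Descriptive note, not in the original source:) 0x8080 sets bit 7
          // of both bytes within each i16 lane, so a single vXi16 logical
          // shift of it produces the per-byte sign masks used above.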
26524          SDValue SignMask = DAG.getConstant(0x8080, dl, ExtVT);
26525          SignMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, SignMask,
26526                                         BaseShAmt, Subtarget, DAG);
26527          SignMask = DAG.getBitcast(VT, SignMask);
26528          Res = DAG.getNode(ISD::XOR, dl, VT, Res, SignMask);
26529          Res = DAG.getNode(ISD::SUB, dl, VT, Res, SignMask);
26530        }
26531        return Res;
26532      }
26533    }
26534  }
26535
26536  // Check cases (mainly 32-bit) where i64 is expanded into high and low parts.
26537  if (VT == MVT::v2i64 && Amt.getOpcode() == ISD::BITCAST &&
26538      Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
26539    Amt = Amt.getOperand(0);
26540    unsigned Ratio = 64 / Amt.getScalarValueSizeInBits();
26541    std::vector<SDValue> Vals(Ratio);
26542    for (unsigned i = 0; i != Ratio; ++i)
26543      Vals[i] = Amt.getOperand(i);
26544    for (unsigned i = Ratio, e = Amt.getNumOperands(); i != e; i += Ratio) {
26545      for (unsigned j = 0; j != Ratio; ++j)
26546        if (Vals[j] != Amt.getOperand(i + j))
26547          return SDValue();
26548    }
26549
26550    if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode()))
26551      return DAG.getNode(X86OpcV, dl, VT, R, Op.getOperand(1));
26552  }
26553  return SDValue();
26554}
26555
26556// Convert a shift/rotate left amount to a multiplication scale factor.
26557static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
26558                                       const X86Subtarget &Subtarget,
26559                                       SelectionDAG &DAG) {
26560  MVT VT = Amt.getSimpleValueType();
26561  if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
26562        (Subtarget.hasInt256() && VT == MVT::v16i16) ||
26563        (!Subtarget.hasAVX512() && VT == MVT::v16i8)))
26564    return SDValue();
26565
26566  if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
26567    SmallVector<SDValue, 8> Elts;
26568    MVT SVT = VT.getVectorElementType();
26569    unsigned SVTBits = SVT.getSizeInBits();
26570    APInt One(SVTBits, 1);
26571    unsigned NumElems = VT.getVectorNumElements();
26572
26573    for (unsigned i = 0; i != NumElems; ++i) {
26574      SDValue Op = Amt->getOperand(i);
26575      if (Op->isUndef()) {
26576        Elts.push_back(Op);
26577        continue;
26578      }
26579
26580      ConstantSDNode *ND = cast<ConstantSDNode>(Op);
26581      APInt C(SVTBits, ND->getZExtValue());
26582      uint64_t ShAmt = C.getZExtValue();
26583      if (ShAmt >= SVTBits) {
26584        Elts.push_back(DAG.getUNDEF(SVT));
26585        continue;
26586      }
26587      Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
26588    }
26589    return DAG.getBuildVector(VT, dl, Elts);
26590  }
26591
26592  // If the target doesn't support variable shifts, use either FP conversion
26593  // or integer multiplication to avoid shifting each element individually.
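  // Illustrative example (not from the original source): for an element with
  // Amt == 5, (5 << 23) + 0x3f800000 == 0x42000000, which is 32.0f, i.e. 2^5,
  // so the FP_TO_SINT below produces the power-of-two scale factor without
  // any variable integer shift.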
26594  if (VT == MVT::v4i32) {
26595    Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
26596    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
26597                      DAG.getConstant(0x3f800000U, dl, VT));
26598    Amt = DAG.getBitcast(MVT::v4f32, Amt);
26599    return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
26600  }
26601
26602  // AVX2 can more effectively perform this as a zext/trunc to/from v8i32.
26603  if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) {
26604    SDValue Z = DAG.getConstant(0, dl, VT);
26605    SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z));
26606    SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z));
26607    Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG);
26608    Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG);
26609    if (Subtarget.hasSSE41())
26610      return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
26611
26612    return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, Lo),
26613                                        DAG.getBitcast(VT, Hi),
26614                                        {0, 2, 4, 6, 8, 10, 12, 14});
26615  }
26616
26617  return SDValue();
26618}
26619
26620static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
26621                          SelectionDAG &DAG) {
26622  MVT VT = Op.getSimpleValueType();
26623  SDLoc dl(Op);
26624  SDValue R = Op.getOperand(0);
26625  SDValue Amt = Op.getOperand(1);
26626  unsigned EltSizeInBits = VT.getScalarSizeInBits();
26627  bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
26628
26629  unsigned Opc = Op.getOpcode();
26630  unsigned X86OpcV = getTargetVShiftUniformOpcode(Opc, true);
26631  unsigned X86OpcI = getTargetVShiftUniformOpcode(Opc, false);
26632
26633  assert(VT.isVector() && "Custom lowering only for vector shifts!");
26634  assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");
26635
26636  if (SDValue V = LowerScalarImmediateShift(Op, DAG, Subtarget))
26637    return V;
26638
26639  if (SDValue V = LowerScalarVariableShift(Op, DAG, Subtarget))
26640    return V;
26641
26642  if (SupportedVectorVarShift(VT, Subtarget, Opc))
26643    return Op;
26644
26645  // XOP has 128-bit variable logical/arithmetic shifts.
26646  // +ve/-ve Amt = shift left/right.
26647  if (Subtarget.hasXOP() && (VT == MVT::v2i64 || VT == MVT::v4i32 ||
26648                             VT == MVT::v8i16 || VT == MVT::v16i8)) {
26649    if (Opc == ISD::SRL || Opc == ISD::SRA) {
26650      SDValue Zero = DAG.getConstant(0, dl, VT);
26651      Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
26652    }
26653    if (Opc == ISD::SHL || Opc == ISD::SRL)
26654      return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
26655    if (Opc == ISD::SRA)
26656      return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
26657  }
26658
26659  // v2i64 vector logical shifts can efficiently avoid scalarization - do the
26660  // shifts per-lane and then shuffle the partial results back together.
26661  if (VT == MVT::v2i64 && Opc != ISD::SRA) {
26662    // Splat the shift amounts so the scalar shifts above will catch them.
26663    SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
26664    SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
26665    SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
26666    SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
26667    return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
26668  }
26669
26670  // i64 vector arithmetic shift can be emulated with the transform:
26671  // M = lshr(SIGN_MASK, Amt)
26672  // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
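  // Illustrative example (not from the original source): for Amt == 4, M ==
  // 0x0800000000000000 (the sign bit shifted right by 4); for negative inputs
  // the xor/sub pair borrows through the four cleared high bits, filling them
  // with copies of the sign, while for non-negative inputs it cancels out.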
26673  if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
26674      Opc == ISD::SRA) {
26675    SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
26676    SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
26677    R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
26678    R = DAG.getNode(ISD::XOR, dl, VT, R, M);
26679    R = DAG.getNode(ISD::SUB, dl, VT, R, M);
26680    return R;
26681  }
26682
26683  // If possible, lower this shift as a sequence of two shifts by
26684  // constant plus a BLENDing shuffle instead of scalarizing it.
26685  // Example:
26686  //   (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
26687  //
26688  // Could be rewritten as:
26689  //   (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
26690  //
26691  // The advantage is that the two shifts from the example would be
26692  // lowered as X86ISD::VSRLI nodes in parallel before blending.
26693  if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
26694                      (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
26695    SDValue Amt1, Amt2;
26696    unsigned NumElts = VT.getVectorNumElements();
26697    SmallVector<int, 8> ShuffleMask;
26698    for (unsigned i = 0; i != NumElts; ++i) {
26699      SDValue A = Amt->getOperand(i);
26700      if (A.isUndef()) {
26701        ShuffleMask.push_back(SM_SentinelUndef);
26702        continue;
26703      }
26704      if (!Amt1 || Amt1 == A) {
26705        ShuffleMask.push_back(i);
26706        Amt1 = A;
26707        continue;
26708      }
26709      if (!Amt2 || Amt2 == A) {
26710        ShuffleMask.push_back(i + NumElts);
26711        Amt2 = A;
26712        continue;
26713      }
26714      break;
26715    }
26716
26717    // Only perform this blend if we can perform it without loading a mask.
26718    if (ShuffleMask.size() == NumElts && Amt1 && Amt2 &&
26719        (VT != MVT::v16i16 ||
26720         is128BitLaneRepeatedShuffleMask(VT, ShuffleMask)) &&
26721        (VT == MVT::v4i32 || Subtarget.hasSSE41() || Opc != ISD::SHL ||
26722         canWidenShuffleElements(ShuffleMask))) {
26723      auto *Cst1 = dyn_cast<ConstantSDNode>(Amt1);
26724      auto *Cst2 = dyn_cast<ConstantSDNode>(Amt2);
26725      if (Cst1 && Cst2 && Cst1->getAPIntValue().ult(EltSizeInBits) &&
26726          Cst2->getAPIntValue().ult(EltSizeInBits)) {
26727        SDValue Shift1 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
26728                                                    Cst1->getZExtValue(), DAG);
26729        SDValue Shift2 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
26730                                                    Cst2->getZExtValue(), DAG);
26731        return DAG.getVectorShuffle(VT, dl, Shift1, Shift2, ShuffleMask);
26732      }
26733    }
26734  }
26735
26736  // If possible, lower this packed shift into a vector multiply instead of
26737  // expanding it into a sequence of scalar shifts.
26738  if (Opc == ISD::SHL)
26739    if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG))
26740      return DAG.getNode(ISD::MUL, dl, VT, R, Scale);
26741
26742  // Constant ISD::SRL can be performed efficiently on vXi16 vectors as we
26743  // can replace it with ISD::MULHU and a scale factor from (NumEltBits - Amt).
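  // Illustrative example (not from the original source): for a v8i16 srl by 3
  // each lane becomes mulhu(x, 1 << (16 - 3)) == (x * 0x2000) >> 16 == x >> 3.
  // Amt == 0 would need a scale of 1 << 16, which does not fit in i16, hence
  // the select on ZAmt below that returns R unchanged for zero amounts.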
26744  if (Opc == ISD::SRL && ConstantAmt &&
26745      (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
26746    SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
26747    SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
26748    if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
26749      SDValue Zero = DAG.getConstant(0, dl, VT);
26750      SDValue ZAmt = DAG.getSetCC(dl, VT, Amt, Zero, ISD::SETEQ);
26751      SDValue Res = DAG.getNode(ISD::MULHU, dl, VT, R, Scale);
26752      return DAG.getSelect(dl, VT, ZAmt, R, Res);
26753    }
26754  }
26755
26756  // Constant ISD::SRA can be performed efficiently on vXi16 vectors as we
26757  // can replace it with ISD::MULHS and a scale factor from (NumEltBits - Amt).
26758  // TODO: Special case handling for shift by 0/1, really we can afford either
26759  // of these cases in pre-SSE41/XOP/AVX512 but not both.
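  // Illustrative note (not from the original source): sra by N maps to
  // mulhs(x, 1 << (16 - N)) only for N >= 2; N == 1 would need 1 << 15, which
  // is negative as an i16 and breaks the signed high multiply, and N == 0
  // would need 1 << 16, which does not fit at all - hence the Amt0/Amt1
  // selects below.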
26760  if (Opc == ISD::SRA && ConstantAmt &&
26761      (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) &&
26762      ((Subtarget.hasSSE41() && !Subtarget.hasXOP() &&
26763        !Subtarget.hasAVX512()) ||
26764       DAG.isKnownNeverZero(Amt))) {
26765    SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
26766    SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
26767    if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
26768      SDValue Amt0 =
26769          DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ);
26770      SDValue Amt1 =
26771          DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ);
26772      SDValue Sra1 =
26773          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG);
26774      SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale);
26775      Res = DAG.getSelect(dl, VT, Amt0, R, Res);
26776      return DAG.getSelect(dl, VT, Amt1, Sra1, Res);
26777    }
26778  }
26779
26780  // v4i32 Non Uniform Shifts.
26781  // If the shift amount is constant we can shift each lane using the SSE2
26782  // immediate shifts, else we need to zero-extend each lane to the lower i64
26783  // and shift using the SSE2 variable shifts.
26784  // The separate results can then be blended together.
26785  if (VT == MVT::v4i32) {
26786    SDValue Amt0, Amt1, Amt2, Amt3;
26787    if (ConstantAmt) {
26788      Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
26789      Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
26790      Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
26791      Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
26792    } else {
26793      // The SSE2 shifts use the lower i64 as the same shift amount for
26794      // all lanes and the upper i64 is ignored. On AVX we're better off
26795      // just zero-extending, but for SSE just duplicating the top 16 bits is
26796      // cheaper and has the same effect for out of range values.
26797      if (Subtarget.hasAVX()) {
26798        SDValue Z = DAG.getConstant(0, dl, VT);
26799        Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
26800        Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
26801        Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
26802        Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
26803      } else {
26804        SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt);
26805        SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
26806                                             {4, 5, 6, 7, -1, -1, -1, -1});
26807        Amt0 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
26808                                    {0, 1, 1, 1, -1, -1, -1, -1});
26809        Amt1 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
26810                                    {2, 3, 3, 3, -1, -1, -1, -1});
26811        Amt2 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
26812                                    {0, 1, 1, 1, -1, -1, -1, -1});
26813        Amt3 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
26814                                    {2, 3, 3, 3, -1, -1, -1, -1});
26815      }
26816    }
26817
26818    unsigned ShOpc = ConstantAmt ? Opc : X86OpcV;
26819    SDValue R0 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt0));
26820    SDValue R1 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt1));
26821    SDValue R2 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt2));
26822    SDValue R3 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt3));
26823
26824    // Merge the shifted lane results optimally with/without PBLENDW.
26825    // TODO - ideally shuffle combining would handle this.
26826    if (Subtarget.hasSSE41()) {
26827      SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
26828      SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
26829      return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
26830    }
26831    SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5});
26832    SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7});
26833    return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7});
26834  }
26835
26836  // It's worth extending once and using the vXi16/vXi32 shifts for smaller
26837  // types, but without AVX512 the extra overheads to get from vXi8 to vXi32
26838  // make the existing SSE solution better.
26839  // NOTE: We honor the preferred vector width before promoting to 512 bits.
26840  if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
26841      (Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) ||
26842      (Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) ||
26843      (Subtarget.canExtendTo512BW() && VT == MVT::v32i8) ||
26844      (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) {
26845    assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) &&
26846           "Unexpected vector type");
26847    MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
26848    MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
26849    unsigned ExtOpc = Opc == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
26850    R = DAG.getNode(ExtOpc, dl, ExtVT, R);
26851    Amt = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Amt);
26852    return DAG.getNode(ISD::TRUNCATE, dl, VT,
26853                       DAG.getNode(Opc, dl, ExtVT, R, Amt));
26854  }
26855
26856  // Constant ISD::SRA/SRL can be performed efficiently on vXi8 vectors as we
26857  // extend to vXi16 to perform a MUL scale effectively as a MUL_LOHI.
26858  if (ConstantAmt && (Opc == ISD::SRA || Opc == ISD::SRL) &&
26859      (VT == MVT::v16i8 || VT == MVT::v64i8 ||
26860       (VT == MVT::v32i8 && Subtarget.hasInt256())) &&
26861      !Subtarget.hasXOP()) {
26862    int NumElts = VT.getVectorNumElements();
26863    SDValue Cst8 = DAG.getTargetConstant(8, dl, MVT::i8);
26864
26865    // Extend constant shift amount to vXi16 (it doesn't matter if the type
26866    // isn't legal).
26867    MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
26868    Amt = DAG.getZExtOrTrunc(Amt, dl, ExVT);
26869    Amt = DAG.getNode(ISD::SUB, dl, ExVT, DAG.getConstant(8, dl, ExVT), Amt);
26870    Amt = DAG.getNode(ISD::SHL, dl, ExVT, DAG.getConstant(1, dl, ExVT), Amt);
26871    assert(ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()) &&
26872           "Constant build vector expected");
26873
26874    if (VT == MVT::v16i8 && Subtarget.hasInt256()) {
26875      R = Opc == ISD::SRA ? DAG.getSExtOrTrunc(R, dl, ExVT)
26876                          : DAG.getZExtOrTrunc(R, dl, ExVT);
26877      R = DAG.getNode(ISD::MUL, dl, ExVT, R, Amt);
26878      R = DAG.getNode(X86ISD::VSRLI, dl, ExVT, R, Cst8);
26879      return DAG.getZExtOrTrunc(R, dl, VT);
26880    }
26881
26882    SmallVector<SDValue, 16> LoAmt, HiAmt;
26883    for (int i = 0; i != NumElts; i += 16) {
26884      for (int j = 0; j != 8; ++j) {
26885        LoAmt.push_back(Amt.getOperand(i + j));
26886        HiAmt.push_back(Amt.getOperand(i + j + 8));
26887      }
26888    }
26889
26890    MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2);
26891    SDValue LoA = DAG.getBuildVector(VT16, dl, LoAmt);
26892    SDValue HiA = DAG.getBuildVector(VT16, dl, HiAmt);
26893
26894    SDValue LoR = DAG.getBitcast(VT16, getUnpackl(DAG, dl, VT, R, R));
26895    SDValue HiR = DAG.getBitcast(VT16, getUnpackh(DAG, dl, VT, R, R));
26896    LoR = DAG.getNode(X86OpcI, dl, VT16, LoR, Cst8);
26897    HiR = DAG.getNode(X86OpcI, dl, VT16, HiR, Cst8);
26898    LoR = DAG.getNode(ISD::MUL, dl, VT16, LoR, LoA);
26899    HiR = DAG.getNode(ISD::MUL, dl, VT16, HiR, HiA);
26900    LoR = DAG.getNode(X86ISD::VSRLI, dl, VT16, LoR, Cst8);
26901    HiR = DAG.getNode(X86ISD::VSRLI, dl, VT16, HiR, Cst8);
26902    return DAG.getNode(X86ISD::PACKUS, dl, VT, LoR, HiR);
26903  }
26904
26905  if (VT == MVT::v16i8 ||
26906      (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
26907      (VT == MVT::v64i8 && Subtarget.hasBWI())) {
26908    MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
26909
26910    auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
26911      if (VT.is512BitVector()) {
26912        // On AVX512BW targets we make use of the fact that VSELECT lowers
26913        // to a masked blend which selects bytes based just on the sign bit
26914        // extracted to a mask.
26915        MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
26916        V0 = DAG.getBitcast(VT, V0);
26917        V1 = DAG.getBitcast(VT, V1);
26918        Sel = DAG.getBitcast(VT, Sel);
26919        Sel = DAG.getSetCC(dl, MaskVT, DAG.getConstant(0, dl, VT), Sel,
26920                           ISD::SETGT);
26921        return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
26922      } else if (Subtarget.hasSSE41()) {
26923        // On SSE41 targets we make use of the fact that VSELECT lowers
26924        // to PBLENDVB which selects bytes based just on the sign bit.
26925        V0 = DAG.getBitcast(VT, V0);
26926        V1 = DAG.getBitcast(VT, V1);
26927        Sel = DAG.getBitcast(VT, Sel);
26928        return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
26929      }
26930      // On pre-SSE41 targets we test for the sign bit by comparing to
26931      // zero - a negative value will set all bits of the lanes to true
26932      // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
26933      SDValue Z = DAG.getConstant(0, dl, SelVT);
26934      SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
26935      return DAG.getSelect(dl, SelVT, C, V0, V1);
26936    };
26937
26938    // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
26939    // We can safely do this using i16 shifts as we're only interested in
26940    // the 3 lower bits of each byte.
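    // Illustrative example (not from the original source): for a byte amount
    // of 5 (0b101) the << 5 puts amount bit 2 in the MSB, so the first select
    // applies the shift-by-4 step; doubling Amt then exposes bit 1 (0 here,
    // step skipped) and finally bit 0 (1, shift-by-1 applied), for a total
    // shift of 5.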
26941    Amt = DAG.getBitcast(ExtVT, Amt);
26942    Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5, DAG);
26943    Amt = DAG.getBitcast(VT, Amt);
26944
26945    if (Opc == ISD::SHL || Opc == ISD::SRL) {
26946      // r = VSELECT(r, shift(r, 4), a);
26947      SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT));
26948      R = SignBitSelect(VT, Amt, M, R);
26949
26950      // a += a
26951      Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
26952
26953      // r = VSELECT(r, shift(r, 2), a);
26954      M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT));
26955      R = SignBitSelect(VT, Amt, M, R);
26956
26957      // a += a
26958      Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
26959
26960      // return VSELECT(r, shift(r, 1), a);
26961      M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT));
26962      R = SignBitSelect(VT, Amt, M, R);
26963      return R;
26964    }
26965
26966    if (Opc == ISD::SRA) {
26967      // For SRA we need to unpack each byte to the higher byte of an i16 vector
26968      // so we can correctly sign-extend. We don't care what happens to the
26969      // lower byte.
26970      SDValue ALo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
26971      SDValue AHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
26972      SDValue RLo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), R);
26973      SDValue RHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), R);
26974      ALo = DAG.getBitcast(ExtVT, ALo);
26975      AHi = DAG.getBitcast(ExtVT, AHi);
26976      RLo = DAG.getBitcast(ExtVT, RLo);
26977      RHi = DAG.getBitcast(ExtVT, RHi);
26978
26979      // r = VSELECT(r, shift(r, 4), a);
26980      SDValue MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG);
26981      SDValue MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG);
26982      RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
26983      RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
26984
26985      // a += a
26986      ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
26987      AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
26988
26989      // r = VSELECT(r, shift(r, 2), a);
26990      MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG);
26991      MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG);
26992      RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
26993      RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
26994
26995      // a += a
26996      ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
26997      AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
26998
26999      // r = VSELECT(r, shift(r, 1), a);
27000      MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 1, DAG);
27001      MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 1, DAG);
27002      RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
27003      RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
27004
27005      // Logical shift the result back to the lower byte, leaving a zero upper
27006      // byte, meaning that we can safely pack with PACKUSWB.
27007      RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RLo, 8, DAG);
27008      RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RHi, 8, DAG);
27009      return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
27010    }
27011  }
27012
27013  if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
27014    MVT ExtVT = MVT::v8i32;
27015    SDValue Z = DAG.getConstant(0, dl, VT);
27016    SDValue ALo = getUnpackl(DAG, dl, VT, Amt, Z);
27017    SDValue AHi = getUnpackh(DAG, dl, VT, Amt, Z);
27018    SDValue RLo = getUnpackl(DAG, dl, VT, Z, R);
27019    SDValue RHi = getUnpackh(DAG, dl, VT, Z, R);
27020    ALo = DAG.getBitcast(ExtVT, ALo);
27021    AHi = DAG.getBitcast(ExtVT, AHi);
27022    RLo = DAG.getBitcast(ExtVT, RLo);
27023    RHi = DAG.getBitcast(ExtVT, RHi);
27024    SDValue Lo = DAG.getNode(Opc, dl, ExtVT, RLo, ALo);
27025    SDValue Hi = DAG.getNode(Opc, dl, ExtVT, RHi, AHi);
27026    Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Lo, 16, DAG);
27027    Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Hi, 16, DAG);
27028    return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
27029  }
27030
27031  if (VT == MVT::v8i16) {
27032    // If we have a constant shift amount, the non-SSE41 path is best as
27033    // avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW.
27034    bool UseSSE41 = Subtarget.hasSSE41() &&
27035                    !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
27036
27037    auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
27038      // On SSE41 targets we make use of the fact that VSELECT lowers
27039      // to PBLENDVB which selects bytes based just on the sign bit.
27040      if (UseSSE41) {
27041        MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
27042        V0 = DAG.getBitcast(ExtVT, V0);
27043        V1 = DAG.getBitcast(ExtVT, V1);
27044        Sel = DAG.getBitcast(ExtVT, Sel);
27045        return DAG.getBitcast(VT, DAG.getSelect(dl, ExtVT, Sel, V0, V1));
27046      }
27047      // On pre-SSE41 targets we splat the sign bit - a negative value will
27048      // set all bits of the lanes to true and VSELECT uses that in
27049      // its OR(AND(V0,C),AND(V1,~C)) lowering.
27050      SDValue C =
27051          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Sel, 15, DAG);
27052      return DAG.getSelect(dl, VT, C, V0, V1);
27053    };
27054
27055    // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
27056    if (UseSSE41) {
27057      // On SSE41 targets we need to replicate the shift mask in both
27058      // bytes for PBLENDVB.
27059      Amt = DAG.getNode(
27060          ISD::OR, dl, VT,
27061          getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 4, DAG),
27062          getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG));
27063    } else {
27064      Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG);
27065    }
27066
27067    // r = VSELECT(r, shift(r, 8), a);
27068    SDValue M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 8, DAG);
27069    R = SignBitSelect(Amt, M, R);
27070
27071    // a += a
27072    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
27073
27074    // r = VSELECT(r, shift(r, 4), a);
27075    M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 4, DAG);
27076    R = SignBitSelect(Amt, M, R);
27077
27078    // a += a
27079    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
27080
27081    // r = VSELECT(r, shift(r, 2), a);
27082    M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 2, DAG);
27083    R = SignBitSelect(Amt, M, R);
27084
27085    // a += a
27086    Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
27087
27088    // return VSELECT(r, shift(r, 1), a);
27089    M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 1, DAG);
27090    R = SignBitSelect(Amt, M, R);
27091    return R;
27092  }
27093
27094  // Decompose 256-bit shifts into 128-bit shifts.
27095  if (VT.is256BitVector())
27096    return split256IntArith(Op, DAG);
27097
27098  return SDValue();
27099}
27100
27101static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
27102                           SelectionDAG &DAG) {
27103  MVT VT = Op.getSimpleValueType();
27104  assert(VT.isVector() && "Custom lowering only for vector rotates!");
27105
27106  SDLoc DL(Op);
27107  SDValue R = Op.getOperand(0);
27108  SDValue Amt = Op.getOperand(1);
27109  unsigned Opcode = Op.getOpcode();
27110  unsigned EltSizeInBits = VT.getScalarSizeInBits();
27111  int NumElts = VT.getVectorNumElements();
27112
27113  // Check for constant splat rotation amount.
27114  APInt UndefElts;
27115  SmallVector<APInt, 32> EltBits;
27116  int CstSplatIndex = -1;
27117  if (getTargetConstantBitsFromNode(Amt, EltSizeInBits, UndefElts, EltBits))
27118    for (int i = 0; i != NumElts; ++i)
27119      if (!UndefElts[i]) {
27120        if (CstSplatIndex < 0 || EltBits[i] == EltBits[CstSplatIndex]) {
27121          CstSplatIndex = i;
27122          continue;
27123        }
27124        CstSplatIndex = -1;
27125        break;
27126      }
27127
27128  // AVX512 implicitly uses modulo rotation amounts.
27129  if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) {
27130    // Attempt to rotate by immediate.
27131    if (0 <= CstSplatIndex) {
27132      unsigned Op = (Opcode == ISD::ROTL ? X86ISD::VROTLI : X86ISD::VROTRI);
27133      uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits);
27134      return DAG.getNode(Op, DL, VT, R,
27135                         DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
27136    }
27137
27138    // Else, fall-back on VPROLV/VPRORV.
27139    return Op;
27140  }
27141
27142  assert((Opcode == ISD::ROTL) && "Only ROTL supported");
27143
27144  // XOP has 128-bit vector variable + immediate rotates.
27145  // +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
27146  // XOP implicitly uses modulo rotation amounts.
27147  if (Subtarget.hasXOP()) {
27148    if (VT.is256BitVector())
27149      return split256IntArith(Op, DAG);
27150    assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
27151
27152    // Attempt to rotate by immediate.
27153    if (0 <= CstSplatIndex) {
27154      uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits);
27155      return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
27156                         DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
27157    }
27158
27159    // Use general rotate by variable (per-element).
27160    return Op;
27161  }
27162
27163  // Split 256-bit integers on pre-AVX2 targets.
27164  if (VT.is256BitVector() && !Subtarget.hasAVX2())
27165    return split256IntArith(Op, DAG);
27166
27167  assert((VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
27168          ((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) &&
27169           Subtarget.hasAVX2())) &&
27170         "Only vXi32/vXi16/vXi8 vector rotates supported");
27171
27172  // Rotate by a uniform constant - expand back to shifts.
27173  if (0 <= CstSplatIndex)
27174    return SDValue();
27175
27176  bool IsSplatAmt = DAG.isSplatValue(Amt);
27177
27178  // v16i8/v32i8: Split rotation into rot4/rot2/rot1 stages and select by
27179  // the amount bit.
27180  if (EltSizeInBits == 8 && !IsSplatAmt) {
27181    if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()))
27182      return SDValue();
27183
27184    // We don't need ModuloAmt here as we just peek at individual bits.
27185    MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
27186
27187    auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
27188      if (Subtarget.hasSSE41()) {
27189        // On SSE41 targets we make use of the fact that VSELECT lowers
27190        // to PBLENDVB which selects bytes based just on the sign bit.
27191        V0 = DAG.getBitcast(VT, V0);
27192        V1 = DAG.getBitcast(VT, V1);
27193        Sel = DAG.getBitcast(VT, Sel);
27194        return DAG.getBitcast(SelVT, DAG.getSelect(DL, VT, Sel, V0, V1));
27195      }
27196      // On pre-SSE41 targets we test for the sign bit by comparing to
27197      // zero - a negative value will set all bits of the lanes to true
27198      // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
27199      SDValue Z = DAG.getConstant(0, DL, SelVT);
27200      SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel);
27201      return DAG.getSelect(DL, SelVT, C, V0, V1);
27202    };
27203
27204    // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
27205    // We can safely do this using i16 shifts as we're only interested in
27206    // the 3 lower bits of each byte.
27207    Amt = DAG.getBitcast(ExtVT, Amt);
27208    Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT));
27209    Amt = DAG.getBitcast(VT, Amt);
27210
27211    // r = VSELECT(r, rot(r, 4), a);
27212    SDValue M;
27213    M = DAG.getNode(
27214        ISD::OR, DL, VT,
27215        DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(4, DL, VT)),
27216        DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(4, DL, VT)));
27217    R = SignBitSelect(VT, Amt, M, R);
27218
27219    // a += a
27220    Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
27221
27222    // r = VSELECT(r, rot(r, 2), a);
27223    M = DAG.getNode(
27224        ISD::OR, DL, VT,
27225        DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(2, DL, VT)),
27226        DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(6, DL, VT)));
27227    R = SignBitSelect(VT, Amt, M, R);
27228
27229    // a += a
27230    Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
27231
27232    // return VSELECT(r, rot(r, 1), a);
27233    M = DAG.getNode(
27234        ISD::OR, DL, VT,
27235        DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(1, DL, VT)),
27236        DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(7, DL, VT)));
27237    return SignBitSelect(VT, Amt, M, R);
27238  }
27239
27240  // ISD::ROT* uses modulo rotate amounts.
27241  Amt = DAG.getNode(ISD::AND, DL, VT, Amt,
27242                    DAG.getConstant(EltSizeInBits - 1, DL, VT));
27243
27244  bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
27245  bool LegalVarShifts = SupportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
27246                        SupportedVectorVarShift(VT, Subtarget, ISD::SRL);
27247
27248  // Fallback for splats + all supported variable shifts.
27249  // Fallback for non-constant AVX2 vXi16 as well.
27250  if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) {
27251    SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
27252    AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
27253    SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt);
27254    SDValue SRL = DAG.getNode(ISD::SRL, DL, VT, R, AmtR);
27255    return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
27256  }
27257
27258  // As with shifts, convert the rotation amount to a multiplication factor.
27259  SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
27260  assert(Scale && "Failed to convert ROTL amount to scale");
27261
27262  // v8i16/v16i16: perform unsigned multiply hi/lo and OR the results.
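  // Illustrative example (not from the original source): for a rotate-left by
  // 3 the scale is 8, so the MUL gives the low 16 bits of x * 8 (x << 3), the
  // MULHU gives the high 16 bits (x >> 13), and their OR is rotl(x, 3).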
27263  if (EltSizeInBits == 16) {
27264    SDValue Lo = DAG.getNode(ISD::MUL, DL, VT, R, Scale);
27265    SDValue Hi = DAG.getNode(ISD::MULHU, DL, VT, R, Scale);
27266    return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
27267  }
27268
27269  // v4i32: make use of the PMULUDQ instruction to multiply 2 lanes of v4i32
27270  // to v2i64 results at a time. The upper 32 bits contain the wrapped bits
27271  // that can then be OR'd with the lower 32 bits.
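  // Illustrative example (not from the original source): rotating a lane left
  // by 5 is the full 64-bit product x * 32, whose low 32 bits equal x << 5 and
  // whose high 32 bits equal x >> 27; OR'ing the two halves back together
  // yields the rotate.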
27272  assert(VT == MVT::v4i32 && "Only v4i32 vector rotate expected");
27273  static const int OddMask[] = {1, -1, 3, -1};
27274  SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask);
27275  SDValue Scale13 = DAG.getVectorShuffle(VT, DL, Scale, Scale, OddMask);
27276
27277  SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
27278                              DAG.getBitcast(MVT::v2i64, R),
27279                              DAG.getBitcast(MVT::v2i64, Scale));
27280  SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
27281                              DAG.getBitcast(MVT::v2i64, R13),
27282                              DAG.getBitcast(MVT::v2i64, Scale13));
27283  Res02 = DAG.getBitcast(VT, Res02);
27284  Res13 = DAG.getBitcast(VT, Res13);
27285
27286  return DAG.getNode(ISD::OR, DL, VT,
27287                     DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}),
27288                     DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
27289}
27290
27291/// Returns true if the operand type is exactly twice the native width, and
27292/// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
27293/// Used to know whether to use cmpxchg8/16b when expanding atomic operations
27294/// (otherwise we leave them alone to become __sync_fetch_and_... calls).
27295bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
27296  unsigned OpWidth = MemType->getPrimitiveSizeInBits();
27297
27298  if (OpWidth == 64)
27299    return Subtarget.hasCmpxchg8b() && !Subtarget.is64Bit();
27300  if (OpWidth == 128)
27301    return Subtarget.hasCmpxchg16b();
27302
27303  return false;
27304}
27305
27306// TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
27307// TODO: In 32-bit mode, use FISTP when X87 is available?
27308bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
27309  Type *MemType = SI->getValueOperand()->getType();
27310
27311  bool NoImplicitFloatOps =
27312      SI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
27313  if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
27314      !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2())
27315    return false;
27316
27317  return needsCmpXchgNb(MemType);
27318}
27319
27320// Note: this turns large loads into lock cmpxchg8b/16b.
27321// TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
27322TargetLowering::AtomicExpansionKind
27323X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
27324  Type *MemType = LI->getType();
27325
27326  // If this is a 64-bit atomic load on a 32-bit target and SSE2 is enabled,
27327  // we can use movq to do the load. If we have X87, we can load into an 80-bit
27328  // X87 register and store it to a stack temporary.
27329  bool NoImplicitFloatOps =
27330      LI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
27331  if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
27332      !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
27333      (Subtarget.hasSSE2() || Subtarget.hasX87()))
27334    return AtomicExpansionKind::None;
27335
27336  return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
27337                                 : AtomicExpansionKind::None;
27338}
27339
27340TargetLowering::AtomicExpansionKind
27341X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
27342  unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
27343  Type *MemType = AI->getType();
27344
27345  // If the operand is too big, we must see if cmpxchg8/16b is available
27346  // and default to library calls otherwise.
27347  if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
27348    return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
27349                                   : AtomicExpansionKind::None;
27350  }
27351
27352  AtomicRMWInst::BinOp Op = AI->getOperation();
27353  switch (Op) {
27354  default:
27355    llvm_unreachable("Unknown atomic operation");
27356  case AtomicRMWInst::Xchg:
27357  case AtomicRMWInst::Add:
27358  case AtomicRMWInst::Sub:
27359    // It's better to use xadd or xchg for these (sub reuses a negated xadd).
27360    return AtomicExpansionKind::None;
27361  case AtomicRMWInst::Or:
27362  case AtomicRMWInst::And:
27363  case AtomicRMWInst::Xor:
27364    // If the atomicrmw's result isn't actually used, we can just add a "lock"
27365    // prefix to a normal instruction for these operations.
27366    return !AI->use_empty() ? AtomicExpansionKind::CmpXChg
27367                            : AtomicExpansionKind::None;
27368  case AtomicRMWInst::Nand:
27369  case AtomicRMWInst::Max:
27370  case AtomicRMWInst::Min:
27371  case AtomicRMWInst::UMax:
27372  case AtomicRMWInst::UMin:
27373  case AtomicRMWInst::FAdd:
27374  case AtomicRMWInst::FSub:
27375    // These always require a non-trivial set of data operations on x86. We must
27376    // use a cmpxchg loop.
27377    return AtomicExpansionKind::CmpXChg;
27378  }
27379}
27380
27381LoadInst *
27382X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
27383  unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
27384  Type *MemType = AI->getType();
27385  // Accesses larger than the native width are turned into cmpxchg/libcalls, so
27386  // there is no benefit in turning such RMWs into loads, and it is actually
27387  // harmful as it introduces a mfence.
27388  if (MemType->getPrimitiveSizeInBits() > NativeWidth)
27389    return nullptr;
27390
27391  // If this is a canonical idempotent atomicrmw w/no uses, we have a better
27392  // lowering available in lowerAtomicArith.
27393  // TODO: push more cases through this path.
27394  if (auto *C = dyn_cast<ConstantInt>(AI->getValOperand()))
27395    if (AI->getOperation() == AtomicRMWInst::Or && C->isZero() &&
27396        AI->use_empty())
27397      return nullptr;
27398
27399  auto Builder = IRBuilder<>(AI);
27400  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
27401  auto SSID = AI->getSyncScopeID();
27402  // We must restrict the ordering to avoid generating loads with Release or
27403  // ReleaseAcquire orderings.
27404  auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
27405
27406  // Before the load we need a fence. Here is an example lifted from
27407  // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
27408  // is required:
27409  // Thread 0:
27410  //   x.store(1, relaxed);
27411  //   r1 = y.fetch_add(0, release);
27412  // Thread 1:
27413  //   y.fetch_add(42, acquire);
27414  //   r2 = x.load(relaxed);
27415  // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
27416  // lowered to just a load without a fence. A mfence flushes the store buffer,
27417  // making the optimization clearly correct.
27418  // FIXME: a fence is required if isReleaseOrStronger(Order), but it is not
27419  // clear whether one is needed otherwise; we might be able to be more
27420  // aggressive on relaxed idempotent rmws. In practice, they do not look
27421  // useful, so we don't try to be especially clever.
27422  if (SSID == SyncScope::SingleThread)
27423    // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
27424    // the IR level, so we must wrap it in an intrinsic.
27425    return nullptr;
27426
27427  if (!Subtarget.hasMFence())
27428    // FIXME: it might make sense to use a locked operation here but on a
27429    // different cache-line to prevent cache-line bouncing. In practice it
27430    // is probably a small win, and x86 processors without mfence are rare
27431    // enough that we do not bother.
27432    return nullptr;
27433
27434  Function *MFence =
27435      llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
27436  Builder.CreateCall(MFence, {});
27437
27438  // Finally we can emit the atomic load.
27439  LoadInst *Loaded =
27440      Builder.CreateAlignedLoad(AI->getType(), AI->getPointerOperand(),
27441                                AI->getType()->getPrimitiveSizeInBits());
27442  Loaded->setAtomic(Order, SSID);
27443  AI->replaceAllUsesWith(Loaded);
27444  AI->eraseFromParent();
27445  return Loaded;
27446}
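
// Illustrative example of the transformation above (approximate, assuming a
// seq_cst i32 rmw whose result is used):
//   %old = atomicrmw add i32* %p, i32 0 seq_cst
// becomes, roughly:
//   call void @llvm.x86.sse2.mfence()
//   %old = load atomic i32, i32* %p seq_cst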
27447
27448bool X86TargetLowering::lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const {
27449  if (!SI.isUnordered())
27450    return false;
27451  return ExperimentalUnorderedISEL;
27452}
27453bool X86TargetLowering::lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const {
27454  if (!LI.isUnordered())
27455    return false;
27456  return ExperimentalUnorderedISEL;
27457}
27458
27459
27460/// Emit a locked operation on a stack location which does not change any
27461/// memory location, but does involve a lock prefix.  Location is chosen to be
27462/// a) very likely accessed only by a single thread to minimize cache traffic,
27463/// and b) definitely dereferenceable.  Returns the new Chain result.
27464static SDValue emitLockedStackOp(SelectionDAG &DAG,
27465                                 const X86Subtarget &Subtarget,
27466                                 SDValue Chain, SDLoc DL) {
27467  // Implementation notes:
27468  // 1) LOCK prefix creates a full read/write reordering barrier for memory
27469  // operations issued by the current processor.  As such, the location
27470  // referenced is not relevant for the ordering properties of the instruction.
27471  // See: Intel® 64 and IA-32 Architectures Software Developer's Manual,
27472  // 8.2.3.9  Loads and Stores Are Not Reordered with Locked Instructions
27473  // 2) Using an immediate operand appears to be the best encoding choice
27474  // here since it doesn't require an extra register.
27475  // 3) OR appears to be very slightly faster than ADD. (Though, the difference
27476  // is small enough it might just be measurement noise.)
27477  // 4) When choosing offsets, there are several contributing factors:
27478  //   a) If there's no redzone, we default to TOS.  (We could allocate a cache
27479  //      line aligned stack object to improve this case.)
27480  //   b) To minimize our chances of introducing a false dependence, we prefer
27481  //      to offset the stack usage from TOS slightly.
27482  //   c) To minimize concerns about cross thread stack usage - in particular,
27483  //      the idiomatic MyThreadPool.run([&StackVars]() {...}) pattern which
27484  //      captures state in the TOS frame and accesses it from many threads -
27485  //      we want to use an offset such that the offset is in a distinct cache
27486  //      line from the TOS frame.
27487  //
27488  // For a general discussion of the tradeoffs and benchmark results, see:
27489  // https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
27490
27491  auto &MF = DAG.getMachineFunction();
27492  auto &TFL = *Subtarget.getFrameLowering();
27493  const unsigned SPOffset = TFL.has128ByteRedZone(MF) ? -64 : 0;
27494
27495  if (Subtarget.is64Bit()) {
27496    SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
27497    SDValue Ops[] = {
27498      DAG.getRegister(X86::RSP, MVT::i64),                  // Base
27499      DAG.getTargetConstant(1, DL, MVT::i8),                // Scale
27500      DAG.getRegister(0, MVT::i64),                         // Index
27501      DAG.getTargetConstant(SPOffset, DL, MVT::i32),        // Disp
27502      DAG.getRegister(0, MVT::i16),                         // Segment.
27503      Zero,
27504      Chain};
27505    SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
27506                                     MVT::Other, Ops);
27507    return SDValue(Res, 1);
27508  }
27509
27510  SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
27511  SDValue Ops[] = {
27512    DAG.getRegister(X86::ESP, MVT::i32),            // Base
27513    DAG.getTargetConstant(1, DL, MVT::i8),          // Scale
27514    DAG.getRegister(0, MVT::i32),                   // Index
27515    DAG.getTargetConstant(SPOffset, DL, MVT::i32),  // Disp
27516    DAG.getRegister(0, MVT::i16),                   // Segment.
27517    Zero,
27518    Chain
27519  };
27520  SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
27521                                   MVT::Other, Ops);
27522  return SDValue(Res, 1);
27523}
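
// For reference, the node built above selects to a lock-prefixed OR of an
// 8-bit immediate zero against a stack slot; in AT&T syntax this is roughly
//   lock orl $0, -64(%rsp)   (with a 128-byte red zone)
//   lock orl $0, (%rsp)      (64-bit, no red zone)
//   lock orl $0, (%esp)      (32-bit)
// per the SPOffset computation above.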
27524
27525static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
27526                                 SelectionDAG &DAG) {
27527  SDLoc dl(Op);
27528  AtomicOrdering FenceOrdering =
27529      static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
27530  SyncScope::ID FenceSSID =
27531      static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));
27532
27533  // The only fence that needs an instruction is a sequentially-consistent
27534  // cross-thread fence.
27535  if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
27536      FenceSSID == SyncScope::System) {
27537    if (Subtarget.hasMFence())
27538      return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
27539
27540    SDValue Chain = Op.getOperand(0);
27541    return emitLockedStackOp(DAG, Subtarget, Chain, dl);
27542  }
27543
27544  // MEMBARRIER is a compiler barrier; it codegens to a no-op.
27545  return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
27546}
27547
27548static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
27549                             SelectionDAG &DAG) {
27550  MVT T = Op.getSimpleValueType();
27551  SDLoc DL(Op);
27552  unsigned Reg = 0;
27553  unsigned size = 0;
27554  switch(T.SimpleTy) {
27555  default: llvm_unreachable("Invalid value type!");
27556  case MVT::i8:  Reg = X86::AL;  size = 1; break;
27557  case MVT::i16: Reg = X86::AX;  size = 2; break;
27558  case MVT::i32: Reg = X86::EAX; size = 4; break;
27559  case MVT::i64:
27560    assert(Subtarget.is64Bit() && "Node not type legal!");
27561    Reg = X86::RAX; size = 8;
27562    break;
27563  }
27564  SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
27565                                  Op.getOperand(2), SDValue());
27566  SDValue Ops[] = { cpIn.getValue(0),
27567                    Op.getOperand(1),
27568                    Op.getOperand(3),
27569                    DAG.getTargetConstant(size, DL, MVT::i8),
27570                    cpIn.getValue(1) };
27571  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
27572  MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
27573  SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
27574                                           Ops, T, MMO);
27575
27576  SDValue cpOut =
27577    DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
27578  SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
27579                                      MVT::i32, cpOut.getValue(2));
27580  SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);
27581
27582  return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
27583                     cpOut, Success, EFLAGS.getValue(1));
27584}
27585
27586// Create MOVMSKB, taking into account whether we need to split for AVX1.
27587static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG,
27588                           const X86Subtarget &Subtarget) {
27589  MVT InVT = V.getSimpleValueType();
27590
27591  if (InVT == MVT::v64i8) {
27592    SDValue Lo, Hi;
27593    std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
27594    Lo = getPMOVMSKB(DL, Lo, DAG, Subtarget);
27595    Hi = getPMOVMSKB(DL, Hi, DAG, Subtarget);
27596    Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Lo);
27597    Hi = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Hi);
27598    Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
27599                     DAG.getConstant(32, DL, MVT::i8));
27600    return DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
27601  }
27602  if (InVT == MVT::v32i8 && !Subtarget.hasInt256()) {
27603    SDValue Lo, Hi;
27604    std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
27605    Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
27606    Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
27607    Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
27608                     DAG.getConstant(16, DL, MVT::i8));
27609    return DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi);
27610  }
27611
27612  return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
27613}
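
// Illustrative sketch of the splitting above: on AVX1 a v32i8 mask is built
// from two 16-byte MOVMSK results, and v64i8 from two such 32-bit halves
// (names are hypothetical):
//   uint32_t Msk32 = LoMsk16 | ((uint32_t)HiMsk16 << 16);
//   uint64_t Msk64 = (uint64_t)LoMsk32 | ((uint64_t)HiMsk32 << 32);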
27614
27615static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
27616                            SelectionDAG &DAG) {
27617  SDValue Src = Op.getOperand(0);
27618  MVT SrcVT = Src.getSimpleValueType();
27619  MVT DstVT = Op.getSimpleValueType();
27620
27621  // Legalize (v64i1 (bitcast i64 (X))) by splitting the i64, bitcasting each
27622  // half to v32i1 and concatenating the result.
27623  if (SrcVT == MVT::i64 && DstVT == MVT::v64i1) {
27624    assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
27625    assert(Subtarget.hasBWI() && "Expected BWI target");
27626    SDLoc dl(Op);
27627    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
27628                             DAG.getIntPtrConstant(0, dl));
27629    Lo = DAG.getBitcast(MVT::v32i1, Lo);
27630    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
27631                             DAG.getIntPtrConstant(1, dl));
27632    Hi = DAG.getBitcast(MVT::v32i1, Hi);
27633    return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
27634  }
27635
27636  // Custom splitting for BWI types when AVX512F is available but BWI isn't.
27637  if ((SrcVT == MVT::v32i16 || SrcVT == MVT::v64i8) && DstVT.isVector() &&
27638    DAG.getTargetLoweringInfo().isTypeLegal(DstVT)) {
27639    SDLoc dl(Op);
27640    SDValue Lo, Hi;
27641    std::tie(Lo, Hi) = DAG.SplitVector(Op.getOperand(0), dl);
27642    MVT CastVT = DstVT.getHalfNumVectorElementsVT();
27643    Lo = DAG.getBitcast(CastVT, Lo);
27644    Hi = DAG.getBitcast(CastVT, Hi);
27645    return DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi);
27646  }
27647
27648  // Use MOVMSK for vector to scalar conversion to prevent scalarization.
27649  if ((SrcVT == MVT::v16i1 || SrcVT == MVT::v32i1) && DstVT.isScalarInteger()) {
27650    assert(!Subtarget.hasAVX512() && "Should use K-registers with AVX512");
27651    MVT SExtVT = SrcVT == MVT::v16i1 ? MVT::v16i8 : MVT::v32i8;
27652    SDLoc DL(Op);
27653    SDValue V = DAG.getSExtOrTrunc(Src, DL, SExtVT);
27654    V = getPMOVMSKB(DL, V, DAG, Subtarget);
27655    return DAG.getZExtOrTrunc(V, DL, DstVT);
27656  }
27657
27658  assert((SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
27659          SrcVT == MVT::i64) && "Unexpected VT!");
27660
27661  assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
27662  if (!(DstVT == MVT::f64 && SrcVT == MVT::i64) &&
27663      !(DstVT == MVT::x86mmx && SrcVT.isVector()))
27664    // This conversion needs to be expanded.
27665    return SDValue();
27666
27667  SDLoc dl(Op);
27668  if (SrcVT.isVector()) {
27669    // Widen the input vector in the case of MVT::v2i32.
27670    // Example: from MVT::v2i32 to MVT::v4i32.
27671    MVT NewVT = MVT::getVectorVT(SrcVT.getVectorElementType(),
27672                                 SrcVT.getVectorNumElements() * 2);
27673    Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewVT, Src,
27674                      DAG.getUNDEF(SrcVT));
27675  } else {
27676    assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
27677           "Unexpected source type in LowerBITCAST");
27678    Src = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
27679  }
27680
27681  MVT V2X64VT = DstVT == MVT::f64 ? MVT::v2f64 : MVT::v2i64;
27682  Src = DAG.getNode(ISD::BITCAST, dl, V2X64VT, Src);
27683
27684  if (DstVT == MVT::x86mmx)
27685    return DAG.getNode(X86ISD::MOVDQ2Q, dl, DstVT, Src);
27686
27687  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, DstVT, Src,
27688                     DAG.getIntPtrConstant(0, dl));
27689}
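
// Worked example for the non-vector path above on a 32-bit target
// (illustrative node sequence, not verbatim output):
//   (f64 (bitcast i64:x))
//     -> (v2i64 (scalar_to_vector i64:x))
//     -> (v2f64 (bitcast ...))
//     -> (f64 (extract_vector_elt ..., 0))
// The v2i32 -> x86mmx case instead widens to v4i32, bitcasts to v2i64 and
// emits MOVDQ2Q.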
27690
27691/// Compute the horizontal sum of bytes in V for the elements of VT.
27692///
27693/// Requires V to be a byte vector and VT to be an integer vector type with
27694/// wider elements than V's type. The width of the elements of VT determines
27695/// how many bytes of V are summed horizontally to produce each element of the
27696/// result.
27697static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
27698                                      const X86Subtarget &Subtarget,
27699                                      SelectionDAG &DAG) {
27700  SDLoc DL(V);
27701  MVT ByteVecVT = V.getSimpleValueType();
27702  MVT EltVT = VT.getVectorElementType();
27703  assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
27704         "Expected value to have byte element type.");
27705  assert(EltVT != MVT::i8 &&
27706         "Horizontal byte sum only makes sense for wider elements!");
27707  unsigned VecSize = VT.getSizeInBits();
27708  assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
27709
27710  // PSADBW instruction horizontally add all bytes and leave the result in i64
27711  // chunks, thus directly computes the pop count for v2i64 and v4i64.
27712  if (EltVT == MVT::i64) {
27713    SDValue Zeros = DAG.getConstant(0, DL, ByteVecVT);
27714    MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
27715    V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
27716    return DAG.getBitcast(VT, V);
27717  }
27718
27719  if (EltVT == MVT::i32) {
27720    // We unpack the low half and high half into i32s interleaved with zeros so
27721    // that we can use PSADBW to horizontally sum them. The most useful part of
27722    // this is that it lines up the results of two PSADBW instructions to be
27723    // two v2i64 vectors which concatenated are the 4 population counts. We can
27724    // then use PACKUSWB to shrink and concatenate them into a v4i32 again.
27725    SDValue Zeros = DAG.getConstant(0, DL, VT);
27726    SDValue V32 = DAG.getBitcast(VT, V);
27727    SDValue Low = getUnpackl(DAG, DL, VT, V32, Zeros);
27728    SDValue High = getUnpackh(DAG, DL, VT, V32, Zeros);
27729
27730    // Do the horizontal sums into two v2i64s.
27731    Zeros = DAG.getConstant(0, DL, ByteVecVT);
27732    MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
27733    Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
27734                      DAG.getBitcast(ByteVecVT, Low), Zeros);
27735    High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
27736                       DAG.getBitcast(ByteVecVT, High), Zeros);
27737
27738    // Merge them together.
27739    MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
27740    V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
27741                    DAG.getBitcast(ShortVecVT, Low),
27742                    DAG.getBitcast(ShortVecVT, High));
27743
27744    return DAG.getBitcast(VT, V);
27745  }
27746
27747  // The only element type left is i16.
27748  assert(EltVT == MVT::i16 && "Unknown how to handle type");
27749
27750  // To obtain the pop count for each i16 element starting from the pop count of
27751  // the i8 elements, shift the i16s left by 8, sum as i8s, and then shift the
27752  // i16s right by 8. It is important to shift as i16s because an i8 vector
27753  // shift isn't directly supported.
27754  SDValue ShifterV = DAG.getConstant(8, DL, VT);
27755  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
27756  V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
27757                  DAG.getBitcast(ByteVecVT, V));
27758  return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
27759}
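
// Scalar sketch of the i16 path above (illustrative only; per-byte counts Lo
// and Hi are each <= 8, so the byte-wise add cannot carry between bytes):
//   uint16_t Lane = (uint16_t)((Hi << 8) | Lo); // two packed per-byte counts
//   uint16_t Shl  = (uint16_t)(Lane << 8);      // == Lo << 8
//   uint16_t Sum  = (uint16_t)(((Hi + Lo) << 8) | Lo); // byte-wise Shl + Lane
//   uint16_t Res  = (uint16_t)(Sum >> 8);       // == Hi + Lo, the pop count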
27760
27761static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
27762                                        const X86Subtarget &Subtarget,
27763                                        SelectionDAG &DAG) {
27764  MVT VT = Op.getSimpleValueType();
27765  MVT EltVT = VT.getVectorElementType();
27766  int NumElts = VT.getVectorNumElements();
27767  (void)EltVT;
27768  assert(EltVT == MVT::i8 && "Only vXi8 vector CTPOP lowering supported.");
27769
27770  // Implement a lookup table in register by using an algorithm based on:
27771  // http://wm.ite.pl/articles/sse-popcount.html
27772  //
27773  // The general idea is that the lower nibble of every byte in the input vector
27774  // is an index into an in-register, pre-computed pop count table. We then split
27775  // the input vector into two new ones: (1) a vector with only the shifted-right
27776  // higher nibbles of each byte and (2) a vector with the lower nibbles (higher
27777  // ones masked out) of each byte. PSHUFB is used separately with both to index
27778  // the in-register table. Next, both results are added, giving an i8 vector
27779  // where each element contains the pop count of the corresponding input byte.
27780  const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
27781                       /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
27782                       /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
27783                       /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
27784
27785  SmallVector<SDValue, 64> LUTVec;
27786  for (int i = 0; i < NumElts; ++i)
27787    LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
27788  SDValue InRegLUT = DAG.getBuildVector(VT, DL, LUTVec);
27789  SDValue M0F = DAG.getConstant(0x0F, DL, VT);
27790
27791  // High nibbles
27792  SDValue FourV = DAG.getConstant(4, DL, VT);
27793  SDValue HiNibbles = DAG.getNode(ISD::SRL, DL, VT, Op, FourV);
27794
27795  // Low nibbles
27796  SDValue LoNibbles = DAG.getNode(ISD::AND, DL, VT, Op, M0F);
27797
27798  // The input vector is used as the shuffle mask that index elements into the
27799  // LUT. After counting low and high nibbles, add the vector to obtain the
27800  // final pop count per i8 element.
27801  SDValue HiPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, HiNibbles);
27802  SDValue LoPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, LoNibbles);
27803  return DAG.getNode(ISD::ADD, DL, VT, HiPopCnt, LoPopCnt);
27804}
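
// Scalar reference for the LUT algorithm above (illustrative sketch; the
// names below are not from this file):
//   static const uint8_t PopLUT[16] = {0, 1, 1, 2, 1, 2, 2, 3,
//                                      1, 2, 2, 3, 2, 3, 3, 4};
//   uint8_t PopCountByte(uint8_t B) {
//     return PopLUT[B & 0x0F] + PopLUT[B >> 4]; // low nibble + high nibble
//   }
// The pair of PSHUFBs above performs this table lookup on all bytes at once.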
27805
27806// Please ensure that any codegen change from LowerVectorCTPOP is reflected in
27807// updated cost models in X86TTIImpl::getIntrinsicInstrCost.
27808static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
27809                                SelectionDAG &DAG) {
27810  MVT VT = Op.getSimpleValueType();
27811  assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
27812         "Unknown CTPOP type to handle");
27813  SDLoc DL(Op.getNode());
27814  SDValue Op0 = Op.getOperand(0);
27815
27816  // TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions.
27817  if (Subtarget.hasVPOPCNTDQ()) {
27818    unsigned NumElems = VT.getVectorNumElements();
27819    assert((VT.getVectorElementType() == MVT::i8 ||
27820            VT.getVectorElementType() == MVT::i16) && "Unexpected type");
27821    if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) {
27822      MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
27823      Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0);
27824      Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op);
27825      return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
27826    }
27827  }
27828
27829  // Decompose 256-bit ops into smaller 128-bit ops.
27830  if (VT.is256BitVector() && !Subtarget.hasInt256())
27831    return Lower256IntUnary(Op, DAG);
27832
27833  // Decompose 512-bit ops into smaller 256-bit ops.
27834  if (VT.is512BitVector() && !Subtarget.hasBWI())
27835    return Lower512IntUnary(Op, DAG);
27836
27837  // For element types greater than i8, do vXi8 pop counts and a bytesum.
27838  if (VT.getScalarType() != MVT::i8) {
27839    MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
27840    SDValue ByteOp = DAG.getBitcast(ByteVT, Op0);
27841    SDValue PopCnt8 = DAG.getNode(ISD::CTPOP, DL, ByteVT, ByteOp);
27842    return LowerHorizontalByteSum(PopCnt8, VT, Subtarget, DAG);
27843  }
27844
27845  // We can't use the fast LUT approach, so fall back on LegalizeDAG.
27846  if (!Subtarget.hasSSSE3())
27847    return SDValue();
27848
27849  return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
27850}
27851
27852static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
27853                          SelectionDAG &DAG) {
27854  assert(Op.getSimpleValueType().isVector() &&
27855         "We only do custom lowering for vector population count.");
27856  return LowerVectorCTPOP(Op, Subtarget, DAG);
27857}
27858
27859static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
27860  MVT VT = Op.getSimpleValueType();
27861  SDValue In = Op.getOperand(0);
27862  SDLoc DL(Op);
27863
27864  // For scalars, it's still beneficial to transfer to/from the SIMD unit to
27865  // perform the BITREVERSE.
27866  if (!VT.isVector()) {
27867    MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
27868    SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
27869    Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
27870    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
27871                       DAG.getIntPtrConstant(0, DL));
27872  }
27873
27874  int NumElts = VT.getVectorNumElements();
27875  int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
27876
27877  // Decompose 256-bit ops into smaller 128-bit ops.
27878  if (VT.is256BitVector())
27879    return Lower256IntUnary(Op, DAG);
27880
27881  assert(VT.is128BitVector() &&
27882         "Only 128-bit vector bitreverse lowering supported.");
27883
27884  // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
27885  // perform the BSWAP in the shuffle.
27886  // It's best to shuffle using the second operand, as this will implicitly
27887  // allow memory folding for multiple vectors.
27888  SmallVector<SDValue, 16> MaskElts;
27889  for (int i = 0; i != NumElts; ++i) {
27890    for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
27891      int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
27892      int PermuteByte = SourceByte | (2 << 5);
27893      MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
27894    }
27895  }
27896
27897  SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
27898  SDValue Res = DAG.getBitcast(MVT::v16i8, In);
27899  Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
27900                    Res, Mask);
27901  return DAG.getBitcast(VT, Res);
27902}
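
// Worked example of the mask construction above for v4i32: the first output
// byte (i = 0, j = 3) selects SourceByte = 16 + 3 = 19, i.e. byte 3 of the
// second source, and PermuteByte = 19 | (2 << 5) = 0x53, where the (2 << 5)
// operation field makes VPPERM bit-reverse the selected byte. Walking j from
// high to low provides the per-element BSWAP.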
27903
27904static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
27905                               SelectionDAG &DAG) {
27906  MVT VT = Op.getSimpleValueType();
27907
27908  if (Subtarget.hasXOP() && !VT.is512BitVector())
27909    return LowerBITREVERSE_XOP(Op, DAG);
27910
27911  assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
27912
27913  SDValue In = Op.getOperand(0);
27914  SDLoc DL(Op);
27915
27916  // Split v8i64/v16i32 without BWI so that we can still use the PSHUFB
27917  // lowering.
27918  if (VT == MVT::v8i64 || VT == MVT::v16i32) {
27919    assert(!Subtarget.hasBWI() && "BWI should Expand BITREVERSE");
27920    return Lower512IntUnary(Op, DAG);
27921  }
27922
27923  unsigned NumElts = VT.getVectorNumElements();
27924  assert(VT.getScalarType() == MVT::i8 &&
27925         "Only byte vector BITREVERSE supported");
27926
27927  // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
27928  if (VT.is256BitVector() && !Subtarget.hasInt256())
27929    return Lower256IntUnary(Op, DAG);
27930
27931  // Perform BITREVERSE using PSHUFB lookups. Each byte is split into its
27932  // two nibbles, and a PSHUFB lookup finds the bit-reverse of each 0-15
27933  // value (moved to the other nibble).
27934  SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
27935  SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
27936  SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
27937
27938  const int LoLUT[16] = {
27939      /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
27940      /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
27941      /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
27942      /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
27943  const int HiLUT[16] = {
27944      /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
27945      /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
27946      /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
27947      /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
27948
27949  SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
27950  for (unsigned i = 0; i < NumElts; ++i) {
27951    LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
27952    HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
27953  }
27954
27955  SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
27956  SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
27957  Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
27958  Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
27959  return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
27960}
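
// Scalar sketch of the two-LUT lookup above (illustrative only): LoLUT maps a
// nibble n to bit-reverse(n) << 4 and HiLUT maps n to bit-reverse(n), so per
// byte the combined result is
//   uint8_t ReverseByte(uint8_t B) {            // hypothetical helper
//     return (uint8_t)(LoLUT[B & 0x0F] | HiLUT[B >> 4]);
//   }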
27961
27962static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
27963                                        const X86Subtarget &Subtarget) {
27964  unsigned NewOpc = 0;
27965  switch (N->getOpcode()) {
27966  case ISD::ATOMIC_LOAD_ADD:
27967    NewOpc = X86ISD::LADD;
27968    break;
27969  case ISD::ATOMIC_LOAD_SUB:
27970    NewOpc = X86ISD::LSUB;
27971    break;
27972  case ISD::ATOMIC_LOAD_OR:
27973    NewOpc = X86ISD::LOR;
27974    break;
27975  case ISD::ATOMIC_LOAD_XOR:
27976    NewOpc = X86ISD::LXOR;
27977    break;
27978  case ISD::ATOMIC_LOAD_AND:
27979    NewOpc = X86ISD::LAND;
27980    break;
27981  default:
27982    llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
27983  }
27984
27985  MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
27986
27987  return DAG.getMemIntrinsicNode(
27988      NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
27989      {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
27990      /*MemVT=*/N->getSimpleValueType(0), MMO);
27991}
27992
27993/// Lower atomic_load_ops into LOCK-prefixed operations.
27994static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
27995                                const X86Subtarget &Subtarget) {
27996  AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
27997  SDValue Chain = N->getOperand(0);
27998  SDValue LHS = N->getOperand(1);
27999  SDValue RHS = N->getOperand(2);
28000  unsigned Opc = N->getOpcode();
28001  MVT VT = N->getSimpleValueType(0);
28002  SDLoc DL(N);
28003
28004  // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
28005  // can only be lowered when the result is unused.  They should have already
28006  // been transformed into a cmpxchg loop in AtomicExpand.
28007  if (N->hasAnyUseOfValue(0)) {
28008    // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
28009    // select LXADD if LOCK_SUB can't be selected.
28010    if (Opc == ISD::ATOMIC_LOAD_SUB) {
28011      RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
28012      return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS,
28013                           RHS, AN->getMemOperand());
28014    }
28015    assert(Opc == ISD::ATOMIC_LOAD_ADD &&
28016           "Used AtomicRMW ops other than Add should have been expanded!");
28017    return N;
28018  }
28019
28020  // Specialized lowering for the canonical form of an idempotent atomicrmw.
28021  // The core idea here is that since the memory location isn't actually
28022  // changing, all we need is a lowering for the *ordering* impacts of the
28023  // atomicrmw.  As such, we can choose a different operation and memory
28024  // location to minimize the impact on other code.
28025  if (Opc == ISD::ATOMIC_LOAD_OR && isNullConstant(RHS)) {
28026    // On X86, the only ordering which actually requires an instruction is a
28027    // seq_cst ordering that isn't SingleThread; everything else just needs to
28028    // be preserved during codegen and then dropped. Note that we expect (but
28029    // don't assume) that orderings other than seq_cst and acq_rel have been
28030    // canonicalized to a store or load.
28031    if (AN->getOrdering() == AtomicOrdering::SequentiallyConsistent &&
28032        AN->getSyncScopeID() == SyncScope::System) {
28033      // Prefer a locked operation against a stack location to minimize cache
28034      // traffic.  This assumes that stack locations are very likely to be
28035      // accessed only by the owning thread.
28036      SDValue NewChain = emitLockedStackOp(DAG, Subtarget, Chain, DL);
28037      assert(!N->hasAnyUseOfValue(0));
28038      // NOTE: The getUNDEF is needed to give something for the unused result 0.
28039      return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
28040                         DAG.getUNDEF(VT), NewChain);
28041    }
28042    // MEMBARRIER is a compiler barrier; it codegens to a no-op.
28043    SDValue NewChain = DAG.getNode(X86ISD::MEMBARRIER, DL, MVT::Other, Chain);
28044    assert(!N->hasAnyUseOfValue(0));
28045    // NOTE: The getUNDEF is needed to give something for the unused result 0.
28046    return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
28047                       DAG.getUNDEF(VT), NewChain);
28048  }
28049
28050  SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG, Subtarget);
28051  // RAUW the chain, but don't worry about the result, as it's unused.
28052  assert(!N->hasAnyUseOfValue(0));
28053  // NOTE: The getUNDEF is needed to give something for the unused result 0.
28054  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
28055                     DAG.getUNDEF(VT), LockOp.getValue(1));
28056}
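
// Worked example for the used-result path above:
//   old = atomic_load_sub(p, v)  ==>  old = atomic_load_add(p, 0 - v)
// e.g. an atomic subtract of 5 with a live result becomes a LOCK XADD of -5;
// the returned old value is identical either way.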
28057
28058static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG,
28059                                 const X86Subtarget &Subtarget) {
28060  auto *Node = cast<AtomicSDNode>(Op.getNode());
28061  SDLoc dl(Node);
28062  EVT VT = Node->getMemoryVT();
28063
28064  bool IsSeqCst = Node->getOrdering() == AtomicOrdering::SequentiallyConsistent;
28065  bool IsTypeLegal = DAG.getTargetLoweringInfo().isTypeLegal(VT);
28066
28067  // If this store is not sequentially consistent and the type is legal
28068  // we can just keep it.
28069  if (!IsSeqCst && IsTypeLegal)
28070    return Op;
28071
28072  if (VT == MVT::i64 && !IsTypeLegal) {
28073    // For illegal i64 atomic_stores, we can try to use MOVQ if SSE2 is enabled.
28074    // FIXME: Use movlps with SSE1.
28075    // FIXME: Use fist with X87.
28076    bool NoImplicitFloatOps =
28077        DAG.getMachineFunction().getFunction().hasFnAttribute(
28078            Attribute::NoImplicitFloat);
28079    if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
28080        Subtarget.hasSSE2()) {
28081      SDValue SclToVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
28082                                     Node->getOperand(2));
28083      SDVTList Tys = DAG.getVTList(MVT::Other);
28084      SDValue Ops[] = { Node->getChain(), SclToVec, Node->getBasePtr() };
28085      SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys,
28086                                              Ops, MVT::i64,
28087                                              Node->getMemOperand());
28088
28089      // If this is a sequentially consistent store, also emit an appropriate
28090      // barrier.
28091      if (IsSeqCst)
28092        Chain = emitLockedStackOp(DAG, Subtarget, Chain, dl);
28093
28094      return Chain;
28095    }
28096  }
28097
28098  // Convert seq_cst store -> xchg
28099  // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
28100  // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
28101  SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
28102                               Node->getMemoryVT(),
28103                               Node->getOperand(0),
28104                               Node->getOperand(1), Node->getOperand(2),
28105                               Node->getMemOperand());
28106  return Swap.getValue(1);
28107}
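
// Worked examples for the fallbacks above (illustrative): a seq_cst i32 store
// becomes an atomic swap (XCHG) whose value result is simply dropped, and on
// a 32-bit SSE2 target an i64 atomic store becomes a VEXTRACT_STORE of the
// low 64 bits of the v2i64 built by SCALAR_TO_VECTOR, followed by the locked
// stack op when the store is seq_cst.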
28108
28109static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
28110  SDNode *N = Op.getNode();
28111  MVT VT = N->getSimpleValueType(0);
28112
28113  // Let legalize expand this if it isn't a legal type yet.
28114  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
28115    return SDValue();
28116
28117  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
28118  SDLoc DL(N);
28119
28120  // Set the carry flag.
28121  SDValue Carry = Op.getOperand(2);
28122  EVT CarryVT = Carry.getValueType();
28123  APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
28124  Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
28125                      Carry, DAG.getConstant(NegOne, DL, CarryVT));
28126
28127  unsigned Opc = Op.getOpcode() == ISD::ADDCARRY ? X86ISD::ADC : X86ISD::SBB;
28128  SDValue Sum = DAG.getNode(Opc, DL, VTs, Op.getOperand(0),
28129                            Op.getOperand(1), Carry.getValue(1));
28130
28131  SDValue SetCC = getSETCC(X86::COND_B, Sum.getValue(1), DL, DAG);
28132  if (N->getValueType(1) == MVT::i1)
28133    SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
28134
28135  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
28136}
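
// Worked example of the carry re-materialization above: adding the incoming
// carry c to an all-ones constant produces a carry-out exactly when c != 0,
// which reloads CF for the ADC/SBB. For an 8-bit carry value:
//   c = 0:  0x00 + 0xFF = 0x0FF  ->  CF = 0
//   c = 1:  0x01 + 0xFF = 0x100  ->  CF = 1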
28137
28138static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
28139                            SelectionDAG &DAG) {
28140  assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
28141
28142  // For MacOSX, we want to call an alternative entry point: __sincos_stret,
28143  // which returns the values as { float, float } (in XMM0) or
28144  // { double, double } (which is returned in XMM0, XMM1).
28145  SDLoc dl(Op);
28146  SDValue Arg = Op.getOperand(0);
28147  EVT ArgVT = Arg.getValueType();
28148  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
28149
28150  TargetLowering::ArgListTy Args;
28151  TargetLowering::ArgListEntry Entry;
28152
28153  Entry.Node = Arg;
28154  Entry.Ty = ArgTy;
28155  Entry.IsSExt = false;
28156  Entry.IsZExt = false;
28157  Args.push_back(Entry);
28158
28159  bool isF64 = ArgVT == MVT::f64;
28160  // Only optimize x86_64 for now. i386 is a bit messy. For f32,
28161  // the small struct {f32, f32} is returned in (eax, edx). For f64,
28162  // the results are returned via SRet in memory.
28163  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28164  RTLIB::Libcall LC = isF64 ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
28165  const char *LibcallName = TLI.getLibcallName(LC);
28166  SDValue Callee =
28167      DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
28168
28169  Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy)
28170                      : (Type *)VectorType::get(ArgTy, 4);
28171
28172  TargetLowering::CallLoweringInfo CLI(DAG);
28173  CLI.setDebugLoc(dl)
28174      .setChain(DAG.getEntryNode())
28175      .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
28176
28177  std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
28178
28179  if (isF64)
28180    // Returned in xmm0 and xmm1.
28181    return CallResult.first;
28182
28183  // Returned in bits 0:31 and 32:64 xmm0.
28184  SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
28185                               CallResult.first, DAG.getIntPtrConstant(0, dl));
28186  SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
28187                               CallResult.first, DAG.getIntPtrConstant(1, dl));
28188  SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
28189  return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
28190}
28191
28192/// Widen a vector input to a vector of NVT.  The
28193/// input vector must have the same element type as NVT.
28194static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
28195                            bool FillWithZeroes = false) {
28196  // Check if InOp already has the right width.
28197  MVT InVT = InOp.getSimpleValueType();
28198  if (InVT == NVT)
28199    return InOp;
28200
28201  if (InOp.isUndef())
28202    return DAG.getUNDEF(NVT);
28203
28204  assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
28205         "input and widen element type must match");
28206
28207  unsigned InNumElts = InVT.getVectorNumElements();
28208  unsigned WidenNumElts = NVT.getVectorNumElements();
28209  assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
28210         "Unexpected request for vector widening");
28211
28212  SDLoc dl(InOp);
28213  if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
28214      InOp.getNumOperands() == 2) {
28215    SDValue N1 = InOp.getOperand(1);
28216    if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
28217        N1.isUndef()) {
28218      InOp = InOp.getOperand(0);
28219      InVT = InOp.getSimpleValueType();
28220      InNumElts = InVT.getVectorNumElements();
28221    }
28222  }
28223  if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
28224      ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
28225    SmallVector<SDValue, 16> Ops;
28226    for (unsigned i = 0; i < InNumElts; ++i)
28227      Ops.push_back(InOp.getOperand(i));
28228
28229    EVT EltVT = InOp.getOperand(0).getValueType();
28230
28231    SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
28232      DAG.getUNDEF(EltVT);
28233    for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
28234      Ops.push_back(FillVal);
28235    return DAG.getBuildVector(NVT, dl, Ops);
28236  }
28237  SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
28238    DAG.getUNDEF(NVT);
28239  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
28240                     InOp, DAG.getIntPtrConstant(0, dl));
28241}
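
// Illustrative example of the widening above: extending a v2i1 mask {m0, m1}
// to v8i1 with FillWithZeroes gives {m0, m1, 0, 0, 0, 0, 0, 0}, built as a
// constant build_vector when the input is constant and otherwise as an
// INSERT_SUBVECTOR into a zero (or undef) vector.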
28242
28243static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
28244                             SelectionDAG &DAG) {
28245  assert(Subtarget.hasAVX512() &&
28246         "MGATHER/MSCATTER are supported on AVX-512 arch only");
28247
28248  MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
28249  SDValue Src = N->getValue();
28250  MVT VT = Src.getSimpleValueType();
28251  assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
28252  SDLoc dl(Op);
28253
28254  SDValue Scale = N->getScale();
28255  SDValue Index = N->getIndex();
28256  SDValue Mask = N->getMask();
28257  SDValue Chain = N->getChain();
28258  SDValue BasePtr = N->getBasePtr();
28259
28260  if (VT == MVT::v2f32 || VT == MVT::v2i32) {
28261    assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
28262    // If the index is v2i64 and we have VLX we can use xmm for data and index.
28263    if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
28264      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28265      EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
28266      Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Src, DAG.getUNDEF(VT));
28267      SDVTList VTs = DAG.getVTList(MVT::v2i1, MVT::Other);
28268      SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
28269      SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
28270          VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
28271      return SDValue(NewScatter.getNode(), 1);
28272    }
28273    return SDValue();
28274  }
28275
28276  MVT IndexVT = Index.getSimpleValueType();
28277  MVT MaskVT = Mask.getSimpleValueType();
28278
28279  // If the index is v2i32, we're being called by type legalization and we
28280  // should just let the default handling take care of it.
28281  if (IndexVT == MVT::v2i32)
28282    return SDValue();
28283
28284  // If we don't have VLX and neither the data nor the index is 512 bits, we
28285  // need to widen until one is.
28286  if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
28287      !Index.getSimpleValueType().is512BitVector()) {
28288    // Determine how much we need to widen by to get a 512-bit type.
28289    unsigned Factor = std::min(512/VT.getSizeInBits(),
28290                               512/IndexVT.getSizeInBits());
28291    unsigned NumElts = VT.getVectorNumElements() * Factor;
28292
28293    VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
28294    IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
28295    MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
28296
28297    Src = ExtendToType(Src, VT, DAG);
28298    Index = ExtendToType(Index, IndexVT, DAG);
28299    Mask = ExtendToType(Mask, MaskVT, DAG, true);
28300  }
28301
28302  SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
28303  SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
28304  SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
28305      VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
28306  return SDValue(NewScatter.getNode(), 1);
28307}
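
// Worked example of the widening above (no VLX): scattering v4f32 data with a
// v4i64 index gives Factor = min(512/128, 512/256) = 2, so the operation is
// widened to v8f32 data, a v8i64 index and a v8i1 mask (the mask is extended
// with zeroes so the new lanes do not store anything).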
28308
28309static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
28310                          SelectionDAG &DAG) {
28311
28312  MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
28313  MVT VT = Op.getSimpleValueType();
28314  MVT ScalarVT = VT.getScalarType();
28315  SDValue Mask = N->getMask();
28316  MVT MaskVT = Mask.getSimpleValueType();
28317  SDValue PassThru = N->getPassThru();
28318  SDLoc dl(Op);
28319
28320  // Handle AVX masked loads which don't support passthru other than 0.
28321  if (MaskVT.getVectorElementType() != MVT::i1) {
28322    // We also allow undef in the isel pattern.
28323    if (PassThru.isUndef() || ISD::isBuildVectorAllZeros(PassThru.getNode()))
28324      return Op;
28325
28326    SDValue NewLoad = DAG.getMaskedLoad(
28327        VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
28328        getZeroVector(VT, Subtarget, DAG, dl), N->getMemoryVT(),
28329        N->getMemOperand(), N->getAddressingMode(), N->getExtensionType(),
28330        N->isExpandingLoad());
28331    // Emit a blend.
28332    SDValue Select = DAG.getNode(ISD::VSELECT, dl, MaskVT, Mask, NewLoad,
28333                                 PassThru);
28334    return DAG.getMergeValues({ Select, NewLoad.getValue(1) }, dl);
28335  }
28336
28337  assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
28338         "Expanding masked load is supported on AVX-512 target only!");
28339
28340  assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
28341         "Expanding masked load is supported for 32 and 64-bit types only!");
28342
28343  assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
28344         "Cannot lower masked load op.");
28345
28346  assert((ScalarVT.getSizeInBits() >= 32 ||
28347          (Subtarget.hasBWI() &&
28348              (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
28349         "Unsupported masked load op.");
28350
28351  // This operation is legal for targets with VLX, but without
28352  // VLX the vector should be widened to 512 bits.
28353  unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
28354  MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
28355  PassThru = ExtendToType(PassThru, WideDataVT, DAG);
28356
28357  // Mask element has to be i1.
28358  assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
28359         "Unexpected mask type");
28360
28361  MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
28362
28363  Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
28364  SDValue NewLoad = DAG.getMaskedLoad(
28365      WideDataVT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
28366      PassThru, N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
28367      N->getExtensionType(), N->isExpandingLoad());
28368
28369  SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
28370                                NewLoad.getValue(0),
28371                                DAG.getIntPtrConstant(0, dl));
28372  SDValue RetOps[] = {Extract, NewLoad.getValue(1)};
28373  return DAG.getMergeValues(RetOps, dl);
28374}
28375
28376static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
28377                           SelectionDAG &DAG) {
28378  MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
28379  SDValue DataToStore = N->getValue();
28380  MVT VT = DataToStore.getSimpleValueType();
28381  MVT ScalarVT = VT.getScalarType();
28382  SDValue Mask = N->getMask();
28383  SDLoc dl(Op);
28384
28385  assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
28386         "Compressing masked store is supported on AVX-512 target only!");
28387
28388  assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
28389         "Compressing masked store is supported for 32 and 64-bit types only!");
28390
28391  assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
28392         "Cannot lower masked store op.");
28393
28394  assert((ScalarVT.getSizeInBits() >= 32 ||
28395          (Subtarget.hasBWI() &&
28396              (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
28397          "Unsupported masked store op.");
28398
28399  // This operation is legal for targets with VLX, but without
28400  // VLX the vector should be widened to 512 bits.
28401  unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
28402  MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
28403
28404  // Mask element has to be i1.
28405  assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
28406         "Unexpected mask type");
28407
28408  MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
28409
28410  DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
28411  Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
28412  return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
28413                            N->getOffset(), Mask, N->getMemoryVT(),
28414                            N->getMemOperand(), N->getAddressingMode(),
28415                            N->isTruncatingStore(), N->isCompressingStore());
28416}
28417
28418static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
28419                            SelectionDAG &DAG) {
28420  assert(Subtarget.hasAVX2() &&
28421         "MGATHER/MSCATTER are supported on AVX-512/AVX-2 arch only");
28422
28423  MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
28424  SDLoc dl(Op);
28425  MVT VT = Op.getSimpleValueType();
28426  SDValue Index = N->getIndex();
28427  SDValue Mask = N->getMask();
28428  SDValue PassThru = N->getPassThru();
28429  MVT IndexVT = Index.getSimpleValueType();
28430  MVT MaskVT = Mask.getSimpleValueType();
28431
28432  assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
28433
28434  // If the index is v2i32, we're being called by type legalization.
28435  if (IndexVT == MVT::v2i32)
28436    return SDValue();
28437
28438  // If we don't have VLX and neither the passthru nor the index is 512 bits, we
28439  // need to widen until one is.
28440  MVT OrigVT = VT;
28441  if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
28442      !IndexVT.is512BitVector()) {
28443    // Determine how much we need to widen by to get a 512-bit type.
28444    unsigned Factor = std::min(512/VT.getSizeInBits(),
28445                               512/IndexVT.getSizeInBits());
28446
28447    unsigned NumElts = VT.getVectorNumElements() * Factor;
28448
28449    VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
28450    IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
28451    MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
28452
28453    PassThru = ExtendToType(PassThru, VT, DAG);
28454    Index = ExtendToType(Index, IndexVT, DAG);
28455    Mask = ExtendToType(Mask, MaskVT, DAG, true);
28456  }
28457
28458  SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
28459                    N->getScale() };
28460  SDValue NewGather = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
28461      DAG.getVTList(VT, MaskVT, MVT::Other), Ops, dl, N->getMemoryVT(),
28462      N->getMemOperand());
28463  SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OrigVT,
28464                                NewGather, DAG.getIntPtrConstant(0, dl));
28465  return DAG.getMergeValues({Extract, NewGather.getValue(2)}, dl);
28466}
28467
28468static SDValue LowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) {
28469  SDLoc dl(Op);
28470  SDValue Src = Op.getOperand(0);
28471  MVT DstVT = Op.getSimpleValueType();
28472
28473  AddrSpaceCastSDNode *N = cast<AddrSpaceCastSDNode>(Op.getNode());
28474  unsigned SrcAS = N->getSrcAddressSpace();
28475
28476  assert(SrcAS != N->getDestAddressSpace() &&
28477         "addrspacecast must be between different address spaces");
28478
28479  if (SrcAS == X86AS::PTR32_UPTR && DstVT == MVT::i64) {
28480    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Src);
28481  } else if (DstVT == MVT::i64) {
28482    Op = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Src);
28483  } else if (DstVT == MVT::i32) {
28484    Op = DAG.getNode(ISD::TRUNCATE, dl, DstVT, Src);
28485  } else {
28486    report_fatal_error("Bad address space in addrspacecast");
28487  }
28488  return Op;
28489}
28490
28491SDValue X86TargetLowering::LowerGC_TRANSITION(SDValue Op,
28492                                              SelectionDAG &DAG) const {
28493  // TODO: Eventually, the lowering of these nodes should be informed by or
28494  // deferred to the GC strategy for the function in which they appear. For
28495  // now, however, they must be lowered to something. Since they are logically
28496  // no-ops in the case of a null GC strategy (or a GC strategy which does not
28497  // require special handling for these nodes), lower them as literal NOOPs for
28498  // the time being.
28499  SmallVector<SDValue, 2> Ops;
28500
28501  Ops.push_back(Op.getOperand(0));
28502  if (Op->getGluedNode())
28503    Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
28504
28505  SDLoc OpDL(Op);
28506  SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
28507  SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
28508
28509  return NOOP;
28510}
28511
28512SDValue X86TargetLowering::LowerF128Call(SDValue Op, SelectionDAG &DAG,
28513                                         RTLIB::Libcall Call) const {
28514
28515  bool IsStrict = Op->isStrictFPOpcode();
28516  unsigned Offset = IsStrict ? 1 : 0;
28517  SmallVector<SDValue, 2> Ops(Op->op_begin() + Offset, Op->op_end());
28518
28519  SDLoc dl(Op);
28520  SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
28521  MakeLibCallOptions CallOptions;
28522  std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, Call, MVT::f128, Ops,
28523                                                CallOptions, dl, Chain);
28524
28525  if (IsStrict)
28526    return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);
28527
28528  return Tmp.first;
28529}
28530
28531/// Provide custom lowering hooks for some operations.
28532SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
28533  switch (Op.getOpcode()) {
28534  default: llvm_unreachable("Should not custom lower this!");
28535  case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, Subtarget, DAG);
28536  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
28537    return LowerCMP_SWAP(Op, Subtarget, DAG);
28538  case ISD::CTPOP:              return LowerCTPOP(Op, Subtarget, DAG);
28539  case ISD::ATOMIC_LOAD_ADD:
28540  case ISD::ATOMIC_LOAD_SUB:
28541  case ISD::ATOMIC_LOAD_OR:
28542  case ISD::ATOMIC_LOAD_XOR:
28543  case ISD::ATOMIC_LOAD_AND:    return lowerAtomicArith(Op, DAG, Subtarget);
28544  case ISD::ATOMIC_STORE:       return LowerATOMIC_STORE(Op, DAG, Subtarget);
28545  case ISD::BITREVERSE:         return LowerBITREVERSE(Op, Subtarget, DAG);
28546  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
28547  case ISD::CONCAT_VECTORS:     return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
28548  case ISD::VECTOR_SHUFFLE:     return lowerVECTOR_SHUFFLE(Op, Subtarget, DAG);
28549  case ISD::VSELECT:            return LowerVSELECT(Op, DAG);
28550  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
28551  case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
28552  case ISD::INSERT_SUBVECTOR:   return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
28553  case ISD::EXTRACT_SUBVECTOR:  return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
28554  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, Subtarget,DAG);
28555  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
28556  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
28557  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
28558  case ISD::ExternalSymbol:     return LowerExternalSymbol(Op, DAG);
28559  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
28560  case ISD::SHL_PARTS:
28561  case ISD::SRA_PARTS:
28562  case ISD::SRL_PARTS:          return LowerShiftParts(Op, DAG);
28563  case ISD::FSHL:
28564  case ISD::FSHR:               return LowerFunnelShift(Op, Subtarget, DAG);
28565  case ISD::STRICT_SINT_TO_FP:
28566  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);
28567  case ISD::STRICT_UINT_TO_FP:
28568  case ISD::UINT_TO_FP:         return LowerUINT_TO_FP(Op, DAG);
28569  case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
28570  case ISD::ZERO_EXTEND:        return LowerZERO_EXTEND(Op, Subtarget, DAG);
28571  case ISD::SIGN_EXTEND:        return LowerSIGN_EXTEND(Op, Subtarget, DAG);
28572  case ISD::ANY_EXTEND:         return LowerANY_EXTEND(Op, Subtarget, DAG);
28573  case ISD::ZERO_EXTEND_VECTOR_INREG:
28574  case ISD::SIGN_EXTEND_VECTOR_INREG:
28575    return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
28576  case ISD::FP_TO_SINT:
28577  case ISD::STRICT_FP_TO_SINT:
28578  case ISD::FP_TO_UINT:
28579  case ISD::STRICT_FP_TO_UINT:  return LowerFP_TO_INT(Op, DAG);
28580  case ISD::FP_EXTEND:
28581  case ISD::STRICT_FP_EXTEND:   return LowerFP_EXTEND(Op, DAG);
28582  case ISD::FP_ROUND:
28583  case ISD::STRICT_FP_ROUND:    return LowerFP_ROUND(Op, DAG);
28584  case ISD::LOAD:               return LowerLoad(Op, Subtarget, DAG);
28585  case ISD::STORE:              return LowerStore(Op, Subtarget, DAG);
28586  case ISD::FADD:
28587  case ISD::FSUB:               return lowerFaddFsub(Op, DAG);
28588  case ISD::FABS:
28589  case ISD::FNEG:               return LowerFABSorFNEG(Op, DAG);
28590  case ISD::FCOPYSIGN:          return LowerFCOPYSIGN(Op, DAG);
28591  case ISD::FGETSIGN:           return LowerFGETSIGN(Op, DAG);
28592  case ISD::SETCC:
28593  case ISD::STRICT_FSETCC:
28594  case ISD::STRICT_FSETCCS:     return LowerSETCC(Op, DAG);
28595  case ISD::SETCCCARRY:         return LowerSETCCCARRY(Op, DAG);
28596  case ISD::SELECT:             return LowerSELECT(Op, DAG);
28597  case ISD::BRCOND:             return LowerBRCOND(Op, DAG);
28598  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
28599  case ISD::VASTART:            return LowerVASTART(Op, DAG);
28600  case ISD::VAARG:              return LowerVAARG(Op, DAG);
28601  case ISD::VACOPY:             return LowerVACOPY(Op, Subtarget, DAG);
28602  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
28603  case ISD::INTRINSIC_VOID:
28604  case ISD::INTRINSIC_W_CHAIN:  return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
28605  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
28606  case ISD::ADDROFRETURNADDR:   return LowerADDROFRETURNADDR(Op, DAG);
28607  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
28608  case ISD::FRAME_TO_ARGS_OFFSET:
28609                                return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
28610  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
28611  case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
28612  case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
28613  case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
28614  case ISD::EH_SJLJ_SETUP_DISPATCH:
28615    return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
28616  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
28617  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
28618  case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
28619  case ISD::CTLZ:
28620  case ISD::CTLZ_ZERO_UNDEF:    return LowerCTLZ(Op, Subtarget, DAG);
28621  case ISD::CTTZ:
28622  case ISD::CTTZ_ZERO_UNDEF:    return LowerCTTZ(Op, Subtarget, DAG);
28623  case ISD::MUL:                return LowerMUL(Op, Subtarget, DAG);
28624  case ISD::MULHS:
28625  case ISD::MULHU:              return LowerMULH(Op, Subtarget, DAG);
28626  case ISD::ROTL:
28627  case ISD::ROTR:               return LowerRotate(Op, Subtarget, DAG);
28628  case ISD::SRA:
28629  case ISD::SRL:
28630  case ISD::SHL:                return LowerShift(Op, Subtarget, DAG);
28631  case ISD::SADDO:
28632  case ISD::UADDO:
28633  case ISD::SSUBO:
28634  case ISD::USUBO:
28635  case ISD::SMULO:
28636  case ISD::UMULO:              return LowerXALUO(Op, DAG);
28637  case ISD::READCYCLECOUNTER:   return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
28638  case ISD::BITCAST:            return LowerBITCAST(Op, Subtarget, DAG);
28639  case ISD::ADDCARRY:
28640  case ISD::SUBCARRY:           return LowerADDSUBCARRY(Op, DAG);
28641  case ISD::ADD:
28642  case ISD::SUB:                return lowerAddSub(Op, DAG, Subtarget);
28643  case ISD::UADDSAT:
28644  case ISD::SADDSAT:
28645  case ISD::USUBSAT:
28646  case ISD::SSUBSAT:            return LowerADDSAT_SUBSAT(Op, DAG, Subtarget);
28647  case ISD::SMAX:
28648  case ISD::SMIN:
28649  case ISD::UMAX:
28650  case ISD::UMIN:               return LowerMINMAX(Op, DAG);
28651  case ISD::ABS:                return LowerABS(Op, Subtarget, DAG);
28652  case ISD::FSINCOS:            return LowerFSINCOS(Op, Subtarget, DAG);
28653  case ISD::MLOAD:              return LowerMLOAD(Op, Subtarget, DAG);
28654  case ISD::MSTORE:             return LowerMSTORE(Op, Subtarget, DAG);
28655  case ISD::MGATHER:            return LowerMGATHER(Op, Subtarget, DAG);
28656  case ISD::MSCATTER:           return LowerMSCATTER(Op, Subtarget, DAG);
28657  case ISD::GC_TRANSITION_START:
28658  case ISD::GC_TRANSITION_END:  return LowerGC_TRANSITION(Op, DAG);
28659  case ISD::ADDRSPACECAST:
28660    return LowerADDRSPACECAST(Op, DAG);
28661  }
28662}
28663
28664/// Places new result values for the node in Results (their number
28665/// and types must exactly match those of the original return values of
28666/// the node), or leaves Results empty, which indicates that the node is not
28667/// to be custom lowered after all.
28668void X86TargetLowering::LowerOperationWrapper(SDNode *N,
28669                                              SmallVectorImpl<SDValue> &Results,
28670                                              SelectionDAG &DAG) const {
28671  SDValue Res = LowerOperation(SDValue(N, 0), DAG);
28672
28673  if (!Res.getNode())
28674    return;
28675
28676  // If the original node has one result, take the return value from
28677  // LowerOperation as is. It might not be result number 0.
28678  if (N->getNumValues() == 1) {
28679    Results.push_back(Res);
28680    return;
28681  }
28682
28683  // If the original node has multiple results, then the return node should
28684  // have the same number of results.
28685  assert((N->getNumValues() == Res->getNumValues()) &&
28686      "Lowering returned the wrong number of results!");
28687
28688  // Place the new result values based on N's result numbers.
28689  for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
28690    Results.push_back(Res.getValue(I));
28691}
28692
28693/// Replace a node with an illegal result type with a new node built out of
28694/// custom code.
28695void X86TargetLowering::ReplaceNodeResults(SDNode *N,
28696                                           SmallVectorImpl<SDValue>&Results,
28697                                           SelectionDAG &DAG) const {
28698  SDLoc dl(N);
28699  switch (N->getOpcode()) {
28700  default:
28701#ifndef NDEBUG
28702    dbgs() << "ReplaceNodeResults: ";
28703    N->dump(&DAG);
28704#endif
28705    llvm_unreachable("Do not know how to custom type legalize this operation!");
28706  case ISD::CTPOP: {
28707    assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
28708    // Use a v2i64 if possible.
28709    bool NoImplicitFloatOps =
28710        DAG.getMachineFunction().getFunction().hasFnAttribute(
28711            Attribute::NoImplicitFloat);
28712    if (isTypeLegal(MVT::v2i64) && !NoImplicitFloatOps) {
28713      SDValue Wide =
28714          DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, N->getOperand(0));
28715      Wide = DAG.getNode(ISD::CTPOP, dl, MVT::v2i64, Wide);
28716      // The bit count should fit in 32 bits; extract it as an i32 and then zero
28717      // extend to i64. Otherwise we end up extracting bits 63:32 separately.
28718      Wide = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Wide);
28719      Wide = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Wide,
28720                         DAG.getIntPtrConstant(0, dl));
28721      Wide = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Wide);
28722      Results.push_back(Wide);
28723    }
28724    return;
28725  }
28726  case ISD::MUL: {
28727    EVT VT = N->getValueType(0);
28728    assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
28729           VT.getVectorElementType() == MVT::i8 && "Unexpected VT!");
28730    // Pre-promote these to vXi16 to avoid op legalization thinking all 16
28731    // elements are needed.
28732    MVT MulVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
28733    SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(0));
28734    SDValue Op1 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(1));
28735    SDValue Res = DAG.getNode(ISD::MUL, dl, MulVT, Op0, Op1);
28736    Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
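         // Widen the narrow result back to a full v16i8 (what the widening type
         // legalizer expects) by concatenating with undef.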
28737    unsigned NumConcats = 16 / VT.getVectorNumElements();
28738    SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
28739    ConcatOps[0] = Res;
28740    Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, ConcatOps);
28741    Results.push_back(Res);
28742    return;
28743  }
28744  case X86ISD::VPMADDWD:
28745  case X86ISD::AVG: {
28746    // Legalize types for X86ISD::AVG/VPMADDWD by widening.
28747    assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
28748
28749    EVT VT = N->getValueType(0);
28750    EVT InVT = N->getOperand(0).getValueType();
28751    assert(VT.getSizeInBits() < 128 && 128 % VT.getSizeInBits() == 0 &&
28752           "Expected a VT that divides into 128 bits.");
28753    assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
28754           "Unexpected type action!");
28755    unsigned NumConcat = 128 / InVT.getSizeInBits();
28756
28757    EVT InWideVT = EVT::getVectorVT(*DAG.getContext(),
28758                                    InVT.getVectorElementType(),
28759                                    NumConcat * InVT.getVectorNumElements());
28760    EVT WideVT = EVT::getVectorVT(*DAG.getContext(),
28761                                  VT.getVectorElementType(),
28762                                  NumConcat * VT.getVectorNumElements());
28763
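         // Pad each input out to 128 bits with undef, perform the operation at
         // the wide type, and hand the widened result back to the type legalizer.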
28764    SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
28765    Ops[0] = N->getOperand(0);
28766    SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
28767    Ops[0] = N->getOperand(1);
28768    SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
28769
28770    SDValue Res = DAG.getNode(N->getOpcode(), dl, WideVT, InVec0, InVec1);
28771    Results.push_back(Res);
28772    return;
28773  }
28774  case ISD::ABS: {
28775    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28776    assert(N->getValueType(0) == MVT::i64 &&
28777           "Unexpected type (!= i64) on ABS.");
28778    MVT HalfT = MVT::i32;
28779    SDValue Lo, Hi, Tmp;
28780    SDVTList VTList = DAG.getVTList(HalfT, MVT::i1);
28781
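         // Expand i64 ABS on the 32-bit target with the usual branchless pattern
         // abs(x) = (x + sign) ^ sign, where sign = x >> 63: the sign mask is the
         // high half shifted right arithmetically by 31, and the add is carried
         // across the two 32-bit halves with UADDO/ADDCARRY.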
28782    Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
28783                     DAG.getConstant(0, dl, HalfT));
28784    Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
28785                     DAG.getConstant(1, dl, HalfT));
28786    Tmp = DAG.getNode(
28787        ISD::SRA, dl, HalfT, Hi,
28788        DAG.getConstant(HalfT.getSizeInBits() - 1, dl,
28789                        TLI.getShiftAmountTy(HalfT, DAG.getDataLayout())));
28790    Lo = DAG.getNode(ISD::UADDO, dl, VTList, Tmp, Lo);
28791    Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Tmp, Hi,
28792                     SDValue(Lo.getNode(), 1));
28793    Hi = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Hi);
28794    Lo = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Lo);
28795    Results.push_back(Lo);
28796    Results.push_back(Hi);
28797    return;
28798  }
28799  // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
28800  case X86ISD::FMINC:
28801  case X86ISD::FMIN:
28802  case X86ISD::FMAXC:
28803  case X86ISD::FMAX: {
28804    EVT VT = N->getValueType(0);
28805    assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
28806    SDValue UNDEF = DAG.getUNDEF(VT);
28807    SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
28808                              N->getOperand(0), UNDEF);
28809    SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
28810                              N->getOperand(1), UNDEF);
28811    Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
28812    return;
28813  }
28814  case ISD::SDIV:
28815  case ISD::UDIV:
28816  case ISD::SREM:
28817  case ISD::UREM: {
28818    EVT VT = N->getValueType(0);
28819    if (VT.isVector()) {
28820      assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
28821             "Unexpected type action!");
28822      // If the RHS is a constant splat vector, we can widen the operation and
28823      // let the divide/remainder-by-constant optimization handle it.
28824      // TODO: Can we do something for non-splat?
28825      APInt SplatVal;
28826      if (ISD::isConstantSplatVector(N->getOperand(1).getNode(), SplatVal)) {
28827        unsigned NumConcats = 128 / VT.getSizeInBits();
28828        SmallVector<SDValue, 8> Ops0(NumConcats, DAG.getUNDEF(VT));
28829        Ops0[0] = N->getOperand(0);
28830        EVT ResVT = getTypeToTransformTo(*DAG.getContext(), VT);
28831        SDValue N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Ops0);
28832        SDValue N1 = DAG.getConstant(SplatVal, dl, ResVT);
28833        SDValue Res = DAG.getNode(N->getOpcode(), dl, ResVT, N0, N1);
28834        Results.push_back(Res);
28835      }
28836      return;
28837    }
28838
28839    LLVM_FALLTHROUGH;
28840  }
28841  case ISD::SDIVREM:
28842  case ISD::UDIVREM: {
28843    SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
28844    Results.push_back(V);
28845    return;
28846  }
28847  case ISD::TRUNCATE: {
28848    MVT VT = N->getSimpleValueType(0);
28849    if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
28850      return;
28851
28852    // The generic legalizer will try to widen the input type to the same
28853    // number of elements as the widened result type. But this isn't always
28854    // the best thing so do some custom legalization to avoid some cases.
28855    MVT WidenVT = getTypeToTransformTo(*DAG.getContext(), VT).getSimpleVT();
28856    SDValue In = N->getOperand(0);
28857    EVT InVT = In.getValueType();
28858
28859    unsigned InBits = InVT.getSizeInBits();
28860    if (128 % InBits == 0) {
28861      // Inputs of 128 bits or smaller should avoid the truncate altogether and
28862      // just use a build_vector that will become a shuffle.
28863      // TODO: Widen and use a shuffle directly?
28864      MVT InEltVT = InVT.getSimpleVT().getVectorElementType();
28865      EVT EltVT = VT.getVectorElementType();
28866      unsigned WidenNumElts = WidenVT.getVectorNumElements();
28867      SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT));
28868      // Use the original element count so we don't do more scalar opts than
28869      // necessary.
28870      unsigned MinElts = VT.getVectorNumElements();
28871      for (unsigned i=0; i < MinElts; ++i) {
28872        SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, In,
28873                                  DAG.getIntPtrConstant(i, dl));
28874        Ops[i] = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Val);
28875      }
28876      Results.push_back(DAG.getBuildVector(WidenVT, dl, Ops));
28877      return;
28878    }
28879    // With AVX512 there are some cases that can use a target specific
28880    // truncate node to go from 256/512 to less than 128 with zeros in the
28881    // upper elements of the 128 bit result.
28882    if (Subtarget.hasAVX512() && isTypeLegal(InVT)) {
28883      // We can use VTRUNC directly for 256 bits with VLX, or for any 512.
28884      if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) {
28885        Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
28886        return;
28887      }
28888      // There's one case we can widen to 512 bits and use VTRUNC.
28889      if (InVT == MVT::v4i64 && VT == MVT::v4i8 && isTypeLegal(MVT::v8i64)) {
28890        In = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i64, In,
28891                         DAG.getUNDEF(MVT::v4i64));
28892        Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
28893        return;
28894      }
28895    }
28896    if (Subtarget.hasVLX() && InVT == MVT::v8i64 && VT == MVT::v8i8 &&
28897        getTypeAction(*DAG.getContext(), InVT) == TypeSplitVector &&
28898        isTypeLegal(MVT::v4i64)) {
28899      // The input needs to be split and the output needs to be widened. Use two
28900      // VTRUNCs, and shuffle their results together into the wider type.
28901      SDValue Lo, Hi;
28902      std::tie(Lo, Hi) = DAG.SplitVector(In, dl);
28903
28904      Lo = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Lo);
28905      Hi = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Hi);
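           // Each VTRUNC leaves its 4 truncated bytes in the low lanes of a
           // v16i8; take the low 4 bytes of each half and leave the rest undef.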
28906      SDValue Res = DAG.getVectorShuffle(MVT::v16i8, dl, Lo, Hi,
28907                                         { 0,  1,  2,  3, 16, 17, 18, 19,
28908                                          -1, -1, -1, -1, -1, -1, -1, -1 });
28909      Results.push_back(Res);
28910      return;
28911    }
28912
28913    return;
28914  }
28915  case ISD::ANY_EXTEND:
28916    // Right now, only MVT::v8i8 has Custom action for an illegal type.
28917    // It's intended to custom handle the input type.
28918    assert(N->getValueType(0) == MVT::v8i8 &&
28919           "Do not know how to legalize this Node");
28920    return;
28921  case ISD::SIGN_EXTEND:
28922  case ISD::ZERO_EXTEND: {
28923    EVT VT = N->getValueType(0);
28924    SDValue In = N->getOperand(0);
28925    EVT InVT = In.getValueType();
28926    if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
28927        (InVT == MVT::v4i16 || InVT == MVT::v4i8)){
28928      assert(getTypeAction(*DAG.getContext(), InVT) == TypeWidenVector &&
28929             "Unexpected type action!");
28930      assert(N->getOpcode() == ISD::SIGN_EXTEND && "Unexpected opcode");
28931      // Custom split this so we can extend i8/i16->i32 in-vector. This is better
28932      // because sign_extend_inreg i8/i16->i64 requires an extend to i32 using
28933      // sra, followed by an extend from i32 to i64 using pcmpgt. By custom
28934      // splitting, the sra from the extend to i32 can be shared by both halves.
28935      In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);
28936
28937      // Fill a vector with sign bits for each element.
28938      SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
28939      SDValue SignBits = DAG.getSetCC(dl, MVT::v4i32, Zero, In, ISD::SETGT);
28940
28941      // Create an unpackl and unpackh to interleave the sign bits then bitcast
28942      // to v2i64.
28943      SDValue Lo = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
28944                                        {0, 4, 1, 5});
28945      Lo = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Lo);
28946      SDValue Hi = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
28947                                        {2, 6, 3, 7});
28948      Hi = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Hi);
28949
28950      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
28951      Results.push_back(Res);
28952      return;
28953    }
28954
28955    if (VT == MVT::v16i32 || VT == MVT::v8i64) {
28956      if (!InVT.is128BitVector()) {
28957        // Not a 128 bit vector, but maybe type legalization will promote
28958        // it to 128 bits.
28959        if (getTypeAction(*DAG.getContext(), InVT) != TypePromoteInteger)
28960          return;
28961        InVT = getTypeToTransformTo(*DAG.getContext(), InVT);
28962        if (!InVT.is128BitVector())
28963          return;
28964
28965        // Promote the input to 128 bits. Type legalization will turn this into
28966        // zext_inreg/sext_inreg.
28967        In = DAG.getNode(N->getOpcode(), dl, InVT, In);
28968      }
28969
28970      // Perform custom splitting instead of the two stage extend we would get
28971      // by default.
28972      EVT LoVT, HiVT;
28973      std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
28974      assert(isTypeLegal(LoVT) && "Split VT not legal?");
28975
28976      SDValue Lo = getExtendInVec(N->getOpcode(), dl, LoVT, In, DAG);
28977
28978      // We need to shift the input over by half the number of elements.
28979      unsigned NumElts = InVT.getVectorNumElements();
28980      unsigned HalfNumElts = NumElts / 2;
28981      SmallVector<int, 16> ShufMask(NumElts, SM_SentinelUndef);
28982      for (unsigned i = 0; i != HalfNumElts; ++i)
28983        ShufMask[i] = i + HalfNumElts;
28984
28985      SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
28986      Hi = getExtendInVec(N->getOpcode(), dl, HiVT, Hi, DAG);
28987
28988      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
28989      Results.push_back(Res);
28990    }
28991    return;
28992  }
28993  case ISD::FP_TO_SINT:
28994  case ISD::STRICT_FP_TO_SINT:
28995  case ISD::FP_TO_UINT:
28996  case ISD::STRICT_FP_TO_UINT: {
28997    bool IsStrict = N->isStrictFPOpcode();
28998    bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
28999                    N->getOpcode() == ISD::STRICT_FP_TO_SINT;
29000    EVT VT = N->getValueType(0);
29001    SDValue Src = N->getOperand(IsStrict ? 1 : 0);
29002    EVT SrcVT = Src.getValueType();
29003
29004    if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
29005      assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29006             "Unexpected type action!");
29007
29008      // Try to create a 128 bit vector, but don't exceed a 32 bit element.
29009      unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
29010      MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
29011                                       VT.getVectorNumElements());
29012      SDValue Res;
29013      SDValue Chain;
29014      if (IsStrict) {
29015        Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {PromoteVT, MVT::Other},
29016                          {N->getOperand(0), Src});
29017        Chain = Res.getValue(1);
29018      } else
29019        Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);
29020
29021      // Preserve what we know about the size of the original result, except
29022      // when the result is v2i32, since we can't widen the assert.
29023      if (PromoteVT != MVT::v2i32)
29024        Res = DAG.getNode(!IsSigned ? ISD::AssertZext : ISD::AssertSext,
29025                          dl, PromoteVT, Res,
29026                          DAG.getValueType(VT.getVectorElementType()));
29027
29028      // Truncate back to the original width.
29029      Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
29030
29031      // Now widen to 128 bits.
29032      unsigned NumConcats = 128 / VT.getSizeInBits();
29033      MVT ConcatVT = MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
29034                                      VT.getVectorNumElements() * NumConcats);
29035      SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
29036      ConcatOps[0] = Res;
29037      Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
29038      Results.push_back(Res);
29039      if (IsStrict)
29040        Results.push_back(Chain);
29041      return;
29042    }
29043
29044
29045    if (VT == MVT::v2i32) {
29046      assert((IsSigned || Subtarget.hasAVX512()) &&
29047             "Can only handle signed conversion without AVX512");
29048      assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
29049      assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29050             "Unexpected type action!");
29051      if (Src.getValueType() == MVT::v2f64) {
29052        unsigned Opc;
29053        if (IsStrict)
29054          Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
29055        else
29056          Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
29057
29058        // If we have VLX we can emit a target-specific FP_TO_UINT node.
29059        if (!IsSigned && !Subtarget.hasVLX()) {
29060          // Otherwise we can defer to the generic legalizer which will widen
29061          // the input as well. This will be further widened during op
29062          // legalization to v8i32<-v8f64.
29063          // For strict nodes we'll need to widen ourselves.
29064          // FIXME: Fix the type legalizer to safely widen strict nodes?
29065          if (!IsStrict)
29066            return;
29067          Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f64, Src,
29068                            DAG.getConstantFP(0.0, dl, MVT::v2f64));
29069          Opc = N->getOpcode();
29070        }
29071        SDValue Res;
29072        SDValue Chain;
29073        if (IsStrict) {
29074          Res = DAG.getNode(Opc, dl, {MVT::v4i32, MVT::Other},
29075                            {N->getOperand(0), Src});
29076          Chain = Res.getValue(1);
29077        } else {
29078          Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
29079        }
29080        Results.push_back(Res);
29081        if (IsStrict)
29082          Results.push_back(Chain);
29083        return;
29084      }
29085
29086      // Custom widen strict v2f32->v2i32 by padding with zeros.
29087      // FIXME: Should generic type legalizer do this?
29088      if (Src.getValueType() == MVT::v2f32 && IsStrict) {
29089        Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
29090                          DAG.getConstantFP(0.0, dl, MVT::v2f32));
29091        SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4i32, MVT::Other},
29092                                  {N->getOperand(0), Src});
29093        Results.push_back(Res);
29094        Results.push_back(Res.getValue(1));
29095        return;
29096      }
29097
29098      // The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
29099      // so early out here.
29100      return;
29101    }
29102
29103    assert(!VT.isVector() && "Vectors should have been handled above!");
29104
29105    if (Subtarget.hasDQI() && VT == MVT::i64 &&
29106        (SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
29107      assert(!Subtarget.is64Bit() && "i64 should be legal");
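           // There is no scalar f32/f64 -> i64 conversion on 32-bit targets, so
           // bounce through a vector: insert the scalar into lane 0, convert with
           // the packed AVX512DQ node (widened to 512 bits without VLX), and
           // extract lane 0 of the result.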
29108      unsigned NumElts = Subtarget.hasVLX() ? 2 : 8;
29109      // If we use a 128-bit result we might need to use a target specific node.
29110      unsigned SrcElts =
29111          std::max(NumElts, 128U / (unsigned)SrcVT.getSizeInBits());
29112      MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
29113      MVT VecInVT = MVT::getVectorVT(SrcVT.getSimpleVT(), SrcElts);
29114      unsigned Opc = N->getOpcode();
29115      if (NumElts != SrcElts) {
29116        if (IsStrict)
29117          Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
29118        else
29119          Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
29120      }
29121
29122      SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
29123      SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
29124                                DAG.getConstantFP(0.0, dl, VecInVT), Src,
29125                                ZeroIdx);
29126      SDValue Chain;
29127      if (IsStrict) {
29128        SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
29129        Res = DAG.getNode(Opc, SDLoc(N), Tys, N->getOperand(0), Res);
29130        Chain = Res.getValue(1);
29131      } else
29132        Res = DAG.getNode(Opc, SDLoc(N), VecVT, Res);
29133      Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
29134      Results.push_back(Res);
29135      if (IsStrict)
29136        Results.push_back(Chain);
29137      return;
29138    }
29139
29140    SDValue Chain;
29141    if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, Chain)) {
29142      Results.push_back(V);
29143      if (IsStrict)
29144        Results.push_back(Chain);
29145    }
29146    return;
29147  }
29148  case ISD::SINT_TO_FP:
29149  case ISD::STRICT_SINT_TO_FP:
29150  case ISD::UINT_TO_FP:
29151  case ISD::STRICT_UINT_TO_FP: {
29152    bool IsStrict = N->isStrictFPOpcode();
29153    bool IsSigned = N->getOpcode() == ISD::SINT_TO_FP ||
29154                    N->getOpcode() == ISD::STRICT_SINT_TO_FP;
29155    EVT VT = N->getValueType(0);
29156    if (VT != MVT::v2f32)
29157      return;
29158    SDValue Src = N->getOperand(IsStrict ? 1 : 0);
29159    EVT SrcVT = Src.getValueType();
29160    if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
29161      if (IsStrict) {
29162        unsigned Opc = IsSigned ? X86ISD::STRICT_CVTSI2P
29163                                : X86ISD::STRICT_CVTUI2P;
29164        SDValue Res = DAG.getNode(Opc, dl, {MVT::v4f32, MVT::Other},
29165                                  {N->getOperand(0), Src});
29166        Results.push_back(Res);
29167        Results.push_back(Res.getValue(1));
29168      } else {
29169        unsigned Opc = IsSigned ? X86ISD::CVTSI2P : X86ISD::CVTUI2P;
29170        Results.push_back(DAG.getNode(Opc, dl, MVT::v4f32, Src));
29171      }
29172      return;
29173    }
29174    if (SrcVT == MVT::v2i64 && !IsSigned && Subtarget.is64Bit() &&
29175        Subtarget.hasSSE41() && !Subtarget.hasAVX512()) {
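           // There is no unsigned i64 -> f32 conversion without AVX512. Lanes with
           // the sign bit set are halved with round-to-odd ((Src >> 1) | (Src & 1))
           // before a signed per-lane conversion; an FADD then doubles the converted
           // value, and a final vector select picks the doubled result for those
           // lanes and the plain signed conversion for the rest.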
29176      SDValue Zero = DAG.getConstant(0, dl, SrcVT);
29177      SDValue One  = DAG.getConstant(1, dl, SrcVT);
29178      SDValue Sign = DAG.getNode(ISD::OR, dl, SrcVT,
29179                                 DAG.getNode(ISD::SRL, dl, SrcVT, Src, One),
29180                                 DAG.getNode(ISD::AND, dl, SrcVT, Src, One));
29181      SDValue IsNeg = DAG.getSetCC(dl, MVT::v2i64, Src, Zero, ISD::SETLT);
29182      SDValue SignSrc = DAG.getSelect(dl, SrcVT, IsNeg, Sign, Src);
29183      SmallVector<SDValue, 4> SignCvts(4, DAG.getConstantFP(0.0, dl, MVT::f32));
29184      for (int i = 0; i != 2; ++i) {
29185        SDValue Src = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
29186                                  SignSrc, DAG.getIntPtrConstant(i, dl));
29187        if (IsStrict)
29188          SignCvts[i] =
29189              DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {MVT::f32, MVT::Other},
29190                          {N->getOperand(0), Src});
29191        else
29192          SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Src);
29193      }
29194      SDValue SignCvt = DAG.getBuildVector(MVT::v4f32, dl, SignCvts);
29195      SDValue Slow, Chain;
29196      if (IsStrict) {
29197        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
29198                            SignCvts[0].getValue(1), SignCvts[1].getValue(1));
29199        Slow = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::v4f32, MVT::Other},
29200                           {Chain, SignCvt, SignCvt});
29201        Chain = Slow.getValue(1);
29202      } else {
29203        Slow = DAG.getNode(ISD::FADD, dl, MVT::v4f32, SignCvt, SignCvt);
29204      }
29205      IsNeg = DAG.getBitcast(MVT::v4i32, IsNeg);
29206      IsNeg =
29207          DAG.getVectorShuffle(MVT::v4i32, dl, IsNeg, IsNeg, {1, 3, -1, -1});
29208      SDValue Cvt = DAG.getSelect(dl, MVT::v4f32, IsNeg, Slow, SignCvt);
29209      Results.push_back(Cvt);
29210      if (IsStrict)
29211        Results.push_back(Chain);
29212      return;
29213    }
29214
29215    if (SrcVT != MVT::v2i32)
29216      return;
29217
29218    if (IsSigned || Subtarget.hasAVX512()) {
29219      if (!IsStrict)
29220        return;
29221
29222      // Custom widen strict v2i32->v2f32 to avoid scalarization.
29223      // FIXME: Should generic type legalizer do this?
29224      Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
29225                        DAG.getConstant(0, dl, MVT::v2i32));
29226      SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
29227                                {N->getOperand(0), Src});
29228      Results.push_back(Res);
29229      Results.push_back(Res.getValue(1));
29230      return;
29231    }
29232
29233    assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
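         // Use the double-precision bias trick: zero-extend each i32 lane to i64
         // and OR in the bit pattern of 2^52 (0x4330000000000000) so every lane
         // holds the double 2^52 + x; subtracting 2^52 recovers x exactly, and
         // VFPROUND narrows the v2f64 result to f32.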
29234    SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
29235    SDValue VBias =
29236        DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, MVT::v2f64);
29237    SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
29238                             DAG.getBitcast(MVT::v2i64, VBias));
29239    Or = DAG.getBitcast(MVT::v2f64, Or);
29240    if (IsStrict) {
29241      SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::v2f64, MVT::Other},
29242                                {N->getOperand(0), Or, VBias});
29243      SDValue Res = DAG.getNode(X86ISD::STRICT_VFPROUND, dl,
29244                                {MVT::v4f32, MVT::Other},
29245                                {Sub.getValue(1), Sub});
29246      Results.push_back(Res);
29247      Results.push_back(Res.getValue(1));
29248    } else {
29249      // TODO: Are there any fast-math-flags to propagate here?
29250      SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
29251      Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
29252    }
29253    return;
29254  }
29255  case ISD::STRICT_FP_ROUND:
29256  case ISD::FP_ROUND: {
29257    bool IsStrict = N->isStrictFPOpcode();
29258    SDValue Src = N->getOperand(IsStrict ? 1 : 0);
29259    if (!isTypeLegal(Src.getValueType()))
29260      return;
29261    SDValue V;
29262    if (IsStrict)
29263      V = DAG.getNode(X86ISD::STRICT_VFPROUND, dl, {MVT::v4f32, MVT::Other},
29264                      {N->getOperand(0), N->getOperand(1)});
29265    else
29266      V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
29267    Results.push_back(V);
29268    if (IsStrict)
29269      Results.push_back(V.getValue(1));
29270    return;
29271  }
29272  case ISD::FP_EXTEND: {
29273    // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
29274    // No other ValueType for FP_EXTEND should reach this point.
29275    assert(N->getValueType(0) == MVT::v2f32 &&
29276           "Do not know how to legalize this Node");
29277    return;
29278  }
29279  case ISD::INTRINSIC_W_CHAIN: {
29280    unsigned IntNo = N->getConstantOperandVal(1);
29281    switch (IntNo) {
29282    default : llvm_unreachable("Do not know how to custom type "
29283                               "legalize this intrinsic operation!");
29284    case Intrinsic::x86_rdtsc:
29285      return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget,
29286                                     Results);
29287    case Intrinsic::x86_rdtscp:
29288      return getReadTimeStampCounter(N, dl, X86::RDTSCP, DAG, Subtarget,
29289                                     Results);
29290    case Intrinsic::x86_rdpmc:
29291      expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPMC, X86::ECX, Subtarget,
29292                                  Results);
29293      return;
29294    case Intrinsic::x86_xgetbv:
29295      expandIntrinsicWChainHelper(N, dl, DAG, X86::XGETBV, X86::ECX, Subtarget,
29296                                  Results);
29297      return;
29298    }
29299  }
29300  case ISD::READCYCLECOUNTER: {
29301    return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget, Results);
29302  }
29303  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
29304    EVT T = N->getValueType(0);
29305    assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
29306    bool Regs64bit = T == MVT::i128;
29307    assert((!Regs64bit || Subtarget.hasCmpxchg16b()) &&
29308           "64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS requires CMPXCHG16B");
29309    MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
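         // CMPXCHG8B/CMPXCHG16B expect the compare value in EDX:EAX (RDX:RAX) and
         // the replacement value in ECX:EBX (RCX:RBX), so split both i64/i128
         // operands into halves and copy them into those registers explicitly.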
29310    SDValue cpInL, cpInH;
29311    cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
29312                        DAG.getConstant(0, dl, HalfT));
29313    cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
29314                        DAG.getConstant(1, dl, HalfT));
29315    cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
29316                             Regs64bit ? X86::RAX : X86::EAX,
29317                             cpInL, SDValue());
29318    cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
29319                             Regs64bit ? X86::RDX : X86::EDX,
29320                             cpInH, cpInL.getValue(1));
29321    SDValue swapInL, swapInH;
29322    swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
29323                          DAG.getConstant(0, dl, HalfT));
29324    swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
29325                          DAG.getConstant(1, dl, HalfT));
29326    swapInH =
29327        DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
29328                         swapInH, cpInH.getValue(1));
29329    // If the current function needs the base pointer, RBX,
29330    // we shouldn't use cmpxchg directly.
29331    // The lowering of that instruction will clobber
29332    // that register, and since RBX will then be a reserved register,
29333    // the register allocator will not make sure its value is
29334    // properly saved and restored around this live-range.
29335    const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
29336    SDValue Result;
29337    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
29338    Register BasePtr = TRI->getBaseRegister();
29339    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
29340    if (TRI->hasBasePointer(DAG.getMachineFunction()) &&
29341        (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
29342      // ISel prefers the LCMPXCHG64 variant.
29343      // If the assert below breaks, that is no longer the case,
29344      // and we need to teach LCMPXCHG8_SAVE_EBX_DAG how to save RBX,
29345      // not just EBX. This is a matter of accepting an i64 input for that
29346      // pseudo, and restoring into a register of the right width
29347      // in the expand pseudo. Everything else should just work.
29348      assert(((Regs64bit == (BasePtr == X86::RBX)) || BasePtr == X86::EBX) &&
29349             "Saving only half of the RBX");
29350      unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_SAVE_RBX_DAG
29351                                  : X86ISD::LCMPXCHG8_SAVE_EBX_DAG;
29352      SDValue RBXSave = DAG.getCopyFromReg(swapInH.getValue(0), dl,
29353                                           Regs64bit ? X86::RBX : X86::EBX,
29354                                           HalfT, swapInH.getValue(1));
29355      SDValue Ops[] = {/*Chain*/ RBXSave.getValue(1), N->getOperand(1), swapInL,
29356                       RBXSave,
29357                       /*Glue*/ RBXSave.getValue(2)};
29358      Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
29359    } else {
29360      unsigned Opcode =
29361          Regs64bit ? X86ISD::LCMPXCHG16_DAG : X86ISD::LCMPXCHG8_DAG;
29362      swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl,
29363                                 Regs64bit ? X86::RBX : X86::EBX, swapInL,
29364                                 swapInH.getValue(1));
29365      SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
29366                       swapInL.getValue(1)};
29367      Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
29368    }
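         // The original memory value comes back in EDX:EAX (RDX:RAX) and ZF is
         // set on success, so read both halves back and materialize the success
         // value from EFLAGS.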
29369    SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
29370                                        Regs64bit ? X86::RAX : X86::EAX,
29371                                        HalfT, Result.getValue(1));
29372    SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
29373                                        Regs64bit ? X86::RDX : X86::EDX,
29374                                        HalfT, cpOutL.getValue(2));
29375    SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
29376
29377    SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
29378                                        MVT::i32, cpOutH.getValue(2));
29379    SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
29380    Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
29381
29382    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
29383    Results.push_back(Success);
29384    Results.push_back(EFLAGS.getValue(1));
29385    return;
29386  }
29387  case ISD::ATOMIC_LOAD: {
29388    assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
29389    bool NoImplicitFloatOps =
29390        DAG.getMachineFunction().getFunction().hasFnAttribute(
29391            Attribute::NoImplicitFloat);
29392    if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
29393      auto *Node = cast<AtomicSDNode>(N);
29394      if (Subtarget.hasSSE2()) {
29395        // Use a VZEXT_LOAD which will be selected as MOVQ. Then extract the
29396        // lower 64 bits.
29397        SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
29398        SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
29399        SDValue Ld = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
29400                                             MVT::i64, Node->getMemOperand());
29401        SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Ld,
29402                                  DAG.getIntPtrConstant(0, dl));
29403        Results.push_back(Res);
29404        Results.push_back(Ld.getValue(1));
29405        return;
29406      }
29407      if (Subtarget.hasX87()) {
29408        // First load this into an 80-bit X87 register. This will put the whole
29409        // integer into the significand.
29410        // FIXME: Do we need to glue? See FIXME comment in BuildFILD.
29411        SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other, MVT::Glue);
29412        SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
29413        SDValue Result = DAG.getMemIntrinsicNode(X86ISD::FILD_FLAG,
29414                                                 dl, Tys, Ops, MVT::i64,
29415                                                 Node->getMemOperand());
29416        SDValue Chain = Result.getValue(1);
29417        SDValue InFlag = Result.getValue(2);
29418
29419        // Now store the X87 register to a stack temporary and convert to i64.
29420        // This store is not atomic and doesn't need to be.
29421        // FIXME: We don't need a stack temporary if the result of the load
29422        // is already being stored. We could just directly store there.
29423        SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
29424        int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
29425        MachinePointerInfo MPI =
29426            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
29427        SDValue StoreOps[] = { Chain, Result, StackPtr, InFlag };
29428        Chain = DAG.getMemIntrinsicNode(X86ISD::FIST, dl,
29429                                        DAG.getVTList(MVT::Other), StoreOps,
29430                                        MVT::i64, MPI, 0 /*Align*/,
29431                                        MachineMemOperand::MOStore);
29432
29433        // Finally load the value back from the stack temporary and return it.
29434        // This load is not atomic and doesn't need to be.
29435        // This load will be further type legalized.
29436        Result = DAG.getLoad(MVT::i64, dl, Chain, StackPtr, MPI);
29437        Results.push_back(Result);
29438        Results.push_back(Result.getValue(1));
29439        return;
29440      }
29441    }
29442    // TODO: Use MOVLPS when SSE1 is available?
29443    // Delegate to generic TypeLegalization. Situations we can really handle
29444    // should have already been dealt with by AtomicExpandPass.cpp.
29445    break;
29446  }
29447  case ISD::ATOMIC_SWAP:
29448  case ISD::ATOMIC_LOAD_ADD:
29449  case ISD::ATOMIC_LOAD_SUB:
29450  case ISD::ATOMIC_LOAD_AND:
29451  case ISD::ATOMIC_LOAD_OR:
29452  case ISD::ATOMIC_LOAD_XOR:
29453  case ISD::ATOMIC_LOAD_NAND:
29454  case ISD::ATOMIC_LOAD_MIN:
29455  case ISD::ATOMIC_LOAD_MAX:
29456  case ISD::ATOMIC_LOAD_UMIN:
29457  case ISD::ATOMIC_LOAD_UMAX:
29458    // Delegate to generic TypeLegalization. Situations we can really handle
29459    // should have already been dealt with by AtomicExpandPass.cpp.
29460    break;
29461
29462  case ISD::BITCAST: {
29463    assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
29464    EVT DstVT = N->getValueType(0);
29465    EVT SrcVT = N->getOperand(0).getValueType();
29466
29467    // If this is a bitcast from a v64i1 k-register to an i64 on a 32-bit target
29468    // we can split using the k-register rather than memory.
29469    if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) {
29470      assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
29471      SDValue Lo, Hi;
29472      std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
29473      Lo = DAG.getBitcast(MVT::i32, Lo);
29474      Hi = DAG.getBitcast(MVT::i32, Hi);
29475      SDValue Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
29476      Results.push_back(Res);
29477      return;
29478    }
29479
29480    // Custom splitting for BWI types when AVX512F is available but BWI isn't.
29481    if ((DstVT == MVT::v32i16 || DstVT == MVT::v64i8) &&
29482        SrcVT.isVector() && isTypeLegal(SrcVT)) {
29483      SDValue Lo, Hi;
29484      std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
29485      MVT CastVT = (DstVT == MVT::v32i16) ? MVT::v16i16 : MVT::v32i8;
29486      Lo = DAG.getBitcast(CastVT, Lo);
29487      Hi = DAG.getBitcast(CastVT, Hi);
29488      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi);
29489      Results.push_back(Res);
29490      return;
29491    }
29492
29493    if (DstVT.isVector() && SrcVT == MVT::x86mmx) {
29494      assert(getTypeAction(*DAG.getContext(), DstVT) == TypeWidenVector &&
29495             "Unexpected type action!");
29496      EVT WideVT = getTypeToTransformTo(*DAG.getContext(), DstVT);
29497      SDValue Res = DAG.getNode(X86ISD::MOVQ2DQ, dl, WideVT, N->getOperand(0));
29498      Results.push_back(Res);
29499      return;
29500    }
29501
29502    return;
29503  }
29504  case ISD::MGATHER: {
29505    EVT VT = N->getValueType(0);
29506    if ((VT == MVT::v2f32 || VT == MVT::v2i32) &&
29507        (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
29508      auto *Gather = cast<MaskedGatherSDNode>(N);
29509      SDValue Index = Gather->getIndex();
29510      if (Index.getValueType() != MVT::v2i64)
29511        return;
29512      assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29513             "Unexpected type action!");
29514      EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
29515      SDValue Mask = Gather->getMask();
29516      assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
29517      SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT,
29518                                     Gather->getPassThru(),
29519                                     DAG.getUNDEF(VT));
29520      if (!Subtarget.hasVLX()) {
29521        // We need to widen the mask, but the instruction will only use 2
29522        // of its elements. So we can use undef.
29523        Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
29524                           DAG.getUNDEF(MVT::v2i1));
29525        Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
29526      }
29527      SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
29528                        Gather->getBasePtr(), Index, Gather->getScale() };
29529      SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
29530        DAG.getVTList(WideVT, Mask.getValueType(), MVT::Other), Ops, dl,
29531        Gather->getMemoryVT(), Gather->getMemOperand());
29532      Results.push_back(Res);
29533      Results.push_back(Res.getValue(2));
29534      return;
29535    }
29536    return;
29537  }
29538  case ISD::LOAD: {
29539    // Use an f64/i64 load and a scalar_to_vector for v2f32/v2i32 loads. This
29540    // avoids scalarizing in 32-bit mode. In 64-bit mode this avoids an int->fp
29541    // cast since type legalization will try to use an i64 load.
29542    MVT VT = N->getSimpleValueType(0);
29543    assert(VT.isVector() && VT.getSizeInBits() == 64 && "Unexpected VT");
29544    assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29545           "Unexpected type action!");
29546    if (!ISD::isNON_EXTLoad(N))
29547      return;
29548    auto *Ld = cast<LoadSDNode>(N);
29549    if (Subtarget.hasSSE2()) {
29550      MVT LdVT = Subtarget.is64Bit() && VT.isInteger() ? MVT::i64 : MVT::f64;
29551      SDValue Res = DAG.getLoad(LdVT, dl, Ld->getChain(), Ld->getBasePtr(),
29552                                Ld->getPointerInfo(), Ld->getAlignment(),
29553                                Ld->getMemOperand()->getFlags());
29554      SDValue Chain = Res.getValue(1);
29555      MVT VecVT = MVT::getVectorVT(LdVT, 2);
29556      Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Res);
29557      EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
29558      Res = DAG.getBitcast(WideVT, Res);
29559      Results.push_back(Res);
29560      Results.push_back(Chain);
29561      return;
29562    }
29563    assert(Subtarget.hasSSE1() && "Expected SSE");
29564    SDVTList Tys = DAG.getVTList(MVT::v4f32, MVT::Other);
29565    SDValue Ops[] = {Ld->getChain(), Ld->getBasePtr()};
29566    SDValue Res = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
29567                                          MVT::i64, Ld->getMemOperand());
29568    Results.push_back(Res);
29569    Results.push_back(Res.getValue(1));
29570    return;
29571  }
29572  case ISD::ADDRSPACECAST: {
29573    SDValue Src = N->getOperand(0);
29574    EVT DstVT = N->getValueType(0);
29575    AddrSpaceCastSDNode *CastN = cast<AddrSpaceCastSDNode>(N);
29576    unsigned SrcAS = CastN->getSrcAddressSpace();
29577
29578    assert(SrcAS != CastN->getDestAddressSpace() &&
29579           "addrspacecast must be between different address spaces");
29580
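         // ptr32_uptr is an unsigned 32-bit pointer, so extending it to 64 bits
         // zero-extends; any other 32 -> 64 extension sign-extends, and casts
         // down to 32 bits simply truncate.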
29581    SDValue Res;
29582    if (SrcAS == X86AS::PTR32_UPTR && DstVT == MVT::i64)
29583      Res = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Src);
29584    else if (DstVT == MVT::i64)
29585      Res = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Src);
29586    else if (DstVT == MVT::i32)
29587      Res = DAG.getNode(ISD::TRUNCATE, dl, DstVT, Src);
29588    else
29589      report_fatal_error("Unrecognized addrspacecast type legalization");
29590
29591    Results.push_back(Res);
29592    return;
29593  }
29594  }
29595}
29596
29597const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
29598  switch ((X86ISD::NodeType)Opcode) {
29599  case X86ISD::FIRST_NUMBER:       break;
29600  case X86ISD::BSF:                return "X86ISD::BSF";
29601  case X86ISD::BSR:                return "X86ISD::BSR";
29602  case X86ISD::SHLD:               return "X86ISD::SHLD";
29603  case X86ISD::SHRD:               return "X86ISD::SHRD";
29604  case X86ISD::FAND:               return "X86ISD::FAND";
29605  case X86ISD::FANDN:              return "X86ISD::FANDN";
29606  case X86ISD::FOR:                return "X86ISD::FOR";
29607  case X86ISD::FXOR:               return "X86ISD::FXOR";
29608  case X86ISD::FILD:               return "X86ISD::FILD";
29609  case X86ISD::FILD_FLAG:          return "X86ISD::FILD_FLAG";
29610  case X86ISD::FIST:               return "X86ISD::FIST";
29611  case X86ISD::FP_TO_INT_IN_MEM:   return "X86ISD::FP_TO_INT_IN_MEM";
29612  case X86ISD::FLD:                return "X86ISD::FLD";
29613  case X86ISD::FST:                return "X86ISD::FST";
29614  case X86ISD::CALL:               return "X86ISD::CALL";
29615  case X86ISD::BT:                 return "X86ISD::BT";
29616  case X86ISD::CMP:                return "X86ISD::CMP";
29617  case X86ISD::STRICT_FCMP:        return "X86ISD::STRICT_FCMP";
29618  case X86ISD::STRICT_FCMPS:       return "X86ISD::STRICT_FCMPS";
29619  case X86ISD::COMI:               return "X86ISD::COMI";
29620  case X86ISD::UCOMI:              return "X86ISD::UCOMI";
29621  case X86ISD::CMPM:               return "X86ISD::CMPM";
29622  case X86ISD::STRICT_CMPM:        return "X86ISD::STRICT_CMPM";
29623  case X86ISD::CMPM_SAE:           return "X86ISD::CMPM_SAE";
29624  case X86ISD::SETCC:              return "X86ISD::SETCC";
29625  case X86ISD::SETCC_CARRY:        return "X86ISD::SETCC_CARRY";
29626  case X86ISD::FSETCC:             return "X86ISD::FSETCC";
29627  case X86ISD::FSETCCM:            return "X86ISD::FSETCCM";
29628  case X86ISD::FSETCCM_SAE:        return "X86ISD::FSETCCM_SAE";
29629  case X86ISD::CMOV:               return "X86ISD::CMOV";
29630  case X86ISD::BRCOND:             return "X86ISD::BRCOND";
29631  case X86ISD::RET_FLAG:           return "X86ISD::RET_FLAG";
29632  case X86ISD::IRET:               return "X86ISD::IRET";
29633  case X86ISD::REP_STOS:           return "X86ISD::REP_STOS";
29634  case X86ISD::REP_MOVS:           return "X86ISD::REP_MOVS";
29635  case X86ISD::GlobalBaseReg:      return "X86ISD::GlobalBaseReg";
29636  case X86ISD::Wrapper:            return "X86ISD::Wrapper";
29637  case X86ISD::WrapperRIP:         return "X86ISD::WrapperRIP";
29638  case X86ISD::MOVQ2DQ:            return "X86ISD::MOVQ2DQ";
29639  case X86ISD::MOVDQ2Q:            return "X86ISD::MOVDQ2Q";
29640  case X86ISD::MMX_MOVD2W:         return "X86ISD::MMX_MOVD2W";
29641  case X86ISD::MMX_MOVW2D:         return "X86ISD::MMX_MOVW2D";
29642  case X86ISD::PEXTRB:             return "X86ISD::PEXTRB";
29643  case X86ISD::PEXTRW:             return "X86ISD::PEXTRW";
29644  case X86ISD::INSERTPS:           return "X86ISD::INSERTPS";
29645  case X86ISD::PINSRB:             return "X86ISD::PINSRB";
29646  case X86ISD::PINSRW:             return "X86ISD::PINSRW";
29647  case X86ISD::PSHUFB:             return "X86ISD::PSHUFB";
29648  case X86ISD::ANDNP:              return "X86ISD::ANDNP";
29649  case X86ISD::BLENDI:             return "X86ISD::BLENDI";
29650  case X86ISD::BLENDV:             return "X86ISD::BLENDV";
29651  case X86ISD::HADD:               return "X86ISD::HADD";
29652  case X86ISD::HSUB:               return "X86ISD::HSUB";
29653  case X86ISD::FHADD:              return "X86ISD::FHADD";
29654  case X86ISD::FHSUB:              return "X86ISD::FHSUB";
29655  case X86ISD::CONFLICT:           return "X86ISD::CONFLICT";
29656  case X86ISD::FMAX:               return "X86ISD::FMAX";
29657  case X86ISD::FMAXS:              return "X86ISD::FMAXS";
29658  case X86ISD::FMAX_SAE:           return "X86ISD::FMAX_SAE";
29659  case X86ISD::FMAXS_SAE:          return "X86ISD::FMAXS_SAE";
29660  case X86ISD::FMIN:               return "X86ISD::FMIN";
29661  case X86ISD::FMINS:              return "X86ISD::FMINS";
29662  case X86ISD::FMIN_SAE:           return "X86ISD::FMIN_SAE";
29663  case X86ISD::FMINS_SAE:          return "X86ISD::FMINS_SAE";
29664  case X86ISD::FMAXC:              return "X86ISD::FMAXC";
29665  case X86ISD::FMINC:              return "X86ISD::FMINC";
29666  case X86ISD::FRSQRT:             return "X86ISD::FRSQRT";
29667  case X86ISD::FRCP:               return "X86ISD::FRCP";
29668  case X86ISD::EXTRQI:             return "X86ISD::EXTRQI";
29669  case X86ISD::INSERTQI:           return "X86ISD::INSERTQI";
29670  case X86ISD::TLSADDR:            return "X86ISD::TLSADDR";
29671  case X86ISD::TLSBASEADDR:        return "X86ISD::TLSBASEADDR";
29672  case X86ISD::TLSCALL:            return "X86ISD::TLSCALL";
29673  case X86ISD::EH_SJLJ_SETJMP:     return "X86ISD::EH_SJLJ_SETJMP";
29674  case X86ISD::EH_SJLJ_LONGJMP:    return "X86ISD::EH_SJLJ_LONGJMP";
29675  case X86ISD::EH_SJLJ_SETUP_DISPATCH:
29676    return "X86ISD::EH_SJLJ_SETUP_DISPATCH";
29677  case X86ISD::EH_RETURN:          return "X86ISD::EH_RETURN";
29678  case X86ISD::TC_RETURN:          return "X86ISD::TC_RETURN";
29679  case X86ISD::FNSTCW16m:          return "X86ISD::FNSTCW16m";
29680  case X86ISD::FNSTSW16r:          return "X86ISD::FNSTSW16r";
29681  case X86ISD::LCMPXCHG_DAG:       return "X86ISD::LCMPXCHG_DAG";
29682  case X86ISD::LCMPXCHG8_DAG:      return "X86ISD::LCMPXCHG8_DAG";
29683  case X86ISD::LCMPXCHG16_DAG:     return "X86ISD::LCMPXCHG16_DAG";
29684  case X86ISD::LCMPXCHG8_SAVE_EBX_DAG:
29685    return "X86ISD::LCMPXCHG8_SAVE_EBX_DAG";
29686  case X86ISD::LCMPXCHG16_SAVE_RBX_DAG:
29687    return "X86ISD::LCMPXCHG16_SAVE_RBX_DAG";
29688  case X86ISD::LADD:               return "X86ISD::LADD";
29689  case X86ISD::LSUB:               return "X86ISD::LSUB";
29690  case X86ISD::LOR:                return "X86ISD::LOR";
29691  case X86ISD::LXOR:               return "X86ISD::LXOR";
29692  case X86ISD::LAND:               return "X86ISD::LAND";
29693  case X86ISD::VZEXT_MOVL:         return "X86ISD::VZEXT_MOVL";
29694  case X86ISD::VZEXT_LOAD:         return "X86ISD::VZEXT_LOAD";
29695  case X86ISD::VEXTRACT_STORE:     return "X86ISD::VEXTRACT_STORE";
29696  case X86ISD::VTRUNC:             return "X86ISD::VTRUNC";
29697  case X86ISD::VTRUNCS:            return "X86ISD::VTRUNCS";
29698  case X86ISD::VTRUNCUS:           return "X86ISD::VTRUNCUS";
29699  case X86ISD::VMTRUNC:            return "X86ISD::VMTRUNC";
29700  case X86ISD::VMTRUNCS:           return "X86ISD::VMTRUNCS";
29701  case X86ISD::VMTRUNCUS:          return "X86ISD::VMTRUNCUS";
29702  case X86ISD::VTRUNCSTORES:       return "X86ISD::VTRUNCSTORES";
29703  case X86ISD::VTRUNCSTOREUS:      return "X86ISD::VTRUNCSTOREUS";
29704  case X86ISD::VMTRUNCSTORES:      return "X86ISD::VMTRUNCSTORES";
29705  case X86ISD::VMTRUNCSTOREUS:     return "X86ISD::VMTRUNCSTOREUS";
29706  case X86ISD::VFPEXT:             return "X86ISD::VFPEXT";
29707  case X86ISD::STRICT_VFPEXT:      return "X86ISD::STRICT_VFPEXT";
29708  case X86ISD::VFPEXT_SAE:         return "X86ISD::VFPEXT_SAE";
29709  case X86ISD::VFPEXTS:            return "X86ISD::VFPEXTS";
29710  case X86ISD::VFPEXTS_SAE:        return "X86ISD::VFPEXTS_SAE";
29711  case X86ISD::VFPROUND:           return "X86ISD::VFPROUND";
29712  case X86ISD::STRICT_VFPROUND:    return "X86ISD::STRICT_VFPROUND";
29713  case X86ISD::VMFPROUND:          return "X86ISD::VMFPROUND";
29714  case X86ISD::VFPROUND_RND:       return "X86ISD::VFPROUND_RND";
29715  case X86ISD::VFPROUNDS:          return "X86ISD::VFPROUNDS";
29716  case X86ISD::VFPROUNDS_RND:      return "X86ISD::VFPROUNDS_RND";
29717  case X86ISD::VSHLDQ:             return "X86ISD::VSHLDQ";
29718  case X86ISD::VSRLDQ:             return "X86ISD::VSRLDQ";
29719  case X86ISD::VSHL:               return "X86ISD::VSHL";
29720  case X86ISD::VSRL:               return "X86ISD::VSRL";
29721  case X86ISD::VSRA:               return "X86ISD::VSRA";
29722  case X86ISD::VSHLI:              return "X86ISD::VSHLI";
29723  case X86ISD::VSRLI:              return "X86ISD::VSRLI";
29724  case X86ISD::VSRAI:              return "X86ISD::VSRAI";
29725  case X86ISD::VSHLV:              return "X86ISD::VSHLV";
29726  case X86ISD::VSRLV:              return "X86ISD::VSRLV";
29727  case X86ISD::VSRAV:              return "X86ISD::VSRAV";
29728  case X86ISD::VROTLI:             return "X86ISD::VROTLI";
29729  case X86ISD::VROTRI:             return "X86ISD::VROTRI";
29730  case X86ISD::VPPERM:             return "X86ISD::VPPERM";
29731  case X86ISD::CMPP:               return "X86ISD::CMPP";
29732  case X86ISD::STRICT_CMPP:        return "X86ISD::STRICT_CMPP";
29733  case X86ISD::PCMPEQ:             return "X86ISD::PCMPEQ";
29734  case X86ISD::PCMPGT:             return "X86ISD::PCMPGT";
29735  case X86ISD::PHMINPOS:           return "X86ISD::PHMINPOS";
29736  case X86ISD::ADD:                return "X86ISD::ADD";
29737  case X86ISD::SUB:                return "X86ISD::SUB";
29738  case X86ISD::ADC:                return "X86ISD::ADC";
29739  case X86ISD::SBB:                return "X86ISD::SBB";
29740  case X86ISD::SMUL:               return "X86ISD::SMUL";
29741  case X86ISD::UMUL:               return "X86ISD::UMUL";
29742  case X86ISD::OR:                 return "X86ISD::OR";
29743  case X86ISD::XOR:                return "X86ISD::XOR";
29744  case X86ISD::AND:                return "X86ISD::AND";
29745  case X86ISD::BEXTR:              return "X86ISD::BEXTR";
29746  case X86ISD::BZHI:               return "X86ISD::BZHI";
29747  case X86ISD::MUL_IMM:            return "X86ISD::MUL_IMM";
29748  case X86ISD::MOVMSK:             return "X86ISD::MOVMSK";
29749  case X86ISD::PTEST:              return "X86ISD::PTEST";
29750  case X86ISD::TESTP:              return "X86ISD::TESTP";
29751  case X86ISD::KORTEST:            return "X86ISD::KORTEST";
29752  case X86ISD::KTEST:              return "X86ISD::KTEST";
29753  case X86ISD::KADD:               return "X86ISD::KADD";
29754  case X86ISD::KSHIFTL:            return "X86ISD::KSHIFTL";
29755  case X86ISD::KSHIFTR:            return "X86ISD::KSHIFTR";
29756  case X86ISD::PACKSS:             return "X86ISD::PACKSS";
29757  case X86ISD::PACKUS:             return "X86ISD::PACKUS";
29758  case X86ISD::PALIGNR:            return "X86ISD::PALIGNR";
29759  case X86ISD::VALIGN:             return "X86ISD::VALIGN";
29760  case X86ISD::VSHLD:              return "X86ISD::VSHLD";
29761  case X86ISD::VSHRD:              return "X86ISD::VSHRD";
29762  case X86ISD::VSHLDV:             return "X86ISD::VSHLDV";
29763  case X86ISD::VSHRDV:             return "X86ISD::VSHRDV";
29764  case X86ISD::PSHUFD:             return "X86ISD::PSHUFD";
29765  case X86ISD::PSHUFHW:            return "X86ISD::PSHUFHW";
29766  case X86ISD::PSHUFLW:            return "X86ISD::PSHUFLW";
29767  case X86ISD::SHUFP:              return "X86ISD::SHUFP";
29768  case X86ISD::SHUF128:            return "X86ISD::SHUF128";
29769  case X86ISD::MOVLHPS:            return "X86ISD::MOVLHPS";
29770  case X86ISD::MOVHLPS:            return "X86ISD::MOVHLPS";
29771  case X86ISD::MOVDDUP:            return "X86ISD::MOVDDUP";
29772  case X86ISD::MOVSHDUP:           return "X86ISD::MOVSHDUP";
29773  case X86ISD::MOVSLDUP:           return "X86ISD::MOVSLDUP";
29774  case X86ISD::MOVSD:              return "X86ISD::MOVSD";
29775  case X86ISD::MOVSS:              return "X86ISD::MOVSS";
29776  case X86ISD::UNPCKL:             return "X86ISD::UNPCKL";
29777  case X86ISD::UNPCKH:             return "X86ISD::UNPCKH";
29778  case X86ISD::VBROADCAST:         return "X86ISD::VBROADCAST";
29779  case X86ISD::VBROADCAST_LOAD:    return "X86ISD::VBROADCAST_LOAD";
29780  case X86ISD::VBROADCASTM:        return "X86ISD::VBROADCASTM";
29781  case X86ISD::SUBV_BROADCAST:     return "X86ISD::SUBV_BROADCAST";
29782  case X86ISD::VPERMILPV:          return "X86ISD::VPERMILPV";
29783  case X86ISD::VPERMILPI:          return "X86ISD::VPERMILPI";
29784  case X86ISD::VPERM2X128:         return "X86ISD::VPERM2X128";
29785  case X86ISD::VPERMV:             return "X86ISD::VPERMV";
29786  case X86ISD::VPERMV3:            return "X86ISD::VPERMV3";
29787  case X86ISD::VPERMI:             return "X86ISD::VPERMI";
29788  case X86ISD::VPTERNLOG:          return "X86ISD::VPTERNLOG";
29789  case X86ISD::VFIXUPIMM:          return "X86ISD::VFIXUPIMM";
29790  case X86ISD::VFIXUPIMM_SAE:      return "X86ISD::VFIXUPIMM_SAE";
29791  case X86ISD::VFIXUPIMMS:         return "X86ISD::VFIXUPIMMS";
29792  case X86ISD::VFIXUPIMMS_SAE:     return "X86ISD::VFIXUPIMMS_SAE";
29793  case X86ISD::VRANGE:             return "X86ISD::VRANGE";
29794  case X86ISD::VRANGE_SAE:         return "X86ISD::VRANGE_SAE";
29795  case X86ISD::VRANGES:            return "X86ISD::VRANGES";
29796  case X86ISD::VRANGES_SAE:        return "X86ISD::VRANGES_SAE";
29797  case X86ISD::PMULUDQ:            return "X86ISD::PMULUDQ";
29798  case X86ISD::PMULDQ:             return "X86ISD::PMULDQ";
29799  case X86ISD::PSADBW:             return "X86ISD::PSADBW";
29800  case X86ISD::DBPSADBW:           return "X86ISD::DBPSADBW";
29801  case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
29802  case X86ISD::VAARG_64:           return "X86ISD::VAARG_64";
29803  case X86ISD::WIN_ALLOCA:         return "X86ISD::WIN_ALLOCA";
29804  case X86ISD::MEMBARRIER:         return "X86ISD::MEMBARRIER";
29805  case X86ISD::MFENCE:             return "X86ISD::MFENCE";
29806  case X86ISD::SEG_ALLOCA:         return "X86ISD::SEG_ALLOCA";
29807  case X86ISD::SAHF:               return "X86ISD::SAHF";
29808  case X86ISD::RDRAND:             return "X86ISD::RDRAND";
29809  case X86ISD::RDSEED:             return "X86ISD::RDSEED";
29810  case X86ISD::RDPKRU:             return "X86ISD::RDPKRU";
29811  case X86ISD::WRPKRU:             return "X86ISD::WRPKRU";
29812  case X86ISD::VPMADDUBSW:         return "X86ISD::VPMADDUBSW";
29813  case X86ISD::VPMADDWD:           return "X86ISD::VPMADDWD";
29814  case X86ISD::VPSHA:              return "X86ISD::VPSHA";
29815  case X86ISD::VPSHL:              return "X86ISD::VPSHL";
29816  case X86ISD::VPCOM:              return "X86ISD::VPCOM";
29817  case X86ISD::VPCOMU:             return "X86ISD::VPCOMU";
29818  case X86ISD::VPERMIL2:           return "X86ISD::VPERMIL2";
29819  case X86ISD::FMSUB:              return "X86ISD::FMSUB";
29820  case X86ISD::FNMADD:             return "X86ISD::FNMADD";
29821  case X86ISD::FNMSUB:             return "X86ISD::FNMSUB";
29822  case X86ISD::FMADDSUB:           return "X86ISD::FMADDSUB";
29823  case X86ISD::FMSUBADD:           return "X86ISD::FMSUBADD";
29824  case X86ISD::FMADD_RND:          return "X86ISD::FMADD_RND";
29825  case X86ISD::FNMADD_RND:         return "X86ISD::FNMADD_RND";
29826  case X86ISD::FMSUB_RND:          return "X86ISD::FMSUB_RND";
29827  case X86ISD::FNMSUB_RND:         return "X86ISD::FNMSUB_RND";
29828  case X86ISD::FMADDSUB_RND:       return "X86ISD::FMADDSUB_RND";
29829  case X86ISD::FMSUBADD_RND:       return "X86ISD::FMSUBADD_RND";
29830  case X86ISD::VPMADD52H:          return "X86ISD::VPMADD52H";
29831  case X86ISD::VPMADD52L:          return "X86ISD::VPMADD52L";
29832  case X86ISD::VRNDSCALE:          return "X86ISD::VRNDSCALE";
29833  case X86ISD::STRICT_VRNDSCALE:   return "X86ISD::STRICT_VRNDSCALE";
29834  case X86ISD::VRNDSCALE_SAE:      return "X86ISD::VRNDSCALE_SAE";
29835  case X86ISD::VRNDSCALES:         return "X86ISD::VRNDSCALES";
29836  case X86ISD::VRNDSCALES_SAE:     return "X86ISD::VRNDSCALES_SAE";
29837  case X86ISD::VREDUCE:            return "X86ISD::VREDUCE";
29838  case X86ISD::VREDUCE_SAE:        return "X86ISD::VREDUCE_SAE";
29839  case X86ISD::VREDUCES:           return "X86ISD::VREDUCES";
29840  case X86ISD::VREDUCES_SAE:       return "X86ISD::VREDUCES_SAE";
29841  case X86ISD::VGETMANT:           return "X86ISD::VGETMANT";
29842  case X86ISD::VGETMANT_SAE:       return "X86ISD::VGETMANT_SAE";
29843  case X86ISD::VGETMANTS:          return "X86ISD::VGETMANTS";
29844  case X86ISD::VGETMANTS_SAE:      return "X86ISD::VGETMANTS_SAE";
29845  case X86ISD::PCMPESTR:           return "X86ISD::PCMPESTR";
29846  case X86ISD::PCMPISTR:           return "X86ISD::PCMPISTR";
29847  case X86ISD::XTEST:              return "X86ISD::XTEST";
29848  case X86ISD::COMPRESS:           return "X86ISD::COMPRESS";
29849  case X86ISD::EXPAND:             return "X86ISD::EXPAND";
29850  case X86ISD::SELECTS:            return "X86ISD::SELECTS";
29851  case X86ISD::ADDSUB:             return "X86ISD::ADDSUB";
29852  case X86ISD::RCP14:              return "X86ISD::RCP14";
29853  case X86ISD::RCP14S:             return "X86ISD::RCP14S";
29854  case X86ISD::RCP28:              return "X86ISD::RCP28";
29855  case X86ISD::RCP28_SAE:          return "X86ISD::RCP28_SAE";
29856  case X86ISD::RCP28S:             return "X86ISD::RCP28S";
29857  case X86ISD::RCP28S_SAE:         return "X86ISD::RCP28S_SAE";
29858  case X86ISD::EXP2:               return "X86ISD::EXP2";
29859  case X86ISD::EXP2_SAE:           return "X86ISD::EXP2_SAE";
29860  case X86ISD::RSQRT14:            return "X86ISD::RSQRT14";
29861  case X86ISD::RSQRT14S:           return "X86ISD::RSQRT14S";
29862  case X86ISD::RSQRT28:            return "X86ISD::RSQRT28";
29863  case X86ISD::RSQRT28_SAE:        return "X86ISD::RSQRT28_SAE";
29864  case X86ISD::RSQRT28S:           return "X86ISD::RSQRT28S";
29865  case X86ISD::RSQRT28S_SAE:       return "X86ISD::RSQRT28S_SAE";
29866  case X86ISD::FADD_RND:           return "X86ISD::FADD_RND";
29867  case X86ISD::FADDS:              return "X86ISD::FADDS";
29868  case X86ISD::FADDS_RND:          return "X86ISD::FADDS_RND";
29869  case X86ISD::FSUB_RND:           return "X86ISD::FSUB_RND";
29870  case X86ISD::FSUBS:              return "X86ISD::FSUBS";
29871  case X86ISD::FSUBS_RND:          return "X86ISD::FSUBS_RND";
29872  case X86ISD::FMUL_RND:           return "X86ISD::FMUL_RND";
29873  case X86ISD::FMULS:              return "X86ISD::FMULS";
29874  case X86ISD::FMULS_RND:          return "X86ISD::FMULS_RND";
29875  case X86ISD::FDIV_RND:           return "X86ISD::FDIV_RND";
29876  case X86ISD::FDIVS:              return "X86ISD::FDIVS";
29877  case X86ISD::FDIVS_RND:          return "X86ISD::FDIVS_RND";
29878  case X86ISD::FSQRT_RND:          return "X86ISD::FSQRT_RND";
29879  case X86ISD::FSQRTS:             return "X86ISD::FSQRTS";
29880  case X86ISD::FSQRTS_RND:         return "X86ISD::FSQRTS_RND";
29881  case X86ISD::FGETEXP:            return "X86ISD::FGETEXP";
29882  case X86ISD::FGETEXP_SAE:        return "X86ISD::FGETEXP_SAE";
29883  case X86ISD::FGETEXPS:           return "X86ISD::FGETEXPS";
29884  case X86ISD::FGETEXPS_SAE:       return "X86ISD::FGETEXPS_SAE";
29885  case X86ISD::SCALEF:             return "X86ISD::SCALEF";
29886  case X86ISD::SCALEF_RND:         return "X86ISD::SCALEF_RND";
29887  case X86ISD::SCALEFS:            return "X86ISD::SCALEFS";
29888  case X86ISD::SCALEFS_RND:        return "X86ISD::SCALEFS_RND";
29889  case X86ISD::AVG:                return "X86ISD::AVG";
29890  case X86ISD::MULHRS:             return "X86ISD::MULHRS";
29891  case X86ISD::SINT_TO_FP_RND:     return "X86ISD::SINT_TO_FP_RND";
29892  case X86ISD::UINT_TO_FP_RND:     return "X86ISD::UINT_TO_FP_RND";
29893  case X86ISD::CVTTP2SI:           return "X86ISD::CVTTP2SI";
29894  case X86ISD::CVTTP2UI:           return "X86ISD::CVTTP2UI";
29895  case X86ISD::STRICT_CVTTP2SI:    return "X86ISD::STRICT_CVTTP2SI";
29896  case X86ISD::STRICT_CVTTP2UI:    return "X86ISD::STRICT_CVTTP2UI";
29897  case X86ISD::MCVTTP2SI:          return "X86ISD::MCVTTP2SI";
29898  case X86ISD::MCVTTP2UI:          return "X86ISD::MCVTTP2UI";
29899  case X86ISD::CVTTP2SI_SAE:       return "X86ISD::CVTTP2SI_SAE";
29900  case X86ISD::CVTTP2UI_SAE:       return "X86ISD::CVTTP2UI_SAE";
29901  case X86ISD::CVTTS2SI:           return "X86ISD::CVTTS2SI";
29902  case X86ISD::CVTTS2UI:           return "X86ISD::CVTTS2UI";
29903  case X86ISD::CVTTS2SI_SAE:       return "X86ISD::CVTTS2SI_SAE";
29904  case X86ISD::CVTTS2UI_SAE:       return "X86ISD::CVTTS2UI_SAE";
29905  case X86ISD::CVTSI2P:            return "X86ISD::CVTSI2P";
29906  case X86ISD::CVTUI2P:            return "X86ISD::CVTUI2P";
29907  case X86ISD::STRICT_CVTSI2P:     return "X86ISD::STRICT_CVTSI2P";
29908  case X86ISD::STRICT_CVTUI2P:     return "X86ISD::STRICT_CVTUI2P";
29909  case X86ISD::MCVTSI2P:           return "X86ISD::MCVTSI2P";
29910  case X86ISD::MCVTUI2P:           return "X86ISD::MCVTUI2P";
29911  case X86ISD::VFPCLASS:           return "X86ISD::VFPCLASS";
29912  case X86ISD::VFPCLASSS:          return "X86ISD::VFPCLASSS";
29913  case X86ISD::MULTISHIFT:         return "X86ISD::MULTISHIFT";
29914  case X86ISD::SCALAR_SINT_TO_FP:     return "X86ISD::SCALAR_SINT_TO_FP";
29915  case X86ISD::SCALAR_SINT_TO_FP_RND: return "X86ISD::SCALAR_SINT_TO_FP_RND";
29916  case X86ISD::SCALAR_UINT_TO_FP:     return "X86ISD::SCALAR_UINT_TO_FP";
29917  case X86ISD::SCALAR_UINT_TO_FP_RND: return "X86ISD::SCALAR_UINT_TO_FP_RND";
29918  case X86ISD::CVTPS2PH:           return "X86ISD::CVTPS2PH";
29919  case X86ISD::MCVTPS2PH:          return "X86ISD::MCVTPS2PH";
29920  case X86ISD::CVTPH2PS:           return "X86ISD::CVTPH2PS";
29921  case X86ISD::CVTPH2PS_SAE:       return "X86ISD::CVTPH2PS_SAE";
29922  case X86ISD::CVTP2SI:            return "X86ISD::CVTP2SI";
29923  case X86ISD::CVTP2UI:            return "X86ISD::CVTP2UI";
29924  case X86ISD::MCVTP2SI:           return "X86ISD::MCVTP2SI";
29925  case X86ISD::MCVTP2UI:           return "X86ISD::MCVTP2UI";
29926  case X86ISD::CVTP2SI_RND:        return "X86ISD::CVTP2SI_RND";
29927  case X86ISD::CVTP2UI_RND:        return "X86ISD::CVTP2UI_RND";
29928  case X86ISD::CVTS2SI:            return "X86ISD::CVTS2SI";
29929  case X86ISD::CVTS2UI:            return "X86ISD::CVTS2UI";
29930  case X86ISD::CVTS2SI_RND:        return "X86ISD::CVTS2SI_RND";
29931  case X86ISD::CVTS2UI_RND:        return "X86ISD::CVTS2UI_RND";
29932  case X86ISD::CVTNE2PS2BF16:      return "X86ISD::CVTNE2PS2BF16";
29933  case X86ISD::CVTNEPS2BF16:       return "X86ISD::CVTNEPS2BF16";
29934  case X86ISD::MCVTNEPS2BF16:      return "X86ISD::MCVTNEPS2BF16";
29935  case X86ISD::DPBF16PS:           return "X86ISD::DPBF16PS";
29936  case X86ISD::LWPINS:             return "X86ISD::LWPINS";
29937  case X86ISD::MGATHER:            return "X86ISD::MGATHER";
29938  case X86ISD::MSCATTER:           return "X86ISD::MSCATTER";
29939  case X86ISD::VPDPBUSD:           return "X86ISD::VPDPBUSD";
29940  case X86ISD::VPDPBUSDS:          return "X86ISD::VPDPBUSDS";
29941  case X86ISD::VPDPWSSD:           return "X86ISD::VPDPWSSD";
29942  case X86ISD::VPDPWSSDS:          return "X86ISD::VPDPWSSDS";
29943  case X86ISD::VPSHUFBITQMB:       return "X86ISD::VPSHUFBITQMB";
29944  case X86ISD::GF2P8MULB:          return "X86ISD::GF2P8MULB";
29945  case X86ISD::GF2P8AFFINEQB:      return "X86ISD::GF2P8AFFINEQB";
29946  case X86ISD::GF2P8AFFINEINVQB:   return "X86ISD::GF2P8AFFINEINVQB";
29947  case X86ISD::NT_CALL:            return "X86ISD::NT_CALL";
29948  case X86ISD::NT_BRIND:           return "X86ISD::NT_BRIND";
29949  case X86ISD::UMWAIT:             return "X86ISD::UMWAIT";
29950  case X86ISD::TPAUSE:             return "X86ISD::TPAUSE";
29951  case X86ISD::ENQCMD:             return "X86ISD::ENQCMD";
29952  case X86ISD::ENQCMDS:            return "X86ISD::ENQCMDS";
29953  case X86ISD::VP2INTERSECT:       return "X86ISD::VP2INTERSECT";
29954  }
29955  return nullptr;
29956}
29957
29958/// Return true if the addressing mode represented by AM is legal for this
29959/// target, for a load/store of the specified type.
29960bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
29961                                              const AddrMode &AM, Type *Ty,
29962                                              unsigned AS,
29963                                              Instruction *I) const {
29964  // X86 supports extremely general addressing modes.
29965  CodeModel::Model M = getTargetMachine().getCodeModel();
29966
29967  // X86 allows a sign-extended 32-bit immediate field as a displacement.
29968  if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
29969    return false;
29970
29971  if (AM.BaseGV) {
29972    unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);
29973
29974    // If a reference to this global requires an extra load, we can't fold it.
29975    if (isGlobalStubReference(GVFlags))
29976      return false;
29977
29978    // If BaseGV requires a register for the PIC base, we cannot also have a
29979    // BaseReg specified.
29980    if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
29981      return false;
29982
29983    // If lower 4G is not available, then we must use rip-relative addressing.
29984    if ((M != CodeModel::Small || isPositionIndependent()) &&
29985        Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
29986      return false;
29987  }
29988
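  // Illustrative examples for the scale handling below (AT&T syntax, assumed
  // instruction selection):
  //   scale 8 with base+index+disp:  movq 16(%rdi,%rsi,8), %rax   -- always OK
  //   scale 3 with no base register: leaq (%rsi,%rsi,2), %rax     -- folded as
  //                                  base + index*2 on the same register
  //   scale 3 plus a separate base:  rejected, since the hardware only encodes
  //                                  scale factors of 1, 2, 4 and 8.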
29989  switch (AM.Scale) {
29990  case 0:
29991  case 1:
29992  case 2:
29993  case 4:
29994  case 8:
29995    // These scales always work.
29996    break;
29997  case 3:
29998  case 5:
29999  case 9:
30000    // These scales are formed with basereg+scalereg.  Only accept if there is
30001    // no basereg yet.
30002    if (AM.HasBaseReg)
30003      return false;
30004    break;
30005  default:  // Other stuff never works.
30006    return false;
30007  }
30008
30009  return true;
30010}
30011
30012bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
30013  unsigned Bits = Ty->getScalarSizeInBits();
30014
30015  // 8-bit shifts are always expensive, but versions with a scalar amount aren't
30016  // particularly cheaper than those without.
30017  if (Bits == 8)
30018    return false;
30019
30020  // XOP has v16i8/v8i16/v4i32/v2i64 variable vector shifts.
30021  if (Subtarget.hasXOP() && Ty->getPrimitiveSizeInBits() == 128 &&
30022      (Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64))
30023    return false;
30024
30025  // AVX2 has vpsllv[dq] instructions (and other shifts) that make variable
30026  // shifts just as cheap as scalar ones.
30027  if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64))
30028    return false;
30029
30030  // AVX512BW has shifts such as vpsllvw.
30031  if (Subtarget.hasBWI() && Bits == 16)
30032    return false;
30033
30034  // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
30035  // fully general vector.
30036  return true;
30037}
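// A minimal illustration of the trade-off handled above (AT&T syntax, assumed
// codegen): with plain SSE2 a v4i32 shift by a single scalar amount is one
//   psrld %xmm1, %xmm0        ; count taken from the low bits of %xmm1
// while a per-element variable shift has no SSE2 instruction and must be
// scalarized; AVX2's
//   vpsrlvd %ymm1, %ymm0, %ymm0
// closes that gap for 32/64-bit elements, which is why the hook returns false
// ("not notably cheaper by scalar") once hasAVX2() is set.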
30038
30039bool X86TargetLowering::isBinOp(unsigned Opcode) const {
30040  switch (Opcode) {
30041  // These are non-commutative binops.
30042  // TODO: Add more X86ISD opcodes once we have test coverage.
30043  case X86ISD::ANDNP:
30044  case X86ISD::PCMPGT:
30045  case X86ISD::FMAX:
30046  case X86ISD::FMIN:
30047  case X86ISD::FANDN:
30048    return true;
30049  }
30050
30051  return TargetLoweringBase::isBinOp(Opcode);
30052}
30053
30054bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {
30055  switch (Opcode) {
30056  // TODO: Add more X86ISD opcodes once we have test coverage.
30057  case X86ISD::PCMPEQ:
30058  case X86ISD::PMULDQ:
30059  case X86ISD::PMULUDQ:
30060  case X86ISD::FMAXC:
30061  case X86ISD::FMINC:
30062  case X86ISD::FAND:
30063  case X86ISD::FOR:
30064  case X86ISD::FXOR:
30065    return true;
30066  }
30067
30068  return TargetLoweringBase::isCommutativeBinOp(Opcode);
30069}
30070
30071bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
30072  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
30073    return false;
30074  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
30075  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
30076  return NumBits1 > NumBits2;
30077}
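// For example (illustrative): a trunc i64 -> i32 costs nothing because users
// simply read the 32-bit subregister (%eax instead of %rax), which is why any
// narrowing integer truncate is reported as free above and in the EVT overload
// further down.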
30078
30079bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
30080  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
30081    return false;
30082
30083  if (!isTypeLegal(EVT::getEVT(Ty1)))
30084    return false;
30085
30086  assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
30087
30088  // Assuming the caller doesn't have a zeroext or signext return parameter,
30089  // truncation all the way down to i1 is valid.
30090  return true;
30091}
30092
30093bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
30094  return isInt<32>(Imm);
30095}
30096
30097bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
30098  // Can also use sub to handle negated immediates.
30099  return isInt<32>(Imm);
30100}
30101
30102bool X86TargetLowering::isLegalStoreImmediate(int64_t Imm) const {
30103  return isInt<32>(Imm);
30104}
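// Illustrative: a constant that fits a sign-extended 32-bit immediate, e.g.
//   cmpq $0x7fffffff, %rax
// folds directly into the compare/add/store, whereas something like
// 0x100000000 would first have to be materialized with a movabsq into a
// scratch register (assumed example).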
30105
30106bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
30107  if (!VT1.isInteger() || !VT2.isInteger())
30108    return false;
30109  unsigned NumBits1 = VT1.getSizeInBits();
30110  unsigned NumBits2 = VT2.getSizeInBits();
30111  return NumBits1 > NumBits2;
30112}
30113
30114bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
30115  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
30116  return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
30117}
30118
30119bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
30120  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
30121  return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
30122}
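// For instance (illustrative), a 32-bit def such as
//   movl %edi, %eax
// already clears bits 63:32 of %rax, so a following zext i32 -> i64 needs no
// extra instruction on 64-bit targets, matching the hooks above.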
30123
30124bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
30125  EVT VT1 = Val.getValueType();
30126  if (isZExtFree(VT1, VT2))
30127    return true;
30128
30129  if (Val.getOpcode() != ISD::LOAD)
30130    return false;
30131
30132  if (!VT1.isSimple() || !VT1.isInteger() ||
30133      !VT2.isSimple() || !VT2.isInteger())
30134    return false;
30135
30136  switch (VT1.getSimpleVT().SimpleTy) {
30137  default: break;
30138  case MVT::i8:
30139  case MVT::i16:
30140  case MVT::i32:
30141    // X86 has 8, 16, and 32-bit zero-extending loads.
30142    return true;
30143  }
30144
30145  return false;
30146}
30147
30148bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
30149  if (isa<MaskedLoadSDNode>(ExtVal.getOperand(0)))
30150    return false;
30151
30152  EVT SrcVT = ExtVal.getOperand(0).getValueType();
30153
30154  // There is no extending load for vXi1.
30155  if (SrcVT.getScalarType() == MVT::i1)
30156    return false;
30157
30158  return true;
30159}
30160
30161bool X86TargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
30162                                                   EVT VT) const {
30163  if (!Subtarget.hasAnyFMA())
30164    return false;
30165
30166  VT = VT.getScalarType();
30167
30168  if (!VT.isSimple())
30169    return false;
30170
30171  switch (VT.getSimpleVT().SimpleTy) {
30172  case MVT::f32:
30173  case MVT::f64:
30174    return true;
30175  default:
30176    break;
30177  }
30178
30179  return false;
30180}
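// Illustrative: with any FMA feature available, (fadd (fmul a, b), c) on f32
// or f64 can be selected to a single fused multiply-add such as vfmadd213ss /
// vfmadd213sd (or their packed forms) instead of a separate multiply and add;
// the exact opcode depends on operand placement and is only an assumption here.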
30181
30182bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
30183  // i16 instructions are longer (0x66 prefix) and potentially slower.
30184  return !(VT1 == MVT::i32 && VT2 == MVT::i16);
30185}
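// Illustrative encodings: addl %eax, %ebx is 01 c3, while the narrowed
// addw %ax, %bx needs the 0x66 operand-size prefix (66 01 c3); 16-bit defs can
// also incur partial-register stalls on some cores, hence i32 -> i16 narrowing
// is reported as unprofitable above.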
30186
30187/// Targets can use this to indicate that they only support *some*
30188/// VECTOR_SHUFFLE operations, those with specific masks.
30189/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
30190/// are assumed to be legal.
30191bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
30192  if (!VT.isSimple())
30193    return false;
30194
30195  // Not for i1 vectors
30196  if (VT.getSimpleVT().getScalarType() == MVT::i1)
30197    return false;
30198
30199  // Very little shuffling can be done for 64-bit vectors right now.
30200  if (VT.getSimpleVT().getSizeInBits() == 64)
30201    return false;
30202
30203  // We only care that the types being shuffled are legal. The lowering can
30204  // handle any possible shuffle mask that results.
30205  return isTypeLegal(VT.getSimpleVT());
30206}
30207
30208bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask,
30209                                               EVT VT) const {
30210  // Don't convert an 'and' into a shuffle that we don't directly support.
30211  // vpblendw and vpshufb for 256-bit vectors are not available on AVX1.
30212  if (!Subtarget.hasAVX2())
30213    if (VT == MVT::v32i8 || VT == MVT::v16i16)
30214      return false;
30215
30216  // Just delegate to the generic legality, clear masks aren't special.
30217  return isShuffleMaskLegal(Mask, VT);
30218}
30219
30220bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
30221  // If the subtarget is using retpolines, we must not generate jump tables.
30222  if (Subtarget.useRetpolineIndirectBranches())
30223    return false;
30224
30225  // Otherwise, fall back on the generic logic.
30226  return TargetLowering::areJTsAllowed(Fn);
30227}
30228
30229//===----------------------------------------------------------------------===//
30230//                           X86 Scheduler Hooks
30231//===----------------------------------------------------------------------===//
30232
30233/// Utility function to emit xbegin specifying the start of an RTM region.
30234static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
30235                                     const TargetInstrInfo *TII) {
30236  DebugLoc DL = MI.getDebugLoc();
30237
30238  const BasicBlock *BB = MBB->getBasicBlock();
30239  MachineFunction::iterator I = ++MBB->getIterator();
30240
30241  // For the v = xbegin(), we generate
30242  //
30243  // thisMBB:
30244  //  xbegin sinkMBB
30245  //
30246  // mainMBB:
30247  //  s0 = -1
30248  //
30249  // fallBB:
30250  //  eax = # XABORT_DEF
30251  //  s1 = eax
30252  //
30253  // sinkMBB:
30254  //  v = phi(s0/mainBB, s1/fallBB)
30255
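  // Roughly, the blocks built below might print as (illustrative only;
  // register and label names are assumed):
  //     xbegin  .LfallMBB        # an abort resumes here with the status in EAX
  //     movl    $-1, %ecx        # mainMBB: transaction started successfully
  //     jmp     .LsinkMBB
  //   .LfallMBB:
  //     movl    %eax, %ecx       # fallMBB: copy out the abort status
  //   .LsinkMBB:                 # sinkMBB: v lives in %ecx from here on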
30256  MachineBasicBlock *thisMBB = MBB;
30257  MachineFunction *MF = MBB->getParent();
30258  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
30259  MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
30260  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
30261  MF->insert(I, mainMBB);
30262  MF->insert(I, fallMBB);
30263  MF->insert(I, sinkMBB);
30264
30265  // Transfer the remainder of BB and its successor edges to sinkMBB.
30266  sinkMBB->splice(sinkMBB->begin(), MBB,
30267                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
30268  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
30269
30270  MachineRegisterInfo &MRI = MF->getRegInfo();
30271  Register DstReg = MI.getOperand(0).getReg();
30272  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
30273  Register mainDstReg = MRI.createVirtualRegister(RC);
30274  Register fallDstReg = MRI.createVirtualRegister(RC);
30275
30276  // thisMBB:
30277  //  xbegin fallMBB
30278  //  # fallthrough to mainMBB
30279  //  # on abort, branch to fallMBB
30280  BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
30281  thisMBB->addSuccessor(mainMBB);
30282  thisMBB->addSuccessor(fallMBB);
30283
30284  // mainMBB:
30285  //  mainDstReg := -1
30286  BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
30287  BuildMI(mainMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
30288  mainMBB->addSuccessor(sinkMBB);
30289
30290  // fallMBB:
30291  //  ; pseudo instruction to model hardware's definition from XABORT
30292  //  EAX := XABORT_DEF
30293  //  fallDstReg := EAX
30294  BuildMI(fallMBB, DL, TII->get(X86::XABORT_DEF));
30295  BuildMI(fallMBB, DL, TII->get(TargetOpcode::COPY), fallDstReg)
30296      .addReg(X86::EAX);
30297  fallMBB->addSuccessor(sinkMBB);
30298
30299  // sinkMBB:
30300  //  DstReg := phi(mainDstReg/mainBB, fallDstReg/fallBB)
30301  BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(X86::PHI), DstReg)
30302      .addReg(mainDstReg).addMBB(mainMBB)
30303      .addReg(fallDstReg).addMBB(fallMBB);
30304
30305  MI.eraseFromParent();
30306  return sinkMBB;
30307}
30308
30309
30310
30311MachineBasicBlock *
30312X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
30313                                                 MachineBasicBlock *MBB) const {
30314  // Emit va_arg instruction on X86-64.
30315
30316  // Operands to this pseudo-instruction:
30317  // 0  ) Output        : destination address (reg)
30318  // 1-5) Input         : va_list address (addr, i64mem)
30319  // 6  ) ArgSize       : Size (in bytes) of vararg type
30320  // 7  ) ArgMode       : 0=overflow only, 1=use gp_offset, 2=use fp_offset
30321  // 8  ) Align         : Alignment of type
30322  // 9  ) EFLAGS (implicit-def)
30323
30324  assert(MI.getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
30325  static_assert(X86::AddrNumOperands == 5,
30326                "VAARG_64 assumes 5 address operands");
30327
30328  Register DestReg = MI.getOperand(0).getReg();
30329  MachineOperand &Base = MI.getOperand(1);
30330  MachineOperand &Scale = MI.getOperand(2);
30331  MachineOperand &Index = MI.getOperand(3);
30332  MachineOperand &Disp = MI.getOperand(4);
30333  MachineOperand &Segment = MI.getOperand(5);
30334  unsigned ArgSize = MI.getOperand(6).getImm();
30335  unsigned ArgMode = MI.getOperand(7).getImm();
30336  unsigned Align = MI.getOperand(8).getImm();
30337
30338  MachineFunction *MF = MBB->getParent();
30339
30340  // Memory Reference
30341  assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
30342
30343  MachineMemOperand *OldMMO = MI.memoperands().front();
30344
30345  // Clone the MMO into two separate MMOs for loading and storing
30346  MachineMemOperand *LoadOnlyMMO = MF->getMachineMemOperand(
30347      OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
30348  MachineMemOperand *StoreOnlyMMO = MF->getMachineMemOperand(
30349      OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);
30350
30351  // Machine Information
30352  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30353  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
30354  const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
30355  const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
30356  DebugLoc DL = MI.getDebugLoc();
30357
30358  // struct va_list {
30359  //   i32   gp_offset
30360  //   i32   fp_offset
30361  //   i64   overflow_area (address)
30362  //   i64   reg_save_area (address)
30363  // }
30364  // sizeof(va_list) = 24
30365  // alignment(va_list) = 8
30366
30367  unsigned TotalNumIntRegs = 6;
30368  unsigned TotalNumXMMRegs = 8;
30369  bool UseGPOffset = (ArgMode == 1);
30370  bool UseFPOffset = (ArgMode == 2);
30371  unsigned MaxOffset = TotalNumIntRegs * 8 +
30372                       (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
30373
30374  // Align ArgSize to a multiple of 8.
30375  unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
30376  bool NeedsAlign = (Align > 8);
30377
30378  MachineBasicBlock *thisMBB = MBB;
30379  MachineBasicBlock *overflowMBB;
30380  MachineBasicBlock *offsetMBB;
30381  MachineBasicBlock *endMBB;
30382
30383  unsigned OffsetDestReg = 0;    // Argument address computed by offsetMBB
30384  unsigned OverflowDestReg = 0;  // Argument address computed by overflowMBB
30385  unsigned OffsetReg = 0;
30386
30387  if (!UseGPOffset && !UseFPOffset) {
30388    // If we only pull from the overflow region, we don't create a branch.
30389    // We don't need to alter control flow.
30390    OffsetDestReg = 0; // unused
30391    OverflowDestReg = DestReg;
30392
30393    offsetMBB = nullptr;
30394    overflowMBB = thisMBB;
30395    endMBB = thisMBB;
30396  } else {
30397    // First emit code to check if gp_offset (or fp_offset) is below the bound.
30398    // If so, pull the argument from reg_save_area. (branch to offsetMBB)
30399    // If not, pull from overflow_area. (branch to overflowMBB)
30400    //
30401    //       thisMBB
30402    //         |     .
30403    //         |        .
30404    //     offsetMBB   overflowMBB
30405    //         |        .
30406    //         |     .
30407    //        endMBB
30408
30409    // Registers for the PHI in endMBB
30410    OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
30411    OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
30412
30413    const BasicBlock *LLVM_BB = MBB->getBasicBlock();
30414    overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
30415    offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
30416    endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
30417
30418    MachineFunction::iterator MBBIter = ++MBB->getIterator();
30419
30420    // Insert the new basic blocks
30421    MF->insert(MBBIter, offsetMBB);
30422    MF->insert(MBBIter, overflowMBB);
30423    MF->insert(MBBIter, endMBB);
30424
30425    // Transfer the remainder of MBB and its successor edges to endMBB.
30426    endMBB->splice(endMBB->begin(), thisMBB,
30427                   std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
30428    endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
30429
30430    // Make offsetMBB and overflowMBB successors of thisMBB
30431    thisMBB->addSuccessor(offsetMBB);
30432    thisMBB->addSuccessor(overflowMBB);
30433
30434    // endMBB is a successor of both offsetMBB and overflowMBB
30435    offsetMBB->addSuccessor(endMBB);
30436    overflowMBB->addSuccessor(endMBB);
30437
30438    // Load the offset value into a register
30439    OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
30440    BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
30441        .add(Base)
30442        .add(Scale)
30443        .add(Index)
30444        .addDisp(Disp, UseFPOffset ? 4 : 0)
30445        .add(Segment)
30446        .setMemRefs(LoadOnlyMMO);
30447
30448    // Check if there is enough room left to pull this argument.
30449    BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
30450      .addReg(OffsetReg)
30451      .addImm(MaxOffset + 8 - ArgSizeA8);
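    // Worked example (illustrative): for an 8-byte integer argument, ArgSizeA8
    // is 8 and MaxOffset is 48, so the branch below is taken exactly when
    // gp_offset >= 48, i.e. once all six GP argument registers have been
    // consumed.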
30452
30453    // Branch to "overflowMBB" if offset >= max
30454    // Fall through to "offsetMBB" otherwise
30455    BuildMI(thisMBB, DL, TII->get(X86::JCC_1))
30456      .addMBB(overflowMBB).addImm(X86::COND_AE);
30457  }
30458
30459  // In offsetMBB, emit code to use the reg_save_area.
30460  if (offsetMBB) {
30461    assert(OffsetReg != 0);
30462
30463    // Read the reg_save_area address.
30464    Register RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
30465    BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
30466        .add(Base)
30467        .add(Scale)
30468        .add(Index)
30469        .addDisp(Disp, 16)
30470        .add(Segment)
30471        .setMemRefs(LoadOnlyMMO);
30472
30473    // Zero-extend the offset
30474    Register OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
30475    BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
30476        .addImm(0)
30477        .addReg(OffsetReg)
30478        .addImm(X86::sub_32bit);
30479
30480    // Add the offset to the reg_save_area to get the final address.
30481    BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
30482      .addReg(OffsetReg64)
30483      .addReg(RegSaveReg);
30484
30485    // Compute the offset for the next argument
30486    Register NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
30487    BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
30488      .addReg(OffsetReg)
30489      .addImm(UseFPOffset ? 16 : 8);
30490
30491    // Store it back into the va_list.
30492    BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
30493        .add(Base)
30494        .add(Scale)
30495        .add(Index)
30496        .addDisp(Disp, UseFPOffset ? 4 : 0)
30497        .add(Segment)
30498        .addReg(NextOffsetReg)
30499        .setMemRefs(StoreOnlyMMO);
30500
30501    // Jump to endMBB
30502    BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
30503      .addMBB(endMBB);
30504  }
30505
30506  //
30507  // Emit code to use overflow area
30508  //
30509
30510  // Load the overflow_area address into a register.
30511  Register OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
30512  BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
30513      .add(Base)
30514      .add(Scale)
30515      .add(Index)
30516      .addDisp(Disp, 8)
30517      .add(Segment)
30518      .setMemRefs(LoadOnlyMMO);
30519
30520  // If we need to align it, do so. Otherwise, just copy the address
30521  // to OverflowDestReg.
30522  if (NeedsAlign) {
30523    // Align the overflow address
30524    assert(isPowerOf2_32(Align) && "Alignment must be a power of 2");
30525    Register TmpReg = MRI.createVirtualRegister(AddrRegClass);
30526
30527    // aligned_addr = (addr + (align-1)) & ~(align-1)
30528    BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
30529      .addReg(OverflowAddrReg)
30530      .addImm(Align-1);
30531
30532    BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
30533      .addReg(TmpReg)
30534      .addImm(~(uint64_t)(Align-1));
30535  } else {
30536    BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
30537      .addReg(OverflowAddrReg);
30538  }
30539
30540  // Compute the next overflow address after this argument.
30541  // (the overflow address should be kept 8-byte aligned)
30542  Register NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
30543  BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
30544    .addReg(OverflowDestReg)
30545    .addImm(ArgSizeA8);
30546
30547  // Store the new overflow address.
30548  BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
30549      .add(Base)
30550      .add(Scale)
30551      .add(Index)
30552      .addDisp(Disp, 8)
30553      .add(Segment)
30554      .addReg(NextAddrReg)
30555      .setMemRefs(StoreOnlyMMO);
30556
30557  // If we branched, emit the PHI to the front of endMBB.
30558  if (offsetMBB) {
30559    BuildMI(*endMBB, endMBB->begin(), DL,
30560            TII->get(X86::PHI), DestReg)
30561      .addReg(OffsetDestReg).addMBB(offsetMBB)
30562      .addReg(OverflowDestReg).addMBB(overflowMBB);
30563  }
30564
30565  // Erase the pseudo instruction
30566  MI.eraseFromParent();
30567
30568  return endMBB;
30569}
30570
30571MachineBasicBlock *X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
30572    MachineInstr &MI, MachineBasicBlock *MBB) const {
30573  // Emit code to save XMM registers to the stack. The ABI says that the
30574  // number of registers to save is given in %al, so it's theoretically
30575  // possible to do an indirect jump trick to avoid saving all of them;
30576  // however, this code takes a simpler approach and just executes all
30577  // of the stores if %al is non-zero. It's less code, and it's probably
30578  // easier on the hardware branch predictor, and stores aren't all that
30579  // expensive anyway.
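  // Roughly (illustrative, non-Win64 case, label names assumed), the emitted
  // code has the shape:
  //   testb  %al, %al
  //   je     .LEndMBB
  //   movaps %xmm0, <reg save slot +  0>    ; vmovaps when AVX is available
  //   movaps %xmm1, <reg save slot + 16>
  //   ...                                   ; one store per XMM argument reg
  // .LEndMBB: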
30580
30581  // Create the new basic blocks. One block contains all the XMM stores,
30582  // and one block is the final destination regardless of whether any
30583  // stores were performed.
30584  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
30585  MachineFunction *F = MBB->getParent();
30586  MachineFunction::iterator MBBIter = ++MBB->getIterator();
30587  MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
30588  MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
30589  F->insert(MBBIter, XMMSaveMBB);
30590  F->insert(MBBIter, EndMBB);
30591
30592  // Transfer the remainder of MBB and its successor edges to EndMBB.
30593  EndMBB->splice(EndMBB->begin(), MBB,
30594                 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
30595  EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
30596
30597  // The original block will now fall through to the XMM save block.
30598  MBB->addSuccessor(XMMSaveMBB);
30599  // The XMMSaveMBB will fall through to the end block.
30600  XMMSaveMBB->addSuccessor(EndMBB);
30601
30602  // Now add the instructions.
30603  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30604  DebugLoc DL = MI.getDebugLoc();
30605
30606  Register CountReg = MI.getOperand(0).getReg();
30607  int64_t RegSaveFrameIndex = MI.getOperand(1).getImm();
30608  int64_t VarArgsFPOffset = MI.getOperand(2).getImm();
30609
30610  if (!Subtarget.isCallingConvWin64(F->getFunction().getCallingConv())) {
30611    // If %al is 0, branch around the XMM save block.
30612    BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
30613    BuildMI(MBB, DL, TII->get(X86::JCC_1)).addMBB(EndMBB).addImm(X86::COND_E);
30614    MBB->addSuccessor(EndMBB);
30615  }
30616
30617  // Make sure the last operand is EFLAGS, which gets clobbered by the branch
30618  // that was just emitted, but clearly shouldn't be "saved".
30619  assert((MI.getNumOperands() <= 3 ||
30620          !MI.getOperand(MI.getNumOperands() - 1).isReg() ||
30621          MI.getOperand(MI.getNumOperands() - 1).getReg() == X86::EFLAGS) &&
30622         "Expected last argument to be EFLAGS");
30623  unsigned MOVOpc = Subtarget.hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
30624  // In the XMM save block, save all the XMM argument registers.
30625  for (int i = 3, e = MI.getNumOperands() - 1; i != e; ++i) {
30626    int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
30627    MachineMemOperand *MMO = F->getMachineMemOperand(
30628        MachinePointerInfo::getFixedStack(*F, RegSaveFrameIndex, Offset),
30629        MachineMemOperand::MOStore,
30630        /*Size=*/16, /*Align=*/16);
30631    BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
30632        .addFrameIndex(RegSaveFrameIndex)
30633        .addImm(/*Scale=*/1)
30634        .addReg(/*IndexReg=*/0)
30635        .addImm(/*Disp=*/Offset)
30636        .addReg(/*Segment=*/0)
30637        .addReg(MI.getOperand(i).getReg())
30638        .addMemOperand(MMO);
30639  }
30640
30641  MI.eraseFromParent(); // The pseudo instruction is gone now.
30642
30643  return EndMBB;
30644}
30645
30646// The EFLAGS operand of SelectItr might be missing a kill marker
30647// because there were multiple uses of EFLAGS, and ISel didn't know
30648// which to mark. Figure out whether SelectItr should have had a
30649// kill marker, and set it if it should. Returns the correct kill
30650// marker value.
30651static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
30652                                     MachineBasicBlock* BB,
30653                                     const TargetRegisterInfo* TRI) {
30654  // Scan forward through BB for a use/def of EFLAGS.
30655  MachineBasicBlock::iterator miI(std::next(SelectItr));
30656  for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
30657    const MachineInstr& mi = *miI;
30658    if (mi.readsRegister(X86::EFLAGS))
30659      return false;
30660    if (mi.definesRegister(X86::EFLAGS))
30661      break; // Should have kill-flag - update below.
30662  }
30663
30664  // If we hit the end of the block, check whether EFLAGS is live into a
30665  // successor.
30666  if (miI == BB->end()) {
30667    for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
30668                                          sEnd = BB->succ_end();
30669         sItr != sEnd; ++sItr) {
30670      MachineBasicBlock* succ = *sItr;
30671      if (succ->isLiveIn(X86::EFLAGS))
30672        return false;
30673    }
30674  }
30675
30676  // We found a def, or hit the end of the basic block and EFLAGS wasn't live
30677  // out. SelectMI should have a kill flag on EFLAGS.
30678  SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
30679  return true;
30680}
30681
30682// Return true if it is OK for this CMOV pseudo-opcode to be cascaded
30683// together with other CMOV pseudo-opcodes into a single basic-block with
30684// a conditional jump around it.
30685static bool isCMOVPseudo(MachineInstr &MI) {
30686  switch (MI.getOpcode()) {
30687  case X86::CMOV_FR32:
30688  case X86::CMOV_FR32X:
30689  case X86::CMOV_FR64:
30690  case X86::CMOV_FR64X:
30691  case X86::CMOV_GR8:
30692  case X86::CMOV_GR16:
30693  case X86::CMOV_GR32:
30694  case X86::CMOV_RFP32:
30695  case X86::CMOV_RFP64:
30696  case X86::CMOV_RFP80:
30697  case X86::CMOV_VR128:
30698  case X86::CMOV_VR128X:
30699  case X86::CMOV_VR256:
30700  case X86::CMOV_VR256X:
30701  case X86::CMOV_VR512:
30702  case X86::CMOV_VK2:
30703  case X86::CMOV_VK4:
30704  case X86::CMOV_VK8:
30705  case X86::CMOV_VK16:
30706  case X86::CMOV_VK32:
30707  case X86::CMOV_VK64:
30708    return true;
30709
30710  default:
30711    return false;
30712  }
30713}
30714
30715// Helper function, which inserts PHI functions into SinkMBB:
30716//   %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
30717// where %FalseValue(i) and %TrueValue(i) are taken from the consecutive CMOVs
30718// in the [MIItBegin, MIItEnd) range. It returns the MachineInstrBuilder for the
30719// last PHI function inserted.
30720static MachineInstrBuilder createPHIsForCMOVsInSinkBB(
30721    MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd,
30722    MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
30723    MachineBasicBlock *SinkMBB) {
30724  MachineFunction *MF = TrueMBB->getParent();
30725  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
30726  DebugLoc DL = MIItBegin->getDebugLoc();
30727
30728  X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm());
30729  X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
30730
30731  MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
30732
30733  // As we are creating the PHIs, we have to be careful if there is more than
30734  // one.  Later CMOVs may reference the results of earlier CMOVs, but later
30735  // PHIs have to reference the individual true/false inputs from earlier PHIs.
30736  // That also means that PHI construction must work forward from earlier to
30737  // later, and that the code must maintain a mapping from each earlier PHI's
30738  // destination register to the registers that went into that PHI.
30739  DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
30740  MachineInstrBuilder MIB;
30741
30742  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
30743    Register DestReg = MIIt->getOperand(0).getReg();
30744    Register Op1Reg = MIIt->getOperand(1).getReg();
30745    Register Op2Reg = MIIt->getOperand(2).getReg();
30746
30747    // If this CMOV we are generating is the opposite condition from
30748    // the jump we generated, then we have to swap the operands for the
30749    // PHI that is going to be generated.
30750    if (MIIt->getOperand(3).getImm() == OppCC)
30751      std::swap(Op1Reg, Op2Reg);
30752
30753    if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
30754      Op1Reg = RegRewriteTable[Op1Reg].first;
30755
30756    if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
30757      Op2Reg = RegRewriteTable[Op2Reg].second;
30758
30759    MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
30760              .addReg(Op1Reg)
30761              .addMBB(FalseMBB)
30762              .addReg(Op2Reg)
30763              .addMBB(TrueMBB);
30764
30765    // Add this PHI to the rewrite table.
30766    RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
30767  }
30768
30769  return MIB;
30770}
30771
30772// Lower cascaded selects of the form (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2).
30773MachineBasicBlock *
30774X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV,
30775                                             MachineInstr &SecondCascadedCMOV,
30776                                             MachineBasicBlock *ThisMBB) const {
30777  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30778  DebugLoc DL = FirstCMOV.getDebugLoc();
30779
30780  // We lower cascaded CMOVs such as
30781  //
30782  //   (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2)
30783  //
30784  // to two successive branches.
30785  //
30786  // Without this, we would add a PHI between the two jumps, which ends up
30787  // creating a few copies all around. For instance, for
30788  //
30789  //    (sitofp (zext (fcmp une)))
30790  //
30791  // we would generate:
30792  //
30793  //         ucomiss %xmm1, %xmm0
30794  //         movss  <1.0f>, %xmm0
30795  //         movaps  %xmm0, %xmm1
30796  //         jne     .LBB5_2
30797  //         xorps   %xmm1, %xmm1
30798  // .LBB5_2:
30799  //         jp      .LBB5_4
30800  //         movaps  %xmm1, %xmm0
30801  // .LBB5_4:
30802  //         retq
30803  //
30804  // because this custom-inserter would have generated:
30805  //
30806  //   A
30807  //   | \
30808  //   |  B
30809  //   | /
30810  //   C
30811  //   | \
30812  //   |  D
30813  //   | /
30814  //   E
30815  //
30816  // A: X = ...; Y = ...
30817  // B: empty
30818  // C: Z = PHI [X, A], [Y, B]
30819  // D: empty
30820  // E: PHI [X, C], [Z, D]
30821  //
30822  // If we lower both CMOVs in a single step, we can instead generate:
30823  //
30824  //   A
30825  //   | \
30826  //   |  C
30827  //   | /|
30828  //   |/ |
30829  //   |  |
30830  //   |  D
30831  //   | /
30832  //   E
30833  //
30834  // A: X = ...; Y = ...
30835  // D: empty
30836  // E: PHI [X, A], [X, C], [Y, D]
30837  //
30838  // Which, in our sitofp/fcmp example, gives us something like:
30839  //
30840  //         ucomiss %xmm1, %xmm0
30841  //         movss  <1.0f>, %xmm0
30842  //         jne     .LBB5_4
30843  //         jp      .LBB5_4
30844  //         xorps   %xmm0, %xmm0
30845  // .LBB5_4:
30846  //         retq
30847  //
30848
30849  // We lower a cascaded CMOV into two successive branches to the same block.
30850  // EFLAGS is used by both branches, so mark it live into the block holding
       // the second one.
30851  const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
30852  MachineFunction *F = ThisMBB->getParent();
30853  MachineBasicBlock *FirstInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
30854  MachineBasicBlock *SecondInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
30855  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
30856
30857  MachineFunction::iterator It = ++ThisMBB->getIterator();
30858  F->insert(It, FirstInsertedMBB);
30859  F->insert(It, SecondInsertedMBB);
30860  F->insert(It, SinkMBB);
30861
30862  // For a cascaded CMOV, we lower it to two successive branches to
30863  // the same block (SinkMBB).  EFLAGS is used by both, so mark it as live in
30864  // the FirstInsertedMBB.
30865  FirstInsertedMBB->addLiveIn(X86::EFLAGS);
30866
30867  // If the EFLAGS register isn't dead in the terminator, then claim that it's
30868  // live into the sink and copy blocks.
30869  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
30870  if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) &&
30871      !checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) {
30872    SecondInsertedMBB->addLiveIn(X86::EFLAGS);
30873    SinkMBB->addLiveIn(X86::EFLAGS);
30874  }
30875
30876  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
30877  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
30878                  std::next(MachineBasicBlock::iterator(FirstCMOV)),
30879                  ThisMBB->end());
30880  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
30881
30882  // Fallthrough block for ThisMBB.
30883  ThisMBB->addSuccessor(FirstInsertedMBB);
30884  // The true block target of the first branch is always SinkMBB.
30885  ThisMBB->addSuccessor(SinkMBB);
30886  // Fallthrough block for FirstInsertedMBB.
30887  FirstInsertedMBB->addSuccessor(SecondInsertedMBB);
30888  // The true block for the branch of FirstInsertedMBB.
30889  FirstInsertedMBB->addSuccessor(SinkMBB);
30890  // SecondInsertedMBB falls through to SinkMBB.
30891  SecondInsertedMBB->addSuccessor(SinkMBB);
30892
30893  // Create the conditional branch instructions.
30894  X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm());
30895  BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(FirstCC);
30896
30897  X86::CondCode SecondCC =
30898      X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm());
30899  BuildMI(FirstInsertedMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(SecondCC);
30900
30901  //  SinkMBB:
30902  //   %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ]
30903  Register DestReg = FirstCMOV.getOperand(0).getReg();
30904  Register Op1Reg = FirstCMOV.getOperand(1).getReg();
30905  Register Op2Reg = FirstCMOV.getOperand(2).getReg();
30906  MachineInstrBuilder MIB =
30907      BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII->get(X86::PHI), DestReg)
30908          .addReg(Op1Reg)
30909          .addMBB(SecondInsertedMBB)
30910          .addReg(Op2Reg)
30911          .addMBB(ThisMBB);
30912
30913  // The incoming value from FirstInsertedMBB is the same as the one from
30914  // ThisMBB (the True operand of the SELECT_CC/CMOV nodes).
30915  MIB.addReg(FirstCMOV.getOperand(2).getReg()).addMBB(FirstInsertedMBB);
30916  // Copy the PHI result to the register defined by the second CMOV.
30917  BuildMI(*SinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())), DL,
30918          TII->get(TargetOpcode::COPY),
30919          SecondCascadedCMOV.getOperand(0).getReg())
30920      .addReg(FirstCMOV.getOperand(0).getReg());
30921
30922  // Now remove the CMOVs.
30923  FirstCMOV.eraseFromParent();
30924  SecondCascadedCMOV.eraseFromParent();
30925
30926  return SinkMBB;
30927}
30928
30929MachineBasicBlock *
30930X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
30931                                     MachineBasicBlock *ThisMBB) const {
30932  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30933  DebugLoc DL = MI.getDebugLoc();
30934
30935  // To "insert" a SELECT_CC instruction, we actually have to insert the
30936  // diamond control-flow pattern.  The incoming instruction knows the
30937  // destination vreg to set, the condition code register to branch on, the
30938  // true/false values to select between and a branch opcode to use.
30939
30940  //  ThisMBB:
30941  //  ...
30942  //   TrueVal = ...
30943  //   cmpTY ccX, r1, r2
30944  //   bCC SinkMBB
30945  //   fallthrough --> FalseMBB
30946
30947  // This code lowers all pseudo-CMOV instructions. Generally it lowers these
30948  // as described above, by inserting a BB, and then making a PHI at the join
30949  // point to select the true and false operands of the CMOV in the PHI.
30950  //
30951  // The code also handles two different cases of multiple CMOV opcodes
30952  // in a row.
30953  //
30954  // Case 1:
30955  // In this case, there are multiple CMOVs in a row, all of which are based on
30956  // the same condition setting (or the exact opposite condition setting).
30957  // In this case we can lower all the CMOVs using a single inserted BB, and
30958  // then make a number of PHIs at the join point to model the CMOVs. The only
30959  // trickiness here, is that in a case like:
30960  //
30961  // t2 = CMOV cond1 t1, f1
30962  // t3 = CMOV cond1 t2, f2
30963  //
30964  // when rewriting this into PHIs, we have to perform some renaming on the
30965  // temps since you cannot have a PHI operand refer to a PHI result earlier
30966  // in the same block.  The "simple" but wrong lowering would be:
30967  //
30968  // t2 = PHI t1(BB1), f1(BB2)
30969  // t3 = PHI t2(BB1), f2(BB2)
30970  //
30971  // but clearly t2 is not defined in BB1, so that is incorrect. The proper
30972  // renaming is to note that on the path through BB1, t2 is really just a
30973  // copy of t1, and do that renaming, properly generating:
30974  //
30975  // t2 = PHI t1(BB1), f1(BB2)
30976  // t3 = PHI t1(BB1), f2(BB2)
30977  //
30978  // Case 2:
30979  // CMOV ((CMOV F, T, cc1), T, cc2) is checked here and handled by a separate
30980  // function - EmitLoweredCascadedSelect.
30981
30982  X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
30983  X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
30984  MachineInstr *LastCMOV = &MI;
30985  MachineBasicBlock::iterator NextMIIt = MachineBasicBlock::iterator(MI);
30986
30987  // Check for case 1, where there are multiple CMOVs with the same condition
30988  // first.  Of the two cases of multiple CMOV lowerings, case 1 reduces the
30989  // number of jumps the most.
30990
30991  if (isCMOVPseudo(MI)) {
30992    // See if we have a string of CMOVS with the same condition. Skip over
30993    // intervening debug insts.
30994    while (NextMIIt != ThisMBB->end() && isCMOVPseudo(*NextMIIt) &&
30995           (NextMIIt->getOperand(3).getImm() == CC ||
30996            NextMIIt->getOperand(3).getImm() == OppCC)) {
30997      LastCMOV = &*NextMIIt;
30998      ++NextMIIt;
30999      NextMIIt = skipDebugInstructionsForward(NextMIIt, ThisMBB->end());
31000    }
31001  }
31002
31003  // Check for case 2, but only if we didn't already find case 1, as
31004  // indicated by LastCMOV still pointing at MI.
31005  if (LastCMOV == &MI && NextMIIt != ThisMBB->end() &&
31006      NextMIIt->getOpcode() == MI.getOpcode() &&
31007      NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
31008      NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
31009      NextMIIt->getOperand(1).isKill()) {
31010    return EmitLoweredCascadedSelect(MI, *NextMIIt, ThisMBB);
31011  }
31012
31013  const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
31014  MachineFunction *F = ThisMBB->getParent();
31015  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
31016  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
31017
31018  MachineFunction::iterator It = ++ThisMBB->getIterator();
31019  F->insert(It, FalseMBB);
31020  F->insert(It, SinkMBB);
31021
31022  // If the EFLAGS register isn't dead in the terminator, then claim that it's
31023  // live into the sink and false blocks.
31024  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
31025  if (!LastCMOV->killsRegister(X86::EFLAGS) &&
31026      !checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) {
31027    FalseMBB->addLiveIn(X86::EFLAGS);
31028    SinkMBB->addLiveIn(X86::EFLAGS);
31029  }
31030
31031  // Transfer any debug instructions inside the CMOV sequence to the sunk block.
31032  auto DbgEnd = MachineBasicBlock::iterator(LastCMOV);
31033  auto DbgIt = MachineBasicBlock::iterator(MI);
31034  while (DbgIt != DbgEnd) {
31035    auto Next = std::next(DbgIt);
31036    if (DbgIt->isDebugInstr())
31037      SinkMBB->push_back(DbgIt->removeFromParent());
31038    DbgIt = Next;
31039  }
31040
31041  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
31042  SinkMBB->splice(SinkMBB->end(), ThisMBB,
31043                  std::next(MachineBasicBlock::iterator(LastCMOV)),
31044                  ThisMBB->end());
31045  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
31046
31047  // Fallthrough block for ThisMBB.
31048  ThisMBB->addSuccessor(FalseMBB);
31049  // The true block target of the first (or only) branch is always SinkMBB.
31050  ThisMBB->addSuccessor(SinkMBB);
31051  // Fallthrough block for FalseMBB.
31052  FalseMBB->addSuccessor(SinkMBB);
31053
31054  // Create the conditional branch instruction.
31055  BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);
31056
31057  //  SinkMBB:
31058  //   %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, ThisMBB ]
31059  //  ...
31060  MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
31061  MachineBasicBlock::iterator MIItEnd =
31062      std::next(MachineBasicBlock::iterator(LastCMOV));
31063  createPHIsForCMOVsInSinkBB(MIItBegin, MIItEnd, ThisMBB, FalseMBB, SinkMBB);
31064
31065  // Now remove the CMOV(s).
31066  ThisMBB->erase(MIItBegin, MIItEnd);
31067
31068  return SinkMBB;
31069}
31070
31071MachineBasicBlock *
31072X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
31073                                        MachineBasicBlock *BB) const {
31074  MachineFunction *MF = BB->getParent();
31075  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31076  DebugLoc DL = MI.getDebugLoc();
31077  const BasicBlock *LLVM_BB = BB->getBasicBlock();
31078
31079  assert(MF->shouldSplitStack());
31080
31081  const bool Is64Bit = Subtarget.is64Bit();
31082  const bool IsLP64 = Subtarget.isTarget64BitLP64();
31083
31084  const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
31085  const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
31086
31087  // BB:
31088  //  ... [Till the alloca]
31089  // If the stacklet is not large enough, jump to mallocMBB
31090  //
31091  // bumpMBB:
31092  //  Allocate by subtracting from RSP
31093  //  Jump to continueMBB
31094  //
31095  // mallocMBB:
31096  //  Allocate by call to runtime
31097  //
31098  // continueMBB:
31099  //  ...
31100  //  [rest of original BB]
31101  //
31102
31103  MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
31104  MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
31105  MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
31106
31107  MachineRegisterInfo &MRI = MF->getRegInfo();
31108  const TargetRegisterClass *AddrRegClass =
31109      getRegClassFor(getPointerTy(MF->getDataLayout()));
31110
31111  unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
31112           bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
31113           tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
31114           SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
31115           sizeVReg = MI.getOperand(1).getReg(),
31116           physSPReg =
31117               IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
31118
31119  MachineFunction::iterator MBBIter = ++BB->getIterator();
31120
31121  MF->insert(MBBIter, bumpMBB);
31122  MF->insert(MBBIter, mallocMBB);
31123  MF->insert(MBBIter, continueMBB);
31124
31125  continueMBB->splice(continueMBB->begin(), BB,
31126                      std::next(MachineBasicBlock::iterator(MI)), BB->end());
31127  continueMBB->transferSuccessorsAndUpdatePHIs(BB);
31128
31129  // Add code to the main basic block to check if the stack limit has been hit,
31130  // and if so, jump to mallocMBB otherwise to bumpMBB.
31131  BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
31132  BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
31133    .addReg(tmpSPVReg).addReg(sizeVReg);
31134  BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
31135    .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
31136    .addReg(SPLimitVReg);
31137  BuildMI(BB, DL, TII->get(X86::JCC_1)).addMBB(mallocMBB).addImm(X86::COND_G);
31138
31139  // bumpMBB simply decreases the stack pointer, since we know the current
31140  // stacklet has enough space.
31141  BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
31142    .addReg(SPLimitVReg);
31143  BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
31144    .addReg(SPLimitVReg);
31145  BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
31146
31147  // Calls into a routine in libgcc to allocate more space from the heap.
31148  const uint32_t *RegMask =
31149      Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
31150  if (IsLP64) {
31151    BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
31152      .addReg(sizeVReg);
31153    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
31154      .addExternalSymbol("__morestack_allocate_stack_space")
31155      .addRegMask(RegMask)
31156      .addReg(X86::RDI, RegState::Implicit)
31157      .addReg(X86::RAX, RegState::ImplicitDefine);
31158  } else if (Is64Bit) {
31159    BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
31160      .addReg(sizeVReg);
31161    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
31162      .addExternalSymbol("__morestack_allocate_stack_space")
31163      .addRegMask(RegMask)
31164      .addReg(X86::EDI, RegState::Implicit)
31165      .addReg(X86::EAX, RegState::ImplicitDefine);
31166  } else {
31167    BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
31168      .addImm(12);
31169    BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
31170    BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
31171      .addExternalSymbol("__morestack_allocate_stack_space")
31172      .addRegMask(RegMask)
31173      .addReg(X86::EAX, RegState::ImplicitDefine);
31174  }
31175
31176  if (!Is64Bit)
31177    BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
31178      .addImm(16);
31179
31180  BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
31181    .addReg(IsLP64 ? X86::RAX : X86::EAX);
31182  BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
31183
31184  // Set up the CFG correctly.
31185  BB->addSuccessor(bumpMBB);
31186  BB->addSuccessor(mallocMBB);
31187  mallocMBB->addSuccessor(continueMBB);
31188  bumpMBB->addSuccessor(continueMBB);
31189
31190  // Take care of the PHI nodes.
31191  BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
31192          MI.getOperand(0).getReg())
31193      .addReg(mallocPtrVReg)
31194      .addMBB(mallocMBB)
31195      .addReg(bumpSPPtrVReg)
31196      .addMBB(bumpMBB);
31197
31198  // Delete the original pseudo instruction.
31199  MI.eraseFromParent();
31200
31201  // And we're done.
31202  return continueMBB;
31203}
31204
31205MachineBasicBlock *
31206X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
31207                                       MachineBasicBlock *BB) const {
31208  MachineFunction *MF = BB->getParent();
31209  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
31210  MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
31211  DebugLoc DL = MI.getDebugLoc();
31212
31213  assert(!isAsynchronousEHPersonality(
31214             classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
31215         "SEH does not use catchret!");
31216
31217  // Only 32-bit EH needs to worry about manually restoring stack pointers.
31218  if (!Subtarget.is32Bit())
31219    return BB;
31220
31221  // C++ EH creates a new target block to hold the restore code, and wires up
31222  // the new block to the return destination with a normal JMP_4.
31223  MachineBasicBlock *RestoreMBB =
31224      MF->CreateMachineBasicBlock(BB->getBasicBlock());
31225  assert(BB->succ_size() == 1);
31226  MF->insert(std::next(BB->getIterator()), RestoreMBB);
31227  RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
31228  BB->addSuccessor(RestoreMBB);
31229  MI.getOperand(0).setMBB(RestoreMBB);
31230
31231  auto RestoreMBBI = RestoreMBB->begin();
31232  BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::EH_RESTORE));
31233  BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
31234  return BB;
31235}
31236
31237MachineBasicBlock *
31238X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI,
31239                                       MachineBasicBlock *BB) const {
31240  MachineFunction *MF = BB->getParent();
31241  const Constant *PerFn = MF->getFunction().getPersonalityFn();
31242  bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
31243  // Only 32-bit SEH requires special handling for catchpad.
31244  if (IsSEH && Subtarget.is32Bit()) {
31245    const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
31246    DebugLoc DL = MI.getDebugLoc();
31247    BuildMI(*BB, MI, DL, TII.get(X86::EH_RESTORE));
31248  }
31249  MI.eraseFromParent();
31250  return BB;
31251}
31252
31253MachineBasicBlock *
31254X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
31255                                      MachineBasicBlock *BB) const {
31256  // Here we replace TLSADDR with the sequence:
31257  // adjust_stackdown -> TLSADDR -> adjust_stackup.
31258  // We need this because TLSADDR is lowered into a call
31259  // inside MC, and without the two markers shrink-wrapping
31260  // may place the prologue/epilogue past them.
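  // For example, a 64-bit TLSADDR ends up bracketed roughly as follows (the
  // exact names come from the target's call-frame pseudos):
  //   ADJCALLSTACKDOWN64 0, 0, 0
  //   TLS_addr64 ...               ; the original pseudo is kept in place
  //   ADJCALLSTACKUP64 0, 0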
31261  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
31262  DebugLoc DL = MI.getDebugLoc();
31263  MachineFunction &MF = *BB->getParent();
31264
31265  // Emit CALLSEQ_START right before the instruction.
31266  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
31267  MachineInstrBuilder CallseqStart =
31268    BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
31269  BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
31270
31271  // Emit CALLSEQ_END right after the instruction.
31272  // We don't call erase from parent because we want to keep the
31273  // original instruction around.
31274  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
31275  MachineInstrBuilder CallseqEnd =
31276    BuildMI(MF, DL, TII.get(AdjStackUp)).addImm(0).addImm(0);
31277  BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);
31278
31279  return BB;
31280}
31281
31282MachineBasicBlock *
31283X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
31284                                      MachineBasicBlock *BB) const {
31285  // This is pretty easy.  We're taking the value that we received from
31286  // our load from the relocation, sticking it in either RDI (x86-64)
31287  // or EAX and doing an indirect call.  The return value will then
31288  // be in the normal return register.
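  // For the 64-bit case this amounts to roughly the following (assuming the
  // usual Darwin TLV relocation on the load):
  //   movq  _var@TLVP(%rip), %rdi
  //   callq *(%rdi)                ; TLS variable address returned in %rax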
31289  MachineFunction *F = BB->getParent();
31290  const X86InstrInfo *TII = Subtarget.getInstrInfo();
31291  DebugLoc DL = MI.getDebugLoc();
31292
31293  assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
31294  assert(MI.getOperand(3).isGlobal() && "This should be a global");
31295
31296  // Get a register mask for the lowered call.
31297  // FIXME: The 32-bit calls have non-standard calling conventions. Use a
31298  // proper register mask.
31299  const uint32_t *RegMask =
31300      Subtarget.is64Bit() ?
31301      Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
31302      Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
31303  if (Subtarget.is64Bit()) {
31304    MachineInstrBuilder MIB =
31305        BuildMI(*BB, MI, DL, TII->get(X86::MOV64rm), X86::RDI)
31306            .addReg(X86::RIP)
31307            .addImm(0)
31308            .addReg(0)
31309            .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
31310                              MI.getOperand(3).getTargetFlags())
31311            .addReg(0);
31312    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
31313    addDirectMem(MIB, X86::RDI);
31314    MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
31315  } else if (!isPositionIndependent()) {
31316    MachineInstrBuilder MIB =
31317        BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
31318            .addReg(0)
31319            .addImm(0)
31320            .addReg(0)
31321            .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
31322                              MI.getOperand(3).getTargetFlags())
31323            .addReg(0);
31324    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
31325    addDirectMem(MIB, X86::EAX);
31326    MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
31327  } else {
31328    MachineInstrBuilder MIB =
31329        BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
31330            .addReg(TII->getGlobalBaseReg(F))
31331            .addImm(0)
31332            .addReg(0)
31333            .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
31334                              MI.getOperand(3).getTargetFlags())
31335            .addReg(0);
31336    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
31337    addDirectMem(MIB, X86::EAX);
31338    MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
31339  }
31340
31341  MI.eraseFromParent(); // The pseudo instruction is gone now.
31342  return BB;
31343}
31344
31345static unsigned getOpcodeForRetpoline(unsigned RPOpc) {
31346  switch (RPOpc) {
31347  case X86::RETPOLINE_CALL32:
31348    return X86::CALLpcrel32;
31349  case X86::RETPOLINE_CALL64:
31350    return X86::CALL64pcrel32;
31351  case X86::RETPOLINE_TCRETURN32:
31352    return X86::TCRETURNdi;
31353  case X86::RETPOLINE_TCRETURN64:
31354    return X86::TCRETURNdi64;
31355  }
31356  llvm_unreachable("not retpoline opcode");
31357}
31358
31359static const char *getRetpolineSymbol(const X86Subtarget &Subtarget,
31360                                      unsigned Reg) {
31361  if (Subtarget.useRetpolineExternalThunk()) {
31362    // When using an external thunk for retpolines, we pick names that match the
31363    // names GCC happens to use as well. This helps simplify the implementation
31364    // of the thunks for kernels where they have no easy ability to create
31365    // aliases and are doing non-trivial configuration of the thunk's body. For
31366    // example, the Linux kernel will do boot-time hot patching of the thunk
31367    // bodies and cannot easily export aliases of these to loaded modules.
31368    //
31369    // Note that at any point in the future, we may need to change the semantics
31370    // of how we implement retpolines and at that time will likely change the
31371    // name of the called thunk. Essentially, there is no hard guarantee that
31372    // LLVM will generate calls to specific thunks; we merely make a best-effort
31373    // attempt to help out kernels and other systems where duplicating the
31374    // thunks is costly.
31375    switch (Reg) {
31376    case X86::EAX:
31377      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31378      return "__x86_indirect_thunk_eax";
31379    case X86::ECX:
31380      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31381      return "__x86_indirect_thunk_ecx";
31382    case X86::EDX:
31383      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31384      return "__x86_indirect_thunk_edx";
31385    case X86::EDI:
31386      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31387      return "__x86_indirect_thunk_edi";
31388    case X86::R11:
31389      assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
31390      return "__x86_indirect_thunk_r11";
31391    }
31392    llvm_unreachable("unexpected reg for retpoline");
31393  }
31394
31395  // When targeting an internal COMDAT thunk use an LLVM-specific name.
31396  switch (Reg) {
31397  case X86::EAX:
31398    assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31399    return "__llvm_retpoline_eax";
31400  case X86::ECX:
31401    assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31402    return "__llvm_retpoline_ecx";
31403  case X86::EDX:
31404    assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31405    return "__llvm_retpoline_edx";
31406  case X86::EDI:
31407    assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
31408    return "__llvm_retpoline_edi";
31409  case X86::R11:
31410    assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
31411    return "__llvm_retpoline_r11";
31412  }
31413  llvm_unreachable("unexpected reg for retpoline");
31414}
31415
31416MachineBasicBlock *
31417X86TargetLowering::EmitLoweredRetpoline(MachineInstr &MI,
31418                                        MachineBasicBlock *BB) const {
31419  // Copy the virtual register into the R11 physical register and
31420  // call the retpoline thunk.
31421  DebugLoc DL = MI.getDebugLoc();
31422  const X86InstrInfo *TII = Subtarget.getInstrInfo();
31423  Register CalleeVReg = MI.getOperand(0).getReg();
31424  unsigned Opc = getOpcodeForRetpoline(MI.getOpcode());
31425
31426  // Find an available scratch register to hold the callee. On 64-bit, we can
31427  // just use R11, but we scan for uses anyway to ensure we don't generate
31428  // incorrect code. On 32-bit, we hold the callee in one of EAX, ECX, or EDX
31429  // that isn't already a register use operand of the call. If none of those
31430  // is available, use EDI instead. EDI is chosen because EBX is the PIC base
31431  // register and ESI is the base pointer for realigned stack frames with VLAs.
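  // For example, on 32-bit with the internal thunk and EAX free, an indirect
  // call through %calleevreg becomes roughly:
  //   movl  %calleevreg, %eax
  //   calll __llvm_retpoline_eax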
31432  SmallVector<unsigned, 3> AvailableRegs;
31433  if (Subtarget.is64Bit())
31434    AvailableRegs.push_back(X86::R11);
31435  else
31436    AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});
31437
31438  // Zero out any registers that are already used.
31439  for (const auto &MO : MI.operands()) {
31440    if (MO.isReg() && MO.isUse())
31441      for (unsigned &Reg : AvailableRegs)
31442        if (Reg == MO.getReg())
31443          Reg = 0;
31444  }
31445
31446  // Choose the first remaining non-zero available register.
31447  unsigned AvailableReg = 0;
31448  for (unsigned MaybeReg : AvailableRegs) {
31449    if (MaybeReg) {
31450      AvailableReg = MaybeReg;
31451      break;
31452    }
31453  }
31454  if (!AvailableReg)
31455    report_fatal_error("calling convention incompatible with retpoline, no "
31456                       "available registers");
31457
31458  const char *Symbol = getRetpolineSymbol(Subtarget, AvailableReg);
31459
31460  BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), AvailableReg)
31461      .addReg(CalleeVReg);
31462  MI.getOperand(0).ChangeToES(Symbol);
31463  MI.setDesc(TII->get(Opc));
31464  MachineInstrBuilder(*BB->getParent(), &MI)
31465      .addReg(AvailableReg, RegState::Implicit | RegState::Kill);
31466  return BB;
31467}
31468
31469/// SetJmp implies a future control-flow change upon calling the corresponding
31470/// LongJmp.
31471/// Instead of using the 'return' instruction, the long jump fixes the stack and
31472/// performs an indirect branch. To do so it uses the registers that were stored
31473/// in the jump buffer (when calling SetJmp).
31474/// If the shadow stack is enabled, we need to fix it as well, because some
31475/// return addresses will be skipped.
31476/// This function saves the SSP so that it can be fixed later by
31477/// emitLongJmpShadowStackFix.
31478/// \sa emitLongJmpShadowStackFix
31479/// \param [in] MI The temporary Machine Instruction for the builtin.
31480/// \param [in] MBB The Machine Basic Block that will be modified.
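/// As consumed by these emitters, the jump buffer is laid out in pointer-sized
/// slots: slot 0 holds the frame pointer, slot 1 the resume address
/// (LabelOffset), slot 2 the stack pointer (SPOffset), and slot 3 the shadow
/// stack pointer written here (SSPOffset).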
31481void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
31482                                                 MachineBasicBlock *MBB) const {
31483  DebugLoc DL = MI.getDebugLoc();
31484  MachineFunction *MF = MBB->getParent();
31485  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31486  MachineRegisterInfo &MRI = MF->getRegInfo();
31487  MachineInstrBuilder MIB;
31488
31489  // Memory Reference.
31490  SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
31491                                           MI.memoperands_end());
31492
31493  // Initialize a register with zero.
31494  MVT PVT = getPointerTy(MF->getDataLayout());
31495  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
31496  Register ZReg = MRI.createVirtualRegister(PtrRC);
31497  unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
31498  BuildMI(*MBB, MI, DL, TII->get(XorRROpc))
31499      .addDef(ZReg)
31500      .addReg(ZReg, RegState::Undef)
31501      .addReg(ZReg, RegState::Undef);
31502
31503  // Read the current SSP Register value to the zeroed register.
31504  Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
31505  unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
31506  BuildMI(*MBB, MI, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
31507
31508  // Write the SSP register value to slot 3 of the input memory buffer.
31509  unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
31510  MIB = BuildMI(*MBB, MI, DL, TII->get(PtrStoreOpc));
31511  const int64_t SSPOffset = 3 * PVT.getStoreSize();
31512  const unsigned MemOpndSlot = 1;
31513  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
31514    if (i == X86::AddrDisp)
31515      MIB.addDisp(MI.getOperand(MemOpndSlot + i), SSPOffset);
31516    else
31517      MIB.add(MI.getOperand(MemOpndSlot + i));
31518  }
31519  MIB.addReg(SSPCopyReg);
31520  MIB.setMemRefs(MMOs);
31521}
31522
31523MachineBasicBlock *
31524X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
31525                                    MachineBasicBlock *MBB) const {
31526  DebugLoc DL = MI.getDebugLoc();
31527  MachineFunction *MF = MBB->getParent();
31528  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31529  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
31530  MachineRegisterInfo &MRI = MF->getRegInfo();
31531
31532  const BasicBlock *BB = MBB->getBasicBlock();
31533  MachineFunction::iterator I = ++MBB->getIterator();
31534
31535  // Memory Reference
31536  SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
31537                                           MI.memoperands_end());
31538
31539  unsigned DstReg;
31540  unsigned MemOpndSlot = 0;
31541
31542  unsigned CurOp = 0;
31543
31544  DstReg = MI.getOperand(CurOp++).getReg();
31545  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
31546  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
31547  (void)TRI;
31548  Register mainDstReg = MRI.createVirtualRegister(RC);
31549  Register restoreDstReg = MRI.createVirtualRegister(RC);
31550
31551  MemOpndSlot = CurOp;
31552
31553  MVT PVT = getPointerTy(MF->getDataLayout());
31554  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
31555         "Invalid Pointer Size!");
31556
31557  // For v = setjmp(buf), we generate
31558  //
31559  // thisMBB:
31560  //  buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
31561  //  SjLjSetup restoreMBB
31562  //
31563  // mainMBB:
31564  //  v_main = 0
31565  //
31566  // sinkMBB:
31567  //  v = phi(main, restore)
31568  //
31569  // restoreMBB:
31570  //  if base pointer being used, load it from frame
31571  //  v_restore = 1
31572
31573  MachineBasicBlock *thisMBB = MBB;
31574  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
31575  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
31576  MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
31577  MF->insert(I, mainMBB);
31578  MF->insert(I, sinkMBB);
31579  MF->push_back(restoreMBB);
31580  restoreMBB->setHasAddressTaken();
31581
31582  MachineInstrBuilder MIB;
31583
31584  // Transfer the remainder of BB and its successor edges to sinkMBB.
31585  sinkMBB->splice(sinkMBB->begin(), MBB,
31586                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
31587  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
31588
31589  // thisMBB:
31590  unsigned PtrStoreOpc = 0;
31591  unsigned LabelReg = 0;
31592  const int64_t LabelOffset = 1 * PVT.getStoreSize();
31593  bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
31594                     !isPositionIndependent();
31595
31596  // Prepare IP either in reg or imm.
31597  if (!UseImmLabel) {
31598    PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
31599    const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
31600    LabelReg = MRI.createVirtualRegister(PtrRC);
31601    if (Subtarget.is64Bit()) {
31602      MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
31603              .addReg(X86::RIP)
31604              .addImm(0)
31605              .addReg(0)
31606              .addMBB(restoreMBB)
31607              .addReg(0);
31608    } else {
31609      const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
31610      MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
31611              .addReg(XII->getGlobalBaseReg(MF))
31612              .addImm(0)
31613              .addReg(0)
31614              .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
31615              .addReg(0);
31616    }
31617  } else
31618    PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
31619  // Store IP
31620  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
31621  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
31622    if (i == X86::AddrDisp)
31623      MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
31624    else
31625      MIB.add(MI.getOperand(MemOpndSlot + i));
31626  }
31627  if (!UseImmLabel)
31628    MIB.addReg(LabelReg);
31629  else
31630    MIB.addMBB(restoreMBB);
31631  MIB.setMemRefs(MMOs);
31632
31633  if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
31634    emitSetJmpShadowStackFix(MI, thisMBB);
31635  }
31636
31637  // Setup
31638  MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
31639          .addMBB(restoreMBB);
31640
31641  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
31642  MIB.addRegMask(RegInfo->getNoPreservedMask());
31643  thisMBB->addSuccessor(mainMBB);
31644  thisMBB->addSuccessor(restoreMBB);
31645
31646  // mainMBB:
31647  //  EAX = 0
31648  BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
31649  mainMBB->addSuccessor(sinkMBB);
31650
31651  // sinkMBB:
31652  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
31653          TII->get(X86::PHI), DstReg)
31654    .addReg(mainDstReg).addMBB(mainMBB)
31655    .addReg(restoreDstReg).addMBB(restoreMBB);
31656
31657  // restoreMBB:
31658  if (RegInfo->hasBasePointer(*MF)) {
31659    const bool Uses64BitFramePtr =
31660        Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
31661    X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
31662    X86FI->setRestoreBasePointer(MF);
31663    Register FramePtr = RegInfo->getFrameRegister(*MF);
31664    Register BasePtr = RegInfo->getBaseRegister();
31665    unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
31666    addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
31667                 FramePtr, true, X86FI->getRestoreBasePointerOffset())
31668      .setMIFlag(MachineInstr::FrameSetup);
31669  }
31670  BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
31671  BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
31672  restoreMBB->addSuccessor(sinkMBB);
31673
31674  MI.eraseFromParent();
31675  return sinkMBB;
31676}
31677
31678/// Fix the shadow stack using the previously saved SSP pointer.
31679/// \sa emitSetJmpShadowStackFix
31680/// \param [in] MI The temporary Machine Instruction for the builtin.
31681/// \param [in] MBB The Machine Basic Block that will be modified.
31682/// \return The sink MBB that will perform the future indirect branch.
31683MachineBasicBlock *
31684X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
31685                                             MachineBasicBlock *MBB) const {
31686  DebugLoc DL = MI.getDebugLoc();
31687  MachineFunction *MF = MBB->getParent();
31688  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31689  MachineRegisterInfo &MRI = MF->getRegInfo();
31690
31691  // Memory Reference
31692  SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
31693                                           MI.memoperands_end());
31694
31695  MVT PVT = getPointerTy(MF->getDataLayout());
31696  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
31697
31698  // checkSspMBB:
31699  //         xor vreg1, vreg1
31700  //         rdssp vreg1
31701  //         test vreg1, vreg1
31702  //         je sinkMBB   # Jump if Shadow Stack is not supported
31703  // fallMBB:
31704  //         mov buf+24/12(%rip), vreg2
31705  //         sub vreg1, vreg2
31706  //         jbe sinkMBB  # No need to fix the Shadow Stack
31707  // fixShadowMBB:
31708  //         shr 3/2, vreg2
31709  //         incssp vreg2  # fix the SSP according to the lower 8 bits
31710  //         shr 8, vreg2
31711  //         je sinkMBB
31712  // fixShadowLoopPrepareMBB:
31713  //         shl vreg2
31714  //         mov 128, vreg3
31715  // fixShadowLoopMBB:
31716  //         incssp vreg3
31717  //         dec vreg2
31718  //         jne fixShadowLoopMBB # Iterate until you finish fixing
31719  //                              # the Shadow Stack
31720  // sinkMBB:
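  //
  // For example, on 64-bit with a saved-vs-current SSP delta of 0x2048 bytes:
  // 0x2048 >> 3 = 1033 skipped entries; the first incssp fixes 1033 & 0xff = 9
  // of them, 1033 >> 8 = 4 blocks of 256 entries remain, the shl doubles that
  // to a loop counter of 8, and the loop then runs incssp 128 eight times
  // (8 * 128 = 1024), for a total of 9 + 1024 = 1033 entries.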
31721
31722  MachineFunction::iterator I = ++MBB->getIterator();
31723  const BasicBlock *BB = MBB->getBasicBlock();
31724
31725  MachineBasicBlock *checkSspMBB = MF->CreateMachineBasicBlock(BB);
31726  MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
31727  MachineBasicBlock *fixShadowMBB = MF->CreateMachineBasicBlock(BB);
31728  MachineBasicBlock *fixShadowLoopPrepareMBB = MF->CreateMachineBasicBlock(BB);
31729  MachineBasicBlock *fixShadowLoopMBB = MF->CreateMachineBasicBlock(BB);
31730  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
31731  MF->insert(I, checkSspMBB);
31732  MF->insert(I, fallMBB);
31733  MF->insert(I, fixShadowMBB);
31734  MF->insert(I, fixShadowLoopPrepareMBB);
31735  MF->insert(I, fixShadowLoopMBB);
31736  MF->insert(I, sinkMBB);
31737
31738  // Transfer the remainder of BB and its successor edges to sinkMBB.
31739  sinkMBB->splice(sinkMBB->begin(), MBB, MachineBasicBlock::iterator(MI),
31740                  MBB->end());
31741  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
31742
31743  MBB->addSuccessor(checkSspMBB);
31744
31745  // Initialize a register with zero.
31746  Register ZReg = MRI.createVirtualRegister(PtrRC);
31747  unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
31748  BuildMI(checkSspMBB, DL, TII->get(XorRROpc))
31749      .addDef(ZReg)
31750      .addReg(ZReg, RegState::Undef)
31751      .addReg(ZReg, RegState::Undef);
31752
31753  // Read the current SSP Register value to the zeroed register.
31754  Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
31755  unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
31756  BuildMI(checkSspMBB, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
31757
31758  // Check whether the result of the SSP register is zero and jump directly
31759  // to the sink.
31760  unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
31761  BuildMI(checkSspMBB, DL, TII->get(TestRROpc))
31762      .addReg(SSPCopyReg)
31763      .addReg(SSPCopyReg);
31764  BuildMI(checkSspMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
31765  checkSspMBB->addSuccessor(sinkMBB);
31766  checkSspMBB->addSuccessor(fallMBB);
31767
31768  // Reload the previously saved SSP register value.
31769  Register PrevSSPReg = MRI.createVirtualRegister(PtrRC);
31770  unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
31771  const int64_t SSPOffset = 3 * PVT.getStoreSize();
31772  MachineInstrBuilder MIB =
31773      BuildMI(fallMBB, DL, TII->get(PtrLoadOpc), PrevSSPReg);
31774  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
31775    const MachineOperand &MO = MI.getOperand(i);
31776    if (i == X86::AddrDisp)
31777      MIB.addDisp(MO, SSPOffset);
31778    else if (MO.isReg()) // Don't add the whole operand, we don't want to
31779                         // preserve kill flags.
31780      MIB.addReg(MO.getReg());
31781    else
31782      MIB.add(MO);
31783  }
31784  MIB.setMemRefs(MMOs);
31785
31786  // Subtract the current SSP from the previous SSP.
31787  Register SspSubReg = MRI.createVirtualRegister(PtrRC);
31788  unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
31789  BuildMI(fallMBB, DL, TII->get(SubRROpc), SspSubReg)
31790      .addReg(PrevSSPReg)
31791      .addReg(SSPCopyReg);
31792
31793  // Jump to sink in case PrevSSPReg <= SSPCopyReg.
31794  BuildMI(fallMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_BE);
31795  fallMBB->addSuccessor(sinkMBB);
31796  fallMBB->addSuccessor(fixShadowMBB);
31797
31798  // Shift right by 2/3 for 32/64 because incssp multiplies the argument by 4/8.
31799  unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
31800  unsigned Offset = (PVT == MVT::i64) ? 3 : 2;
31801  Register SspFirstShrReg = MRI.createVirtualRegister(PtrRC);
31802  BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspFirstShrReg)
31803      .addReg(SspSubReg)
31804      .addImm(Offset);
31805
31806  // Increase the SSP using only the lower 8 bits of the delta.
31807  unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
31808  BuildMI(fixShadowMBB, DL, TII->get(IncsspOpc)).addReg(SspFirstShrReg);
31809
31810  // Reset the lower 8 bits.
31811  Register SspSecondShrReg = MRI.createVirtualRegister(PtrRC);
31812  BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspSecondShrReg)
31813      .addReg(SspFirstShrReg)
31814      .addImm(8);
31815
31816  // Jump if the result of the shift is zero.
31817  BuildMI(fixShadowMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
31818  fixShadowMBB->addSuccessor(sinkMBB);
31819  fixShadowMBB->addSuccessor(fixShadowLoopPrepareMBB);
31820
31821  // Do a single shift left.
31822  unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64r1 : X86::SHL32r1;
31823  Register SspAfterShlReg = MRI.createVirtualRegister(PtrRC);
31824  BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(ShlR1Opc), SspAfterShlReg)
31825      .addReg(SspSecondShrReg);
31826
31827  // Save the value 128 to a register (will be used next with incssp).
31828  Register Value128InReg = MRI.createVirtualRegister(PtrRC);
31829  unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
31830  BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(MovRIOpc), Value128InReg)
31831      .addImm(128);
31832  fixShadowLoopPrepareMBB->addSuccessor(fixShadowLoopMBB);
31833
31834  // Since incssp only looks at the lower 8 bits, we might need to do several
31835  // iterations of incssp until we finish fixing the shadow stack.
31836  Register DecReg = MRI.createVirtualRegister(PtrRC);
31837  Register CounterReg = MRI.createVirtualRegister(PtrRC);
31838  BuildMI(fixShadowLoopMBB, DL, TII->get(X86::PHI), CounterReg)
31839      .addReg(SspAfterShlReg)
31840      .addMBB(fixShadowLoopPrepareMBB)
31841      .addReg(DecReg)
31842      .addMBB(fixShadowLoopMBB);
31843
31844  // Every iteration we increase the SSP by 128.
31845  BuildMI(fixShadowLoopMBB, DL, TII->get(IncsspOpc)).addReg(Value128InReg);
31846
31847  // Every iteration we decrement the counter by 1.
31848  unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
31849  BuildMI(fixShadowLoopMBB, DL, TII->get(DecROpc), DecReg).addReg(CounterReg);
31850
31851  // Jump if the counter is not zero yet.
31852  BuildMI(fixShadowLoopMBB, DL, TII->get(X86::JCC_1)).addMBB(fixShadowLoopMBB).addImm(X86::COND_NE);
31853  fixShadowLoopMBB->addSuccessor(sinkMBB);
31854  fixShadowLoopMBB->addSuccessor(fixShadowLoopMBB);
31855
31856  return sinkMBB;
31857}
31858
31859MachineBasicBlock *
31860X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
31861                                     MachineBasicBlock *MBB) const {
31862  DebugLoc DL = MI.getDebugLoc();
31863  MachineFunction *MF = MBB->getParent();
31864  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31865  MachineRegisterInfo &MRI = MF->getRegInfo();
31866
31867  // Memory Reference
31868  SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
31869                                           MI.memoperands_end());
31870
31871  MVT PVT = getPointerTy(MF->getDataLayout());
31872  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
31873         "Invalid Pointer Size!");
31874
31875  const TargetRegisterClass *RC =
31876    (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
31877  Register Tmp = MRI.createVirtualRegister(RC);
31878  // Since FP is only updated here but NOT referenced, it's treated as GPR.
31879  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
31880  unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
31881  Register SP = RegInfo->getStackRegister();
31882
31883  MachineInstrBuilder MIB;
31884
31885  const int64_t LabelOffset = 1 * PVT.getStoreSize();
31886  const int64_t SPOffset = 2 * PVT.getStoreSize();
31887
31888  unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
31889  unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
31890
31891  MachineBasicBlock *thisMBB = MBB;
31892
31893  // When CET and the shadow stack are enabled, we need to fix the shadow stack.
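  // (Front ends such as clang set the "cf-protection-return" module flag for
  // -fcf-protection=return and -fcf-protection=full.)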
31894  if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
31895    thisMBB = emitLongJmpShadowStackFix(MI, thisMBB);
31896  }
31897
31898  // Reload FP
31899  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP);
31900  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
31901    const MachineOperand &MO = MI.getOperand(i);
31902    if (MO.isReg()) // Don't add the whole operand, we don't want to
31903                    // preserve kill flags.
31904      MIB.addReg(MO.getReg());
31905    else
31906      MIB.add(MO);
31907  }
31908  MIB.setMemRefs(MMOs);
31909
31910  // Reload IP
31911  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
31912  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
31913    const MachineOperand &MO = MI.getOperand(i);
31914    if (i == X86::AddrDisp)
31915      MIB.addDisp(MO, LabelOffset);
31916    else if (MO.isReg()) // Don't add the whole operand, we don't want to
31917                         // preserve kill flags.
31918      MIB.addReg(MO.getReg());
31919    else
31920      MIB.add(MO);
31921  }
31922  MIB.setMemRefs(MMOs);
31923
31924  // Reload SP
31925  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP);
31926  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
31927    if (i == X86::AddrDisp)
31928      MIB.addDisp(MI.getOperand(i), SPOffset);
31929    else
31930      MIB.add(MI.getOperand(i)); // We can preserve the kill flags here, it's
31931                                 // the last instruction of the expansion.
31932  }
31933  MIB.setMemRefs(MMOs);
31934
31935  // Jump
31936  BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
31937
31938  MI.eraseFromParent();
31939  return thisMBB;
31940}
31941
31942void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
31943                                               MachineBasicBlock *MBB,
31944                                               MachineBasicBlock *DispatchBB,
31945                                               int FI) const {
31946  DebugLoc DL = MI.getDebugLoc();
31947  MachineFunction *MF = MBB->getParent();
31948  MachineRegisterInfo *MRI = &MF->getRegInfo();
31949  const X86InstrInfo *TII = Subtarget.getInstrInfo();
31950
31951  MVT PVT = getPointerTy(MF->getDataLayout());
31952  assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
31953
31954  unsigned Op = 0;
31955  unsigned VR = 0;
31956
31957  bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
31958                     !isPositionIndependent();
31959
31960  if (UseImmLabel) {
31961    Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
31962  } else {
31963    const TargetRegisterClass *TRC =
31964        (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
31965    VR = MRI->createVirtualRegister(TRC);
31966    Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
31967
31968    if (Subtarget.is64Bit())
31969      BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
31970          .addReg(X86::RIP)
31971          .addImm(1)
31972          .addReg(0)
31973          .addMBB(DispatchBB)
31974          .addReg(0);
31975    else
31976      BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
31977          .addReg(0) /* TII->getGlobalBaseReg(MF) */
31978          .addImm(1)
31979          .addReg(0)
31980          .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
31981          .addReg(0);
31982  }
31983
31984  MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
31985  addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
31986  if (UseImmLabel)
31987    MIB.addMBB(DispatchBB);
31988  else
31989    MIB.addReg(VR);
31990}
31991
31992MachineBasicBlock *
31993X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
31994                                         MachineBasicBlock *BB) const {
31995  DebugLoc DL = MI.getDebugLoc();
31996  MachineFunction *MF = BB->getParent();
31997  MachineRegisterInfo *MRI = &MF->getRegInfo();
31998  const X86InstrInfo *TII = Subtarget.getInstrInfo();
31999  int FI = MF->getFrameInfo().getFunctionContextIndex();
32000
32001  // Get a mapping of the call site numbers to all of the landing pads they're
32002  // associated with.
32003  DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
32004  unsigned MaxCSNum = 0;
32005  for (auto &MBB : *MF) {
32006    if (!MBB.isEHPad())
32007      continue;
32008
32009    MCSymbol *Sym = nullptr;
32010    for (const auto &MI : MBB) {
32011      if (MI.isDebugInstr())
32012        continue;
32013
32014      assert(MI.isEHLabel() && "expected EH_LABEL");
32015      Sym = MI.getOperand(0).getMCSymbol();
32016      break;
32017    }
32018
32019    if (!MF->hasCallSiteLandingPad(Sym))
32020      continue;
32021
32022    for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
32023      CallSiteNumToLPad[CSI].push_back(&MBB);
32024      MaxCSNum = std::max(MaxCSNum, CSI);
32025    }
32026  }
32027
32028  // Get an ordered list of the machine basic blocks for the jump table.
32029  std::vector<MachineBasicBlock *> LPadList;
32030  SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
32031  LPadList.reserve(CallSiteNumToLPad.size());
32032
32033  for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
32034    for (auto &LP : CallSiteNumToLPad[CSI]) {
32035      LPadList.push_back(LP);
32036      InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
32037    }
32038  }
32039
32040  assert(!LPadList.empty() &&
32041         "No landing pad destinations for the dispatch jump table!");
32042
32043  // Create the MBBs for the dispatch code.
32044
32045  // Shove the dispatch's address into the return slot in the function context.
32046  MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
32047  DispatchBB->setIsEHPad(true);
32048
32049  MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
32050  BuildMI(TrapBB, DL, TII->get(X86::TRAP));
32051  DispatchBB->addSuccessor(TrapBB);
32052
32053  MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
32054  DispatchBB->addSuccessor(DispContBB);
32055
32056  // Insert MBBs.
32057  MF->push_back(DispatchBB);
32058  MF->push_back(DispContBB);
32059  MF->push_back(TrapBB);
32060
32061  // Insert code into the entry block that creates and registers the function
32062  // context.
32063  SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
32064
32065  // Create the jump table and associated information
32066  unsigned JTE = getJumpTableEncoding();
32067  MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
32068  unsigned MJTI = JTI->createJumpTableIndex(LPadList);
32069
32070  const X86RegisterInfo &RI = TII->getRegisterInfo();
32071  // Add a register mask with no preserved registers.  This results in all
32072  // registers being marked as clobbered.
32073  if (RI.hasBasePointer(*MF)) {
32074    const bool FPIs64Bit =
32075        Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
32076    X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
32077    MFI->setRestoreBasePointer(MF);
32078
32079    Register FP = RI.getFrameRegister(*MF);
32080    Register BP = RI.getBaseRegister();
32081    unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
32082    addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
32083                 MFI->getRestoreBasePointerOffset())
32084        .addRegMask(RI.getNoPreservedMask());
32085  } else {
32086    BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
32087        .addRegMask(RI.getNoPreservedMask());
32088  }
32089
32090  // IReg is used as an index in a memory operand and therefore can't be SP
32091  Register IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
32092  addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
32093                    Subtarget.is64Bit() ? 8 : 4);
32094  BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
32095      .addReg(IReg)
32096      .addImm(LPadList.size());
32097  BuildMI(DispatchBB, DL, TII->get(X86::JCC_1)).addMBB(TrapBB).addImm(X86::COND_AE);
32098
32099  if (Subtarget.is64Bit()) {
32100    Register BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
32101    Register IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
32102
32103    // leaq .LJTI0_0(%rip), BReg
32104    BuildMI(DispContBB, DL, TII->get(X86::LEA64r), BReg)
32105        .addReg(X86::RIP)
32106        .addImm(1)
32107        .addReg(0)
32108        .addJumpTableIndex(MJTI)
32109        .addReg(0);
32110    // movzx IReg64, IReg
32111    BuildMI(DispContBB, DL, TII->get(TargetOpcode::SUBREG_TO_REG), IReg64)
32112        .addImm(0)
32113        .addReg(IReg)
32114        .addImm(X86::sub_32bit);
32115
32116    switch (JTE) {
32117    case MachineJumpTableInfo::EK_BlockAddress:
32118      // jmpq *(BReg,IReg64,8)
32119      BuildMI(DispContBB, DL, TII->get(X86::JMP64m))
32120          .addReg(BReg)
32121          .addImm(8)
32122          .addReg(IReg64)
32123          .addImm(0)
32124          .addReg(0);
32125      break;
32126    case MachineJumpTableInfo::EK_LabelDifference32: {
32127      Register OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
32128      Register OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
32129      Register TReg = MRI->createVirtualRegister(&X86::GR64RegClass);
32130
32131      // movl (BReg,IReg64,4), OReg
32132      BuildMI(DispContBB, DL, TII->get(X86::MOV32rm), OReg)
32133          .addReg(BReg)
32134          .addImm(4)
32135          .addReg(IReg64)
32136          .addImm(0)
32137          .addReg(0);
32138      // movsx OReg64, OReg
32139      BuildMI(DispContBB, DL, TII->get(X86::MOVSX64rr32), OReg64).addReg(OReg);
32140      // addq BReg, OReg64, TReg
32141      BuildMI(DispContBB, DL, TII->get(X86::ADD64rr), TReg)
32142          .addReg(OReg64)
32143          .addReg(BReg);
32144      // jmpq *TReg
32145      BuildMI(DispContBB, DL, TII->get(X86::JMP64r)).addReg(TReg);
32146      break;
32147    }
32148    default:
32149      llvm_unreachable("Unexpected jump table encoding");
32150    }
32151  } else {
32152    // jmpl *.LJTI0_0(,IReg,4)
32153    BuildMI(DispContBB, DL, TII->get(X86::JMP32m))
32154        .addReg(0)
32155        .addImm(4)
32156        .addReg(IReg)
32157        .addJumpTableIndex(MJTI)
32158        .addReg(0);
32159  }
32160
32161  // Add the jump table entries as successors to the MBB.
32162  SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
32163  for (auto &LP : LPadList)
32164    if (SeenMBBs.insert(LP).second)
32165      DispContBB->addSuccessor(LP);
32166
32167  // N.B. the order the invoke BBs are processed in doesn't matter here.
32168  SmallVector<MachineBasicBlock *, 64> MBBLPads;
32169  const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
32170  for (MachineBasicBlock *MBB : InvokeBBs) {
32171    // Remove the landing pad successor from the invoke block and replace it
32172    // with the new dispatch block.
32173    // Keep a copy of Successors since it's modified inside the loop.
32174    SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
32175                                                   MBB->succ_rend());
32176    // FIXME: Avoid quadratic complexity.
32177    for (auto MBBS : Successors) {
32178      if (MBBS->isEHPad()) {
32179        MBB->removeSuccessor(MBBS);
32180        MBBLPads.push_back(MBBS);
32181      }
32182    }
32183
32184    MBB->addSuccessor(DispatchBB);
32185
32186    // Find the invoke call and mark all of the callee-saved registers as
32187    // 'implicit defined' so that they're spilled.  This prevents later passes
32188    // from moving instructions to before the EH block, where they would never
32189    // be executed.
32190    for (auto &II : reverse(*MBB)) {
32191      if (!II.isCall())
32192        continue;
32193
32194      DenseMap<unsigned, bool> DefRegs;
32195      for (auto &MOp : II.operands())
32196        if (MOp.isReg())
32197          DefRegs[MOp.getReg()] = true;
32198
32199      MachineInstrBuilder MIB(*MF, &II);
32200      for (unsigned RegIdx = 0; SavedRegs[RegIdx]; ++RegIdx) {
32201        unsigned Reg = SavedRegs[RegIdx];
32202        if (!DefRegs[Reg])
32203          MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
32204      }
32205
32206      break;
32207    }
32208  }
32209
32210  // Mark all former landing pads as non-landing pads.  The dispatch is the only
32211  // landing pad now.
32212  for (auto &LP : MBBLPads)
32213    LP->setIsEHPad(false);
32214
32215  // The instruction is gone now.
32216  MI.eraseFromParent();
32217  return BB;
32218}
32219
32220MachineBasicBlock *
32221X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
32222                                               MachineBasicBlock *BB) const {
32223  MachineFunction *MF = BB->getParent();
32224  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
32225  DebugLoc DL = MI.getDebugLoc();
32226
32227  switch (MI.getOpcode()) {
32228  default: llvm_unreachable("Unexpected instr type to insert");
32229  case X86::TLS_addr32:
32230  case X86::TLS_addr64:
32231  case X86::TLS_base_addr32:
32232  case X86::TLS_base_addr64:
32233    return EmitLoweredTLSAddr(MI, BB);
32234  case X86::RETPOLINE_CALL32:
32235  case X86::RETPOLINE_CALL64:
32236  case X86::RETPOLINE_TCRETURN32:
32237  case X86::RETPOLINE_TCRETURN64:
32238    return EmitLoweredRetpoline(MI, BB);
32239  case X86::CATCHRET:
32240    return EmitLoweredCatchRet(MI, BB);
32241  case X86::CATCHPAD:
32242    return EmitLoweredCatchPad(MI, BB);
32243  case X86::SEG_ALLOCA_32:
32244  case X86::SEG_ALLOCA_64:
32245    return EmitLoweredSegAlloca(MI, BB);
32246  case X86::TLSCall_32:
32247  case X86::TLSCall_64:
32248    return EmitLoweredTLSCall(MI, BB);
32249  case X86::CMOV_FR32:
32250  case X86::CMOV_FR32X:
32251  case X86::CMOV_FR64:
32252  case X86::CMOV_FR64X:
32253  case X86::CMOV_GR8:
32254  case X86::CMOV_GR16:
32255  case X86::CMOV_GR32:
32256  case X86::CMOV_RFP32:
32257  case X86::CMOV_RFP64:
32258  case X86::CMOV_RFP80:
32259  case X86::CMOV_VR128:
32260  case X86::CMOV_VR128X:
32261  case X86::CMOV_VR256:
32262  case X86::CMOV_VR256X:
32263  case X86::CMOV_VR512:
32264  case X86::CMOV_VK2:
32265  case X86::CMOV_VK4:
32266  case X86::CMOV_VK8:
32267  case X86::CMOV_VK16:
32268  case X86::CMOV_VK32:
32269  case X86::CMOV_VK64:
32270    return EmitLoweredSelect(MI, BB);
32271
32272  case X86::RDFLAGS32:
32273  case X86::RDFLAGS64: {
32274    unsigned PushF =
32275        MI.getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
32276    unsigned Pop = MI.getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
32277    MachineInstr *Push = BuildMI(*BB, MI, DL, TII->get(PushF));
32278    // Permit reads of the EFLAGS and DF registers without them being defined.
32279    // This intrinsic exists to read external processor state in flags, such as
32280    // the trap flag, interrupt flag, and direction flag, none of which are
32281    // modeled by the backend.
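    // E.g. RDFLAGS64 %dst lowers to roughly "pushfq; popq %dst" (and the
    // WRFLAGS64 case below to "pushq %src; popfq").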
32282    assert(Push->getOperand(2).getReg() == X86::EFLAGS &&
32283           "Unexpected register in operand!");
32284    Push->getOperand(2).setIsUndef();
32285    assert(Push->getOperand(3).getReg() == X86::DF &&
32286           "Unexpected register in operand!");
32287    Push->getOperand(3).setIsUndef();
32288    BuildMI(*BB, MI, DL, TII->get(Pop), MI.getOperand(0).getReg());
32289
32290    MI.eraseFromParent(); // The pseudo is gone now.
32291    return BB;
32292  }
32293
32294  case X86::WRFLAGS32:
32295  case X86::WRFLAGS64: {
32296    unsigned Push =
32297        MI.getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
32298    unsigned PopF =
32299        MI.getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
32300    BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI.getOperand(0).getReg());
32301    BuildMI(*BB, MI, DL, TII->get(PopF));
32302
32303    MI.eraseFromParent(); // The pseudo is gone now.
32304    return BB;
32305  }
32306
32307  case X86::FP32_TO_INT16_IN_MEM:
32308  case X86::FP32_TO_INT32_IN_MEM:
32309  case X86::FP32_TO_INT64_IN_MEM:
32310  case X86::FP64_TO_INT16_IN_MEM:
32311  case X86::FP64_TO_INT32_IN_MEM:
32312  case X86::FP64_TO_INT64_IN_MEM:
32313  case X86::FP80_TO_INT16_IN_MEM:
32314  case X86::FP80_TO_INT32_IN_MEM:
32315  case X86::FP80_TO_INT64_IN_MEM: {
32316    // Change the floating point control register to use "round towards zero"
32317    // mode when truncating to an integer value.
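    // The sequence built here is roughly (stack slots and the destination are
    // illustrative):
    //   fnstcw  OrigCW(%esp)       ; save the current control word
    //   movzwl  OrigCW(%esp), %eax
    //   orl     $0xC00, %eax       ; RC bits 11:10 = 0b11, round toward zero
    //   movw    %ax, NewCW(%esp)
    //   fldcw   NewCW(%esp)
    //   fistp   <dest>             ; the truncating store itself
    //   fldcw   OrigCW(%esp)       ; restore the original rounding mode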
32318    int OrigCWFrameIdx = MF->getFrameInfo().CreateStackObject(2, 2, false);
32319    addFrameReference(BuildMI(*BB, MI, DL,
32320                              TII->get(X86::FNSTCW16m)), OrigCWFrameIdx);
32321
32322    // Load the old value of the control word...
32323    Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
32324    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOVZX32rm16), OldCW),
32325                      OrigCWFrameIdx);
32326
32327    // OR 0b11 into bits 10 and 11. 0b11 is the encoding for round toward zero.
32328    Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
32329    BuildMI(*BB, MI, DL, TII->get(X86::OR32ri), NewCW)
32330      .addReg(OldCW, RegState::Kill).addImm(0xC00);
32331
32332    // Extract to 16 bits.
32333    Register NewCW16 =
32334        MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
32335    BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), NewCW16)
32336      .addReg(NewCW, RegState::Kill, X86::sub_16bit);
32337
32338    // Prepare memory for FLDCW.
32339    int NewCWFrameIdx = MF->getFrameInfo().CreateStackObject(2, 2, false);
32340    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)),
32341                      NewCWFrameIdx)
32342      .addReg(NewCW16, RegState::Kill);
32343
32344    // Reload the modified control word now...
32345    addFrameReference(BuildMI(*BB, MI, DL,
32346                              TII->get(X86::FLDCW16m)), NewCWFrameIdx);
32347
32348    // Get the X86 opcode to use.
32349    unsigned Opc;
32350    switch (MI.getOpcode()) {
32351    default: llvm_unreachable("illegal opcode!");
32352    case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
32353    case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
32354    case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
32355    case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
32356    case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
32357    case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
32358    case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
32359    case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
32360    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
32361    }
32362
32363    X86AddressMode AM = getAddressFromInstr(&MI, 0);
32364    addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
32365        .addReg(MI.getOperand(X86::AddrNumOperands).getReg());
32366
32367    // Reload the original control word now.
32368    addFrameReference(BuildMI(*BB, MI, DL,
32369                              TII->get(X86::FLDCW16m)), OrigCWFrameIdx);
32370
32371    MI.eraseFromParent(); // The pseudo instruction is gone now.
32372    return BB;
32373  }
32374
32375  // xbegin
32376  case X86::XBEGIN:
32377    return emitXBegin(MI, BB, Subtarget.getInstrInfo());
32378
32379  case X86::VASTART_SAVE_XMM_REGS:
32380    return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
32381
32382  case X86::VAARG_64:
32383    return EmitVAARG64WithCustomInserter(MI, BB);
32384
32385  case X86::EH_SjLj_SetJmp32:
32386  case X86::EH_SjLj_SetJmp64:
32387    return emitEHSjLjSetJmp(MI, BB);
32388
32389  case X86::EH_SjLj_LongJmp32:
32390  case X86::EH_SjLj_LongJmp64:
32391    return emitEHSjLjLongJmp(MI, BB);
32392
32393  case X86::Int_eh_sjlj_setup_dispatch:
32394    return EmitSjLjDispatchBlock(MI, BB);
32395
32396  case TargetOpcode::STATEPOINT:
32397    // As an implementation detail, STATEPOINT shares the STACKMAP format at
32398    // this point in the process.  We diverge later.
32399    return emitPatchPoint(MI, BB);
32400
32401  case TargetOpcode::STACKMAP:
32402  case TargetOpcode::PATCHPOINT:
32403    return emitPatchPoint(MI, BB);
32404
32405  case TargetOpcode::PATCHABLE_EVENT_CALL:
32406    return emitXRayCustomEvent(MI, BB);
32407
32408  case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
32409    return emitXRayTypedEvent(MI, BB);
32410
32411  case X86::LCMPXCHG8B: {
32412    const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
32413    // In addition to the 4 E[ABCD] registers implied by the encoding, CMPXCHG8B
32414    // requires a memory operand. If the current architecture is i686 and the
32415    // current function needs a base pointer - which is ESI for i686 - the
32416    // register allocator would not be able to allocate registers for an
32417    // address of the form X(%reg, %reg, Y): there would never be enough
32418    // unreserved registers during regalloc (without the need for a base
32419    // pointer, the only option would be X(%edi, %esi, Y)).
32420    // We give the register allocator a hand by precomputing the address in
32421    // a new vreg using LEA.
32422
32423    // If it is not i686 or there is no base pointer - nothing to do here.
32424    if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
32425      return BB;
32426
32427    // Even though this code does not necessarily need the base pointer to
32428    // be ESI, we check for that. The reason: if this assert fails, something
32429    // has changed in the compiler's base pointer handling, which most
32430    // probably has to be addressed somehow here.
32431    assert(TRI->getBaseRegister() == X86::ESI &&
32432           "LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
32433           "base pointer in mind");
32434
32435    MachineRegisterInfo &MRI = MF->getRegInfo();
32436    MVT SPTy = getPointerTy(MF->getDataLayout());
32437    const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
32438    Register computedAddrVReg = MRI.createVirtualRegister(AddrRegClass);
32439
32440    X86AddressMode AM = getAddressFromInstr(&MI, 0);
32441    // Regalloc does not need any help when the memory operand of CMPXCHG8B
32442    // does not use an index register.
32443    if (AM.IndexReg == X86::NoRegister)
32444      return BB;
32445
32446    // After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its
32447    // four operand definitions that are E[ABCD] registers. We skip them and
32448    // then insert the LEA.
32449    MachineBasicBlock::reverse_iterator RMBBI(MI.getReverseIterator());
32450    while (RMBBI != BB->rend() && (RMBBI->definesRegister(X86::EAX) ||
32451                                   RMBBI->definesRegister(X86::EBX) ||
32452                                   RMBBI->definesRegister(X86::ECX) ||
32453                                   RMBBI->definesRegister(X86::EDX))) {
32454      ++RMBBI;
32455    }
32456    MachineBasicBlock::iterator MBBI(RMBBI);
32457    addFullAddress(
32458        BuildMI(*BB, *MBBI, DL, TII->get(X86::LEA32r), computedAddrVReg), AM);
32459
32460    setDirectAddressInInstr(&MI, 0, computedAddrVReg);
32461
32462    return BB;
32463  }
32464  case X86::LCMPXCHG16B:
32465    return BB;
32466  case X86::LCMPXCHG8B_SAVE_EBX:
32467  case X86::LCMPXCHG16B_SAVE_RBX: {
32468    unsigned BasePtr =
32469        MI.getOpcode() == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX;
32470    if (!BB->isLiveIn(BasePtr))
32471      BB->addLiveIn(BasePtr);
32472    return BB;
32473  }
32474  }
32475}
32476
32477//===----------------------------------------------------------------------===//
32478//                           X86 Optimization Hooks
32479//===----------------------------------------------------------------------===//
32480
32481bool
32482X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
32483                                                const APInt &Demanded,
32484                                                TargetLoweringOpt &TLO) const {
32485  // Only optimize Ands to prevent shrinking a constant that could be
32486  // matched by movzx.
32487  if (Op.getOpcode() != ISD::AND)
32488    return false;
32489
32490  EVT VT = Op.getValueType();
32491
32492  // Ignore vectors.
32493  if (VT.isVector())
32494    return false;
32495
32496  unsigned Size = VT.getSizeInBits();
32497
32498  // Make sure the RHS really is a constant.
32499  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
32500  if (!C)
32501    return false;
32502
32503  const APInt &Mask = C->getAPIntValue();
32504
32505  // Clear all non-demanded bits initially.
32506  APInt ShrunkMask = Mask & Demanded;
32507
32508  // Find the width of the shrunk mask.
32509  unsigned Width = ShrunkMask.getActiveBits();
32510
32511  // If the mask is all 0s there's nothing to do here.
32512  if (Width == 0)
32513    return false;
32514
32515  // Find the next power of 2 width, rounding up to a byte.
32516  Width = PowerOf2Ceil(std::max(Width, 8U));
32517  // Truncate the width to size to handle illegal types.
32518  Width = std::min(Width, Size);
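       // e.g. an active width of 11 bits becomes 16, so the candidate mask
       // below is 0xFFFF, the form that a movzx-style zero extend can match.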
32519
32520  // Calculate a possible zero extend mask for this constant.
32521  APInt ZeroExtendMask = APInt::getLowBitsSet(Size, Width);
32522
32523  // If we aren't changing the mask, just return true to keep it and prevent
32524  // the caller from optimizing.
32525  if (ZeroExtendMask == Mask)
32526    return true;
32527
32528  // Make sure the new mask can be represented by a combination of mask bits
32529  // and non-demanded bits.
32530  if (!ZeroExtendMask.isSubsetOf(Mask | ~Demanded))
32531    return false;
32532
32533  // Replace the constant with the zero extend mask.
32534  SDLoc DL(Op);
32535  SDValue NewC = TLO.DAG.getConstant(ZeroExtendMask, DL, VT);
32536  SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
32537  return TLO.CombineTo(Op, NewOp);
32538}
32539
32540void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
32541                                                      KnownBits &Known,
32542                                                      const APInt &DemandedElts,
32543                                                      const SelectionDAG &DAG,
32544                                                      unsigned Depth) const {
32545  unsigned BitWidth = Known.getBitWidth();
32546  unsigned Opc = Op.getOpcode();
32547  EVT VT = Op.getValueType();
32548  assert((Opc >= ISD::BUILTIN_OP_END ||
32549          Opc == ISD::INTRINSIC_WO_CHAIN ||
32550          Opc == ISD::INTRINSIC_W_CHAIN ||
32551          Opc == ISD::INTRINSIC_VOID) &&
32552         "Should use MaskedValueIsZero if you don't know whether Op"
32553         " is a target node!");
32554
32555  Known.resetAll();
32556  switch (Opc) {
32557  default: break;
32558  case X86ISD::SETCC:
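         // SETCC produces a 0/1 value, so every bit above bit 0 is known zero.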
32559    Known.Zero.setBitsFrom(1);
32560    break;
32561  case X86ISD::MOVMSK: {
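         // MOVMSK sets one result bit per source vector element; all higher
         // bits are known zero.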
32562    unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
32563    Known.Zero.setBitsFrom(NumLoBits);
32564    break;
32565  }
32566  case X86ISD::PEXTRB:
32567  case X86ISD::PEXTRW: {
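         // The extracted element is zero-extended to the scalar result width,
         // so the bits above the source element size are known zero.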
32568    SDValue Src = Op.getOperand(0);
32569    EVT SrcVT = Src.getValueType();
32570    APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
32571                                            Op.getConstantOperandVal(1));
32572    Known = DAG.computeKnownBits(Src, DemandedElt, Depth + 1);
32573    Known = Known.zextOrTrunc(BitWidth, false);
32574    Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
32575    break;
32576  }
32577  case X86ISD::VSRAI:
32578  case X86ISD::VSHLI:
32579  case X86ISD::VSRLI: {
32580    unsigned ShAmt = Op.getConstantOperandVal(1);
32581    if (ShAmt >= VT.getScalarSizeInBits()) {
32582      Known.setAllZero();
32583      break;
32584    }
32585
32586    Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
32587    if (Opc == X86ISD::VSHLI) {
32588      Known.Zero <<= ShAmt;
32589      Known.One <<= ShAmt;
32590      // Low bits are known zero.
32591      Known.Zero.setLowBits(ShAmt);
32592    } else if (Opc == X86ISD::VSRLI) {
32593      Known.Zero.lshrInPlace(ShAmt);
32594      Known.One.lshrInPlace(ShAmt);
32595      // High bits are known zero.
32596      Known.Zero.setHighBits(ShAmt);
32597    } else {
32598      Known.Zero.ashrInPlace(ShAmt);
32599      Known.One.ashrInPlace(ShAmt);
32600    }
32601    break;
32602  }
32603  case X86ISD::PACKUS: {
32604    // PACKUS is just a truncation if the upper half is zero.
32605    APInt DemandedLHS, DemandedRHS;
32606    getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
32607
32608    Known.One = APInt::getAllOnesValue(BitWidth * 2);
32609    Known.Zero = APInt::getAllOnesValue(BitWidth * 2);
32610
32611    KnownBits Known2;
32612    if (!!DemandedLHS) {
32613      Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedLHS, Depth + 1);
32614      Known.One &= Known2.One;
32615      Known.Zero &= Known2.Zero;
32616    }
32617    if (!!DemandedRHS) {
32618      Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedRHS, Depth + 1);
32619      Known.One &= Known2.One;
32620      Known.Zero &= Known2.Zero;
32621    }
32622
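         // Only treat the pack as a truncation if the upper half of every
         // demanded source element is known zero; otherwise PACKUS may
         // saturate, so conservatively give up.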
32623    if (Known.countMinLeadingZeros() < BitWidth)
32624      Known.resetAll();
32625    Known = Known.trunc(BitWidth);
32626    break;
32627  }
32628  case X86ISD::ANDNP: {
32629    KnownBits Known2;
32630    Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
32631    Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
32632
32633    // ANDNP = (~X & Y);
32634    Known.One &= Known2.Zero;
32635    Known.Zero |= Known2.One;
32636    break;
32637  }
32638  case X86ISD::FOR: {
32639    KnownBits Known2;
32640    Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
32641    Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
32642
32643    // Output known-0 bits are only known if clear in both the LHS & RHS.
32644    Known.Zero &= Known2.Zero;
32645    // Output known-1 bits are known to be set if set in either the LHS | RHS.
32646    Known.One |= Known2.One;
32647    break;
32648  }
32649  case X86ISD::PSADBW: {
32650    assert(VT.getScalarType() == MVT::i64 &&
32651           Op.getOperand(0).getValueType().getScalarType() == MVT::i8 &&
32652           "Unexpected PSADBW types");
32653
32654    // PSADBW - fills low 16 bits and zeros upper 48 bits of each i64 result.
32655    Known.Zero.setBitsFrom(16);
32656    break;
32657  }
32658  case X86ISD::CMOV: {
32659    Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
32660    // If we don't know any bits, early out.
32661    if (Known.isUnknown())
32662      break;
32663    KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
32664
32665    // Only known if known in both the LHS and RHS.
32666    Known.One &= Known2.One;
32667    Known.Zero &= Known2.Zero;
32668    break;
32669  }
32670  }
32671
32672  // Handle target shuffles.
32673  // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
32674  if (isTargetShuffle(Opc)) {
32675    bool IsUnary;
32676    SmallVector<int, 64> Mask;
32677    SmallVector<SDValue, 2> Ops;
32678    if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
32679                             IsUnary)) {
32680      unsigned NumOps = Ops.size();
32681      unsigned NumElts = VT.getVectorNumElements();
32682      if (Mask.size() == NumElts) {
32683        SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
32684        Known.Zero.setAllBits(); Known.One.setAllBits();
32685        for (unsigned i = 0; i != NumElts; ++i) {
32686          if (!DemandedElts[i])
32687            continue;
32688          int M = Mask[i];
32689          if (M == SM_SentinelUndef) {
32690            // For UNDEF elements, we don't know anything about the common state
32691            // of the shuffle result.
32692            Known.resetAll();
32693            break;
32694          } else if (M == SM_SentinelZero) {
32695            Known.One.clearAllBits();
32696            continue;
32697          }
32698          assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
32699                 "Shuffle index out of range");
32700
32701          unsigned OpIdx = (unsigned)M / NumElts;
32702          unsigned EltIdx = (unsigned)M % NumElts;
32703          if (Ops[OpIdx].getValueType() != VT) {
32704            // TODO - handle target shuffle ops with different value types.
32705            Known.resetAll();
32706            break;
32707          }
32708          DemandedOps[OpIdx].setBit(EltIdx);
32709        }
32710        // Known bits are the values that are shared by every demanded element.
32711        for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) {
32712          if (!DemandedOps[i])
32713            continue;
32714          KnownBits Known2 =
32715              DAG.computeKnownBits(Ops[i], DemandedOps[i], Depth + 1);
32716          Known.One &= Known2.One;
32717          Known.Zero &= Known2.Zero;
32718        }
32719      }
32720    }
32721  }
32722}
32723
32724unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
32725    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
32726    unsigned Depth) const {
32727  EVT VT = Op.getValueType();
32728  unsigned VTBits = VT.getScalarSizeInBits();
32729  unsigned Opcode = Op.getOpcode();
32730  switch (Opcode) {
32731  case X86ISD::SETCC_CARRY:
32732    // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
32733    return VTBits;
32734
32735  case X86ISD::VTRUNC: {
32736    // TODO: Add DemandedElts support.
32737    SDValue Src = Op.getOperand(0);
32738    unsigned NumSrcBits = Src.getScalarValueSizeInBits();
32739    assert(VTBits < NumSrcBits && "Illegal truncation input type");
32740    unsigned Tmp = DAG.ComputeNumSignBits(Src, Depth + 1);
32741    if (Tmp > (NumSrcBits - VTBits))
32742      return Tmp - (NumSrcBits - VTBits);
32743    return 1;
32744  }
32745
32746  case X86ISD::PACKSS: {
32747    // PACKSS is just a truncation if the sign bits extend to the packed size.
32748    APInt DemandedLHS, DemandedRHS;
32749    getPackDemandedElts(Op.getValueType(), DemandedElts, DemandedLHS,
32750                        DemandedRHS);
32751
32752    unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
32753    unsigned Tmp0 = SrcBits, Tmp1 = SrcBits;
32754    if (!!DemandedLHS)
32755      Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
32756    if (!!DemandedRHS)
32757      Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
32758    unsigned Tmp = std::min(Tmp0, Tmp1);
32759    if (Tmp > (SrcBits - VTBits))
32760      return Tmp - (SrcBits - VTBits);
32761    return 1;
32762  }
32763
32764  case X86ISD::VSHLI: {
32765    SDValue Src = Op.getOperand(0);
32766    const APInt &ShiftVal = Op.getConstantOperandAPInt(1);
32767    if (ShiftVal.uge(VTBits))
32768      return VTBits; // Shifted all bits out --> zero.
32769    unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
32770    if (ShiftVal.uge(Tmp))
32771      return 1; // Shifted all sign bits out --> unknown.
32772    return Tmp - ShiftVal.getZExtValue();
32773  }
32774
32775  case X86ISD::VSRAI: {
32776    SDValue Src = Op.getOperand(0);
32777    APInt ShiftVal = Op.getConstantOperandAPInt(1);
32778    if (ShiftVal.uge(VTBits - 1))
32779      return VTBits; // Sign splat.
32780    unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
32781    ShiftVal += Tmp;
32782    return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
32783  }
32784
32785  case X86ISD::PCMPGT:
32786  case X86ISD::PCMPEQ:
32787  case X86ISD::CMPP:
32788  case X86ISD::VPCOM:
32789  case X86ISD::VPCOMU:
32790    // Vector compares return zero/all-bits result values.
32791    return VTBits;
32792
32793  case X86ISD::ANDNP: {
32794    unsigned Tmp0 =
32795        DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
32796    if (Tmp0 == 1) return 1; // Early out.
32797    unsigned Tmp1 =
32798        DAG.ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
32799    return std::min(Tmp0, Tmp1);
32800  }
32801
32802  case X86ISD::CMOV: {
32803    unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth+1);
32804    if (Tmp0 == 1) return 1;  // Early out.
32805    unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth+1);
32806    return std::min(Tmp0, Tmp1);
32807  }
32808  }
32809
32810  // Handle target shuffles.
32811  // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
32812  if (isTargetShuffle(Opcode)) {
32813    bool IsUnary;
32814    SmallVector<int, 64> Mask;
32815    SmallVector<SDValue, 2> Ops;
32816    if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
32817                             IsUnary)) {
32818      unsigned NumOps = Ops.size();
32819      unsigned NumElts = VT.getVectorNumElements();
32820      if (Mask.size() == NumElts) {
32821        SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
32822        for (unsigned i = 0; i != NumElts; ++i) {
32823          if (!DemandedElts[i])
32824            continue;
32825          int M = Mask[i];
32826          if (M == SM_SentinelUndef) {
32827            // For UNDEF elements, we don't know anything about the common state
32828            // of the shuffle result.
32829            return 1;
32830          } else if (M == SM_SentinelZero) {
32831            // Zero = all sign bits.
32832            continue;
32833          }
32834          assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
32835                 "Shuffle index out of range");
32836
32837          unsigned OpIdx = (unsigned)M / NumElts;
32838          unsigned EltIdx = (unsigned)M % NumElts;
32839          if (Ops[OpIdx].getValueType() != VT) {
32840            // TODO - handle target shuffle ops with different value types.
32841            return 1;
32842          }
32843          DemandedOps[OpIdx].setBit(EltIdx);
32844        }
32845        unsigned Tmp0 = VTBits;
32846        for (unsigned i = 0; i != NumOps && Tmp0 > 1; ++i) {
32847          if (!DemandedOps[i])
32848            continue;
32849          unsigned Tmp1 =
32850              DAG.ComputeNumSignBits(Ops[i], DemandedOps[i], Depth + 1);
32851          Tmp0 = std::min(Tmp0, Tmp1);
32852        }
32853        return Tmp0;
32854      }
32855    }
32856  }
32857
32858  // Fallback case.
32859  return 1;
32860}
32861
32862SDValue X86TargetLowering::unwrapAddress(SDValue N) const {
32863  if (N->getOpcode() == X86ISD::Wrapper || N->getOpcode() == X86ISD::WrapperRIP)
32864    return N->getOperand(0);
32865  return N;
32866}
32867
32868// Attempt to match a combined shuffle mask against supported unary shuffle
32869// instructions.
32870// TODO: Investigate sharing more of this with shuffle lowering.
32871static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
32872                              bool AllowFloatDomain, bool AllowIntDomain,
32873                              SDValue &V1, const SDLoc &DL, SelectionDAG &DAG,
32874                              const X86Subtarget &Subtarget, unsigned &Shuffle,
32875                              MVT &SrcVT, MVT &DstVT) {
32876  unsigned NumMaskElts = Mask.size();
32877  unsigned MaskEltSize = MaskVT.getScalarSizeInBits();
32878
32879  // Match against a VZEXT_MOVL vXi32 zero-extending instruction.
32880  if (MaskEltSize == 32 && isUndefOrEqual(Mask[0], 0) &&
32881      isUndefOrZero(Mask[1]) && isUndefInRange(Mask, 2, NumMaskElts - 2)) {
32882    Shuffle = X86ISD::VZEXT_MOVL;
32883    SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
32884    return true;
32885  }
32886
32887  // Match against an ANY/ZERO_EXTEND_VECTOR_INREG instruction.
32888  // TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
32889  if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
32890                         (MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
32891    unsigned MaxScale = 64 / MaskEltSize;
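         // An extension at scale S maps source element i to mask position i*S;
         // the S-1 following mask slots must be undef (any-extend) or
         // undef/zero (zero-extend).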
32892    for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
32893      bool MatchAny = true;
32894      bool MatchZero = true;
32895      unsigned NumDstElts = NumMaskElts / Scale;
32896      for (unsigned i = 0; i != NumDstElts && (MatchAny || MatchZero); ++i) {
32897        if (!isUndefOrEqual(Mask[i * Scale], (int)i)) {
32898          MatchAny = MatchZero = false;
32899          break;
32900        }
32901        MatchAny &= isUndefInRange(Mask, (i * Scale) + 1, Scale - 1);
32902        MatchZero &= isUndefOrZeroInRange(Mask, (i * Scale) + 1, Scale - 1);
32903      }
32904      if (MatchAny || MatchZero) {
32905        assert(MatchZero && "Failed to match zext but matched aext?");
32906        unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
32907        MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType() :
32908                                            MVT::getIntegerVT(MaskEltSize);
32909        SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);
32910
32911        if (SrcVT.getSizeInBits() != MaskVT.getSizeInBits())
32912          V1 = extractSubVector(V1, 0, DAG, DL, SrcSize);
32913
32914        Shuffle = unsigned(MatchAny ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND);
32915        if (SrcVT.getVectorNumElements() != NumDstElts)
32916          Shuffle = getOpcode_EXTEND_VECTOR_INREG(Shuffle);
32917
32918        DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
32919        DstVT = MVT::getVectorVT(DstVT, NumDstElts);
32920        return true;
32921      }
32922    }
32923  }
32924
32925  // Match against a VZEXT_MOVL instruction; SSE1 only supports 32 bits (MOVSS).
32926  if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2())) &&
32927      isUndefOrEqual(Mask[0], 0) &&
32928      isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
32929    Shuffle = X86ISD::VZEXT_MOVL;
32930    SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
32931    return true;
32932  }
32933
32934  // Check if we have SSE3, which will let us use MOVDDUP etc. These
32935  // instructions are no slower than UNPCKLPD but have the option to
32936  // fold the input operand into even an unaligned memory load.
32937  if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
32938    if (isTargetShuffleEquivalent(Mask, {0, 0})) {
32939      Shuffle = X86ISD::MOVDDUP;
32940      SrcVT = DstVT = MVT::v2f64;
32941      return true;
32942    }
32943    if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
32944      Shuffle = X86ISD::MOVSLDUP;
32945      SrcVT = DstVT = MVT::v4f32;
32946      return true;
32947    }
32948    if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3})) {
32949      Shuffle = X86ISD::MOVSHDUP;
32950      SrcVT = DstVT = MVT::v4f32;
32951      return true;
32952    }
32953  }
32954
32955  if (MaskVT.is256BitVector() && AllowFloatDomain) {
32956    assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
32957    if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
32958      Shuffle = X86ISD::MOVDDUP;
32959      SrcVT = DstVT = MVT::v4f64;
32960      return true;
32961    }
32962    if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
32963      Shuffle = X86ISD::MOVSLDUP;
32964      SrcVT = DstVT = MVT::v8f32;
32965      return true;
32966    }
32967    if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3, 5, 5, 7, 7})) {
32968      Shuffle = X86ISD::MOVSHDUP;
32969      SrcVT = DstVT = MVT::v8f32;
32970      return true;
32971    }
32972  }
32973
32974  if (MaskVT.is512BitVector() && AllowFloatDomain) {
32975    assert(Subtarget.hasAVX512() &&
32976           "AVX512 required for 512-bit vector shuffles");
32977    if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
32978      Shuffle = X86ISD::MOVDDUP;
32979      SrcVT = DstVT = MVT::v8f64;
32980      return true;
32981    }
32982    if (isTargetShuffleEquivalent(
32983            Mask, {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14})) {
32984      Shuffle = X86ISD::MOVSLDUP;
32985      SrcVT = DstVT = MVT::v16f32;
32986      return true;
32987    }
32988    if (isTargetShuffleEquivalent(
32989            Mask, {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15})) {
32990      Shuffle = X86ISD::MOVSHDUP;
32991      SrcVT = DstVT = MVT::v16f32;
32992      return true;
32993    }
32994  }
32995
32996  return false;
32997}
32998
32999// Attempt to match a combined shuffle mask against supported unary immediate
33000// permute instructions.
33001// TODO: Investigate sharing more of this with shuffle lowering.
33002static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask,
33003                                     const APInt &Zeroable,
33004                                     bool AllowFloatDomain, bool AllowIntDomain,
33005                                     const X86Subtarget &Subtarget,
33006                                     unsigned &Shuffle, MVT &ShuffleVT,
33007                                     unsigned &PermuteImm) {
33008  unsigned NumMaskElts = Mask.size();
33009  unsigned InputSizeInBits = MaskVT.getSizeInBits();
33010  unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts;
33011  MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
33012
33013  bool ContainsZeros =
33014      llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
33015
33016  // Handle VPERMI/VPERMILPD vXi64/vXf64 patterns.
33017  if (!ContainsZeros && MaskScalarSizeInBits == 64) {
33018    // Check for lane crossing permutes.
33019    if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
33020      // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
33021      if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) {
33022        Shuffle = X86ISD::VPERMI;
33023        ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
33024        PermuteImm = getV4X86ShuffleImm(Mask);
33025        return true;
33026      }
33027      if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) {
33028        SmallVector<int, 4> RepeatedMask;
33029        if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
33030          Shuffle = X86ISD::VPERMI;
33031          ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
33032          PermuteImm = getV4X86ShuffleImm(RepeatedMask);
33033          return true;
33034        }
33035      }
33036    } else if (AllowFloatDomain && Subtarget.hasAVX()) {
33037      // VPERMILPD can permute with a non-repeating shuffle.
33038      Shuffle = X86ISD::VPERMILPI;
33039      ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
33040      PermuteImm = 0;
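           // VPERMILPD uses one immediate bit per f64 element, choosing the
           // low or high element within that element's 128-bit lane.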
33041      for (int i = 0, e = Mask.size(); i != e; ++i) {
33042        int M = Mask[i];
33043        if (M == SM_SentinelUndef)
33044          continue;
33045        assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
33046        PermuteImm |= (M & 1) << i;
33047      }
33048      return true;
33049    }
33050  }
33051
33052  // Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
33053  // AVX introduced the VPERMILPD/VPERMILPS float permutes; before then we
33054  // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
33055  if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
33056      !ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
33057    SmallVector<int, 4> RepeatedMask;
33058    if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
33059      // Narrow the repeated mask to create 32-bit element permutes.
33060      SmallVector<int, 4> WordMask = RepeatedMask;
33061      if (MaskScalarSizeInBits == 64)
33062        scaleShuffleMask<int>(2, RepeatedMask, WordMask);
33063
33064      Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
33065      ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
33066      ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
33067      PermuteImm = getV4X86ShuffleImm(WordMask);
33068      return true;
33069    }
33070  }
33071
33072  // Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
33073  if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16) {
33074    SmallVector<int, 4> RepeatedMask;
33075    if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
33076      ArrayRef<int> LoMask(RepeatedMask.data() + 0, 4);
33077      ArrayRef<int> HiMask(RepeatedMask.data() + 4, 4);
33078
33079      // PSHUFLW: permute lower 4 elements only.
33080      if (isUndefOrInRange(LoMask, 0, 4) &&
33081          isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
33082        Shuffle = X86ISD::PSHUFLW;
33083        ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
33084        PermuteImm = getV4X86ShuffleImm(LoMask);
33085        return true;
33086      }
33087
33088      // PSHUFHW: permute upper 4 elements only.
33089      if (isUndefOrInRange(HiMask, 4, 8) &&
33090          isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
33091        // Offset the HiMask so that we can create the shuffle immediate.
33092        int OffsetHiMask[4];
33093        for (int i = 0; i != 4; ++i)
33094          OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);
33095
33096        Shuffle = X86ISD::PSHUFHW;
33097        ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
33098        PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
33099        return true;
33100      }
33101    }
33102  }
33103
33104  // Attempt to match against byte/bit shifts.
33105  // FIXME: Add 512-bit support.
33106  if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
33107                         (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
33108    int ShiftAmt = matchShuffleAsShift(ShuffleVT, Shuffle, MaskScalarSizeInBits,
33109                                       Mask, 0, Zeroable, Subtarget);
33110    if (0 < ShiftAmt) {
33111      PermuteImm = (unsigned)ShiftAmt;
33112      return true;
33113    }
33114  }
33115
33116  return false;
33117}
33118
33119// Attempt to match a combined unary shuffle mask against supported binary
33120// shuffle instructions.
33121// TODO: Investigate sharing more of this with shuffle lowering.
33122static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
33123                               bool AllowFloatDomain, bool AllowIntDomain,
33124                               SDValue &V1, SDValue &V2, const SDLoc &DL,
33125                               SelectionDAG &DAG, const X86Subtarget &Subtarget,
33126                               unsigned &Shuffle, MVT &SrcVT, MVT &DstVT,
33127                               bool IsUnary) {
33128  unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
33129
33130  if (MaskVT.is128BitVector()) {
33131    if (isTargetShuffleEquivalent(Mask, {0, 0}) && AllowFloatDomain) {
33132      V2 = V1;
33133      V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1);
33134      Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS;
33135      SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
33136      return true;
33137    }
33138    if (isTargetShuffleEquivalent(Mask, {1, 1}) && AllowFloatDomain) {
33139      V2 = V1;
33140      Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS;
33141      SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
33142      return true;
33143    }
33144    if (isTargetShuffleEquivalent(Mask, {0, 3}) && Subtarget.hasSSE2() &&
33145        (AllowFloatDomain || !Subtarget.hasSSE41())) {
33146      std::swap(V1, V2);
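           // MOVSD takes its low element from the second operand and the high
           // element from the first, so swap V1/V2 to match the {0, 3} mask.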
33147      Shuffle = X86ISD::MOVSD;
33148      SrcVT = DstVT = MVT::v2f64;
33149      return true;
33150    }
33151    if (isTargetShuffleEquivalent(Mask, {4, 1, 2, 3}) &&
33152        (AllowFloatDomain || !Subtarget.hasSSE41())) {
33153      Shuffle = X86ISD::MOVSS;
33154      SrcVT = DstVT = MVT::v4f32;
33155      return true;
33156    }
33157  }
33158
33159  // Attempt to match against either a unary or binary PACKSS/PACKUS shuffle.
33160  if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) ||
33161      ((MaskVT == MVT::v16i16 || MaskVT == MVT::v32i8) && Subtarget.hasInt256()) ||
33162      ((MaskVT == MVT::v32i16 || MaskVT == MVT::v64i8) && Subtarget.hasBWI())) {
33163    if (matchShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
33164                             Subtarget)) {
33165      DstVT = MaskVT;
33166      return true;
33167    }
33168  }
33169
33170  // Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
33171  if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
33172      (MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
33173      (MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
33174      (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
33175      (MaskVT.is512BitVector() && Subtarget.hasAVX512())) {
33176    if (matchShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL, DAG,
33177                              Subtarget)) {
33178      SrcVT = DstVT = MaskVT;
33179      if (MaskVT.is256BitVector() && !Subtarget.hasAVX2())
33180        SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
33181      return true;
33182    }
33183  }
33184
33185  return false;
33186}
33187
33188static bool matchBinaryPermuteShuffle(
33189    MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable,
33190    bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2,
33191    const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget,
33192    unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {
33193  unsigned NumMaskElts = Mask.size();
33194  unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
33195
33196  // Attempt to match against PALIGNR byte rotate.
33197  if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) ||
33198                         (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
33199    int ByteRotation = matchShuffleAsByteRotate(MaskVT, V1, V2, Mask);
33200    if (0 < ByteRotation) {
33201      Shuffle = X86ISD::PALIGNR;
33202      ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8);
33203      PermuteImm = ByteRotation;
33204      return true;
33205    }
33206  }
33207
33208  // Attempt to combine to X86ISD::BLENDI.
33209  if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
33210                            (Subtarget.hasAVX() && MaskVT.is256BitVector()))) ||
33211      (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) {
33212    uint64_t BlendMask = 0;
33213    bool ForceV1Zero = false, ForceV2Zero = false;
33214    SmallVector<int, 8> TargetMask(Mask.begin(), Mask.end());
33215    if (matchShuffleAsBlend(V1, V2, TargetMask, Zeroable, ForceV1Zero,
33216                            ForceV2Zero, BlendMask)) {
33217      if (MaskVT == MVT::v16i16) {
33218        // We can only use v16i16 PBLENDW if the lanes are repeated.
33219        SmallVector<int, 8> RepeatedMask;
33220        if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask,
33221                                        RepeatedMask)) {
33222          assert(RepeatedMask.size() == 8 &&
33223                 "Repeated mask size doesn't match!");
33224          PermuteImm = 0;
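               // PBLENDW immediate: bit i selects element i from V2 when set,
               // from V1 when clear; v16i16 repeats the choice per 128-bit lane.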
33225          for (int i = 0; i < 8; ++i)
33226            if (RepeatedMask[i] >= 8)
33227              PermuteImm |= 1 << i;
33228          V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
33229          V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
33230          Shuffle = X86ISD::BLENDI;
33231          ShuffleVT = MaskVT;
33232          return true;
33233        }
33234      } else {
33235        V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
33236        V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
33237        PermuteImm = (unsigned)BlendMask;
33238        Shuffle = X86ISD::BLENDI;
33239        ShuffleVT = MaskVT;
33240        return true;
33241      }
33242    }
33243  }
33244
33245  // Attempt to combine to INSERTPS, but only if it has elements that need to
33246  // be set to zero.
33247  if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
33248      MaskVT.is128BitVector() &&
33249      llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; }) &&
33250      matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
33251    Shuffle = X86ISD::INSERTPS;
33252    ShuffleVT = MVT::v4f32;
33253    return true;
33254  }
33255
33256  // Attempt to combine to SHUFPD.
33257  if (AllowFloatDomain && EltSizeInBits == 64 &&
33258      ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
33259       (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
33260       (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
33261    bool ForceV1Zero = false, ForceV2Zero = false;
33262    if (matchShuffleWithSHUFPD(MaskVT, V1, V2, ForceV1Zero, ForceV2Zero,
33263                               PermuteImm, Mask, Zeroable)) {
33264      V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
33265      V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
33266      Shuffle = X86ISD::SHUFP;
33267      ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64);
33268      return true;
33269    }
33270  }
33271
33272  // Attempt to combine to SHUFPS.
33273  if (AllowFloatDomain && EltSizeInBits == 32 &&
33274      ((MaskVT.is128BitVector() && Subtarget.hasSSE1()) ||
33275       (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
33276       (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
33277    SmallVector<int, 4> RepeatedMask;
33278    if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) {
33279      // Match each half of the repeated mask to determine if it's just
33280      // referencing one of the vectors, is zeroable, or is entirely undef.
33281      auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) {
33282        int M0 = RepeatedMask[Offset];
33283        int M1 = RepeatedMask[Offset + 1];
33284
33285        if (isUndefInRange(RepeatedMask, Offset, 2)) {
33286          return DAG.getUNDEF(MaskVT);
33287        } else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) {
33288          S0 = (SM_SentinelUndef == M0 ? -1 : 0);
33289          S1 = (SM_SentinelUndef == M1 ? -1 : 1);
33290          return getZeroVector(MaskVT, Subtarget, DAG, DL);
33291        } else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) {
33292          S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
33293          S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
33294          return V1;
33295        } else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) {
33296          S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
33297          S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
33298          return V2;
33299        }
33300
33301        return SDValue();
33302      };
33303
33304      int ShufMask[4] = {-1, -1, -1, -1};
33305      SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]);
33306      SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]);
33307
33308      if (Lo && Hi) {
33309        V1 = Lo;
33310        V2 = Hi;
33311        Shuffle = X86ISD::SHUFP;
33312        ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32);
33313        PermuteImm = getV4X86ShuffleImm(ShufMask);
33314        return true;
33315      }
33316    }
33317  }
33318
33319  // Attempt to combine to INSERTPS more generally if X86ISD::SHUFP failed.
33320  if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
33321      MaskVT.is128BitVector() &&
33322      matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
33323    Shuffle = X86ISD::INSERTPS;
33324    ShuffleVT = MVT::v4f32;
33325    return true;
33326  }
33327
33328  return false;
33329}
33330
33331static SDValue combineX86ShuffleChainWithExtract(
33332    ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
33333    bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
33334    const X86Subtarget &Subtarget);
33335
33336/// Combine an arbitrary chain of shuffles into a single instruction if
33337/// possible.
33338///
33339/// This is the leaf of the recursive combine below. When we have found some
33340/// chain of single-use x86 shuffle instructions and accumulated the combined
33341/// shuffle mask represented by them, this will try to pattern match that mask
33342/// into either a single instruction if there is a special purpose instruction
33343/// for this operation, or into a PSHUFB instruction which is a fully general
33344/// instruction but should only be used to replace chains over a certain depth.
33345static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
33346                                      ArrayRef<int> BaseMask, int Depth,
33347                                      bool HasVariableMask,
33348                                      bool AllowVariableMask, SelectionDAG &DAG,
33349                                      const X86Subtarget &Subtarget) {
33350  assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
33351  assert((Inputs.size() == 1 || Inputs.size() == 2) &&
33352         "Unexpected number of shuffle inputs!");
33353
33354  // Find the inputs that enter the chain. Note that multiple uses are OK
33355  // here; we're not going to remove the operands we find.
33356  bool UnaryShuffle = (Inputs.size() == 1);
33357  SDValue V1 = peekThroughBitcasts(Inputs[0]);
33358  SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType())
33359                             : peekThroughBitcasts(Inputs[1]));
33360
33361  MVT VT1 = V1.getSimpleValueType();
33362  MVT VT2 = V2.getSimpleValueType();
33363  MVT RootVT = Root.getSimpleValueType();
33364  assert(VT1.getSizeInBits() == RootVT.getSizeInBits() &&
33365         VT2.getSizeInBits() == RootVT.getSizeInBits() &&
33366         "Vector size mismatch");
33367
33368  SDLoc DL(Root);
33369  SDValue Res;
33370
33371  unsigned NumBaseMaskElts = BaseMask.size();
33372  if (NumBaseMaskElts == 1) {
33373    assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
33374    return DAG.getBitcast(RootVT, V1);
33375  }
33376
33377  unsigned RootSizeInBits = RootVT.getSizeInBits();
33378  unsigned NumRootElts = RootVT.getVectorNumElements();
33379  unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
33380  bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
33381                     (RootVT.isFloatingPoint() && Depth >= 1) ||
33382                     (RootVT.is256BitVector() && !Subtarget.hasAVX2());
33383
33384  // Don't combine if we are an AVX512/EVEX target and the mask element size
33385  // is different from the root element size - this would prevent writemasks
33386  // from being reused.
33387  // TODO - this currently prevents all lane shuffles from occurring.
33388  // TODO - check for writemasks usage instead of always preventing combining.
33389  // TODO - attempt to narrow Mask back to writemask size.
33390  bool IsEVEXShuffle =
33391      RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128);
33392
33393  // Attempt to match a subvector broadcast.
33394  // shuffle(insert_subvector(undef, sub, 0), undef, 0, 0, 0, 0)
33395  if (UnaryShuffle &&
33396      (BaseMaskEltSizeInBits == 128 || BaseMaskEltSizeInBits == 256)) {
33397    SmallVector<int, 64> BroadcastMask(NumBaseMaskElts, 0);
33398    if (isTargetShuffleEquivalent(BaseMask, BroadcastMask)) {
33399      SDValue Src = Inputs[0];
33400      if (Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
33401          Src.getOperand(0).isUndef() &&
33402          Src.getOperand(1).getValueSizeInBits() == BaseMaskEltSizeInBits &&
33403          MayFoldLoad(Src.getOperand(1)) && isNullConstant(Src.getOperand(2))) {
33404        return DAG.getBitcast(RootVT, DAG.getNode(X86ISD::SUBV_BROADCAST, DL,
33405                                                  Src.getValueType(),
33406                                                  Src.getOperand(1)));
33407      }
33408    }
33409  }
33410
33411  // TODO - handle 128/256-bit lane shuffles of 512-bit vectors.
33412
33413  // Handle 128-bit lane shuffles of 256-bit vectors.
33414  // If we have AVX2, prefer to use VPERMQ/VPERMPD for unary shuffles unless
33415  // we need to use the zeroing feature.
33416  // TODO - this should support binary shuffles.
33417  if (UnaryShuffle && RootVT.is256BitVector() && NumBaseMaskElts == 2 &&
33418      !(Subtarget.hasAVX2() && BaseMask[0] >= -1 && BaseMask[1] >= -1) &&
33419      !isSequentialOrUndefOrZeroInRange(BaseMask, 0, 2, 0)) {
33420    if (Depth == 0 && Root.getOpcode() == X86ISD::VPERM2X128)
33421      return SDValue(); // Nothing to do!
33422    MVT ShuffleVT = (FloatDomain ? MVT::v4f64 : MVT::v4i64);
33423    unsigned PermMask = 0;
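         // VPERM2X128 immediate: bits 0-1 and 4-5 select the source 128-bit
         // lane for the low and high destination halves, while bits 3 and 7
         // zero the corresponding half (used here for undef/zero mask elements).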
33424    PermMask |= ((BaseMask[0] < 0 ? 0x8 : (BaseMask[0] & 1)) << 0);
33425    PermMask |= ((BaseMask[1] < 0 ? 0x8 : (BaseMask[1] & 1)) << 4);
33426
33427    Res = DAG.getBitcast(ShuffleVT, V1);
33428    Res = DAG.getNode(X86ISD::VPERM2X128, DL, ShuffleVT, Res,
33429                      DAG.getUNDEF(ShuffleVT),
33430                      DAG.getTargetConstant(PermMask, DL, MVT::i8));
33431    return DAG.getBitcast(RootVT, Res);
33432  }
33433
33434  // For masks that have been widened to 128-bit elements or more,
33435  // narrow back down to 64-bit elements.
33436  SmallVector<int, 64> Mask;
33437  if (BaseMaskEltSizeInBits > 64) {
33438    assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
33439    int MaskScale = BaseMaskEltSizeInBits / 64;
33440    scaleShuffleMask<int>(MaskScale, BaseMask, Mask);
33441  } else {
33442    Mask = SmallVector<int, 64>(BaseMask.begin(), BaseMask.end());
33443  }
33444
33445  unsigned NumMaskElts = Mask.size();
33446  unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;
33447
33448  // Determine the effective mask value type.
33449  FloatDomain &= (32 <= MaskEltSizeInBits);
33450  MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
33451                           : MVT::getIntegerVT(MaskEltSizeInBits);
33452  MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);
33453
33454  // Only allow legal mask types.
33455  if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
33456    return SDValue();
33457
33458  // Attempt to match the mask against known shuffle patterns.
33459  MVT ShuffleSrcVT, ShuffleVT;
33460  unsigned Shuffle, PermuteImm;
33461
33462  // Which shuffle domains are permitted?
33463  // Permit domain crossing at higher combine depths.
33464  // TODO: Should we indicate which domain is preferred if both are allowed?
33465  bool AllowFloatDomain = FloatDomain || (Depth >= 3);
33466  bool AllowIntDomain = (!FloatDomain || (Depth >= 3)) && Subtarget.hasSSE2() &&
33467                        (!MaskVT.is256BitVector() || Subtarget.hasAVX2());
33468
33469  // Determine zeroable mask elements.
33470  APInt KnownUndef, KnownZero;
33471  resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
33472  APInt Zeroable = KnownUndef | KnownZero;
33473
33474  if (UnaryShuffle) {
33475    // If we are shuffling an X86ISD::VZEXT_LOAD then we can use the load
33476    // directly if we don't shuffle the lower element and we shuffle the upper
33477    // (zero) elements within themselves.
33478    if (V1.getOpcode() == X86ISD::VZEXT_LOAD &&
33479        (cast<MemIntrinsicSDNode>(V1)->getMemoryVT().getScalarSizeInBits() %
33480         MaskEltSizeInBits) == 0) {
33481      unsigned Scale =
33482          cast<MemIntrinsicSDNode>(V1)->getMemoryVT().getScalarSizeInBits() /
33483          MaskEltSizeInBits;
33484      ArrayRef<int> HiMask(Mask.data() + Scale, NumMaskElts - Scale);
33485      if (isSequentialOrUndefInRange(Mask, 0, Scale, 0) &&
33486          isUndefOrZeroOrInRange(HiMask, Scale, NumMaskElts)) {
33487        return DAG.getBitcast(RootVT, V1);
33488      }
33489    }
33490
33491    // Attempt to match against broadcast-from-vector.
33492    // Limit AVX1 to cases where we're loading+broadcasting a scalar element.
33493    if ((Subtarget.hasAVX2() || (Subtarget.hasAVX() && 32 <= MaskEltSizeInBits))
33494        && (!IsEVEXShuffle || NumRootElts == NumMaskElts)) {
33495      SmallVector<int, 64> BroadcastMask(NumMaskElts, 0);
33496      if (isTargetShuffleEquivalent(Mask, BroadcastMask)) {
33497        if (V1.getValueType() == MaskVT &&
33498            V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
33499            MayFoldLoad(V1.getOperand(0))) {
33500          if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
33501            return SDValue(); // Nothing to do!
33502          Res = V1.getOperand(0);
33503          Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
33504          return DAG.getBitcast(RootVT, Res);
33505        }
33506        if (Subtarget.hasAVX2()) {
33507          if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
33508            return SDValue(); // Nothing to do!
33509          Res = DAG.getBitcast(MaskVT, V1);
33510          Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
33511          return DAG.getBitcast(RootVT, Res);
33512        }
33513      }
33514    }
33515
33516    SDValue NewV1 = V1; // Save operand in case early exit happens.
33517    if (matchUnaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
33518                          DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
33519                          ShuffleVT) &&
33520        (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
33521      if (Depth == 0 && Root.getOpcode() == Shuffle)
33522        return SDValue(); // Nothing to do!
33523      Res = DAG.getBitcast(ShuffleSrcVT, NewV1);
33524      Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
33525      return DAG.getBitcast(RootVT, Res);
33526    }
33527
33528    if (matchUnaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
33529                                 AllowIntDomain, Subtarget, Shuffle, ShuffleVT,
33530                                 PermuteImm) &&
33531        (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
33532      if (Depth == 0 && Root.getOpcode() == Shuffle)
33533        return SDValue(); // Nothing to do!
33534      Res = DAG.getBitcast(ShuffleVT, V1);
33535      Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
33536                        DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
33537      return DAG.getBitcast(RootVT, Res);
33538    }
33539  }
33540
33541  SDValue NewV1 = V1; // Save operands in case early exit happens.
33542  SDValue NewV2 = V2;
33543  if (matchBinaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
33544                         NewV2, DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
33545                         ShuffleVT, UnaryShuffle) &&
33546      (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
33547    if (Depth == 0 && Root.getOpcode() == Shuffle)
33548      return SDValue(); // Nothing to do!
33549    NewV1 = DAG.getBitcast(ShuffleSrcVT, NewV1);
33550    NewV2 = DAG.getBitcast(ShuffleSrcVT, NewV2);
33551    Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2);
33552    return DAG.getBitcast(RootVT, Res);
33553  }
33554
33555  NewV1 = V1; // Save operands in case early exit happens.
33556  NewV2 = V2;
33557  if (matchBinaryPermuteShuffle(
33558          MaskVT, Mask, Zeroable, AllowFloatDomain, AllowIntDomain, NewV1,
33559          NewV2, DL, DAG, Subtarget, Shuffle, ShuffleVT, PermuteImm) &&
33560      (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
33561    if (Depth == 0 && Root.getOpcode() == Shuffle)
33562      return SDValue(); // Nothing to do!
33563    NewV1 = DAG.getBitcast(ShuffleVT, NewV1);
33564    NewV2 = DAG.getBitcast(ShuffleVT, NewV2);
33565    Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2,
33566                      DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
33567    return DAG.getBitcast(RootVT, Res);
33568  }
33569
33570  // Typically from here on, we need an integer version of MaskVT.
33571  MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits);
33572  IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts);
33573
33574  // Annoyingly, SSE4A instructions don't map into the above match helpers.
33575  if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) {
33576    uint64_t BitLen, BitIdx;
33577    if (matchShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx,
33578                            Zeroable)) {
33579      if (Depth == 0 && Root.getOpcode() == X86ISD::EXTRQI)
33580        return SDValue(); // Nothing to do!
33581      V1 = DAG.getBitcast(IntMaskVT, V1);
33582      Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1,
33583                        DAG.getTargetConstant(BitLen, DL, MVT::i8),
33584                        DAG.getTargetConstant(BitIdx, DL, MVT::i8));
33585      return DAG.getBitcast(RootVT, Res);
33586    }
33587
33588    if (matchShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) {
33589      if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTQI)
33590        return SDValue(); // Nothing to do!
33591      V1 = DAG.getBitcast(IntMaskVT, V1);
33592      V2 = DAG.getBitcast(IntMaskVT, V2);
33593      Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2,
33594                        DAG.getTargetConstant(BitLen, DL, MVT::i8),
33595                        DAG.getTargetConstant(BitIdx, DL, MVT::i8));
33596      return DAG.getBitcast(RootVT, Res);
33597    }
33598  }
33599
33600  // Don't try to re-form single instruction chains under any circumstances now
33601  // that we've done encoding canonicalization for them.
33602  if (Depth < 1)
33603    return SDValue();
33604
33605  // Depth threshold above which we can efficiently use variable mask shuffles.
33606  int VariableShuffleDepth = Subtarget.hasFastVariableShuffle() ? 1 : 2;
33607  AllowVariableMask &= (Depth >= VariableShuffleDepth) || HasVariableMask;
33608
33609  bool MaskContainsZeros =
33610      any_of(Mask, [](int M) { return M == SM_SentinelZero; });
33611
33612  if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) {
33613    // If we have a single input lane-crossing shuffle then lower to VPERMV.
33614    if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
33615        ((Subtarget.hasAVX2() &&
33616          (MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
33617         (Subtarget.hasAVX512() &&
33618          (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
33619           MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
33620         (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
33621         (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
33622         (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
33623         (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
33624      SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
33625      Res = DAG.getBitcast(MaskVT, V1);
33626      Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res);
33627      return DAG.getBitcast(RootVT, Res);
33628    }
33629
33630    // Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero
33631    // vector as the second source.
33632    if (UnaryShuffle && AllowVariableMask &&
33633        ((Subtarget.hasAVX512() &&
33634          (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
33635           MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
33636         (Subtarget.hasVLX() &&
33637          (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
33638           MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
33639         (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
33640         (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
33641         (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
33642         (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
33643      // Adjust shuffle mask - replace SM_SentinelZero with second source index.
33644      for (unsigned i = 0; i != NumMaskElts; ++i)
33645        if (Mask[i] == SM_SentinelZero)
33646          Mask[i] = NumMaskElts + i;
33647
33648      SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
33649      Res = DAG.getBitcast(MaskVT, V1);
33650      SDValue Zero = getZeroVector(MaskVT, Subtarget, DAG, DL);
33651      Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, Res, VPermMask, Zero);
33652      return DAG.getBitcast(RootVT, Res);
33653    }
33654
33655    // If that failed and either input is extracted then try to combine as a
33656    // shuffle with the larger type.
33657    if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
33658            Inputs, Root, BaseMask, Depth, HasVariableMask, AllowVariableMask,
33659            DAG, Subtarget))
33660      return WideShuffle;
33661
33662    // If we have a dual input lane-crossing shuffle then lower to VPERMV3.
33663    if (AllowVariableMask && !MaskContainsZeros &&
33664        ((Subtarget.hasAVX512() &&
33665          (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
33666           MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
33667         (Subtarget.hasVLX() &&
33668          (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
33669           MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
33670         (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
33671         (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
33672         (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
33673         (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
33674      SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
33675      V1 = DAG.getBitcast(MaskVT, V1);
33676      V2 = DAG.getBitcast(MaskVT, V2);
33677      Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
33678      return DAG.getBitcast(RootVT, Res);
33679    }
33680    return SDValue();
33681  }
33682
33683  // See if we can combine a single input shuffle with zeros to a bit-mask,
33684  // which is much simpler than any shuffle.
33685  if (UnaryShuffle && MaskContainsZeros && AllowVariableMask &&
33686      isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
33687      DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
33688    APInt Zero = APInt::getNullValue(MaskEltSizeInBits);
33689    APInt AllOnes = APInt::getAllOnesValue(MaskEltSizeInBits);
33690    APInt UndefElts(NumMaskElts, 0);
33691    SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
33692    for (unsigned i = 0; i != NumMaskElts; ++i) {
33693      int M = Mask[i];
33694      if (M == SM_SentinelUndef) {
33695        UndefElts.setBit(i);
33696        continue;
33697      }
33698      if (M == SM_SentinelZero)
33699        continue;
33700      EltBits[i] = AllOnes;
33701    }
33702    SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL);
33703    Res = DAG.getBitcast(MaskVT, V1);
33704    unsigned AndOpcode =
33705        FloatDomain ? unsigned(X86ISD::FAND) : unsigned(ISD::AND);
33706    Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask);
33707    return DAG.getBitcast(RootVT, Res);
33708  }
33709
33710  // If we have a single input shuffle with different shuffle patterns in the
33711  // 128-bit lanes, use the variable mask form of VPERMILPS.
33712  // TODO: Combine other mask types at higher depths.
33713  if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
33714      ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
33715       (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
33716    SmallVector<SDValue, 16> VPermIdx;
33717    for (int M : Mask) {
33718      SDValue Idx =
33719          M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
33720      VPermIdx.push_back(Idx);
33721    }
33722    SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx);
33723    Res = DAG.getBitcast(MaskVT, V1);
33724    Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
33725    return DAG.getBitcast(RootVT, Res);
33726  }
33727
33728  // With XOP, binary shuffles of 128/256-bit floating point vectors can combine
33729  // to VPERMIL2PD/VPERMIL2PS.
33730  if (AllowVariableMask && Subtarget.hasXOP() &&
33731      (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 ||
33732       MaskVT == MVT::v8f32)) {
33733    // VPERMIL2 Operation.
33734    // Bits[3] - Match Bit.
33735    // Bits[2:1] - (Per Lane) PD Shuffle Mask.
33736    // Bits[2:0] - (Per Lane) PS Shuffle Mask.
33737    unsigned NumLanes = MaskVT.getSizeInBits() / 128;
33738    unsigned NumEltsPerLane = NumMaskElts / NumLanes;
33739    SmallVector<int, 8> VPerm2Idx;
33740    unsigned M2ZImm = 0;
33741    for (int M : Mask) {
33742      if (M == SM_SentinelUndef) {
33743        VPerm2Idx.push_back(-1);
33744        continue;
33745      }
33746      if (M == SM_SentinelZero) {
33747        M2ZImm = 2;
33748        VPerm2Idx.push_back(8);
33749        continue;
33750      }
33751      int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane);
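      // For VPERMIL2PD the per-lane element select lives in bits[2:1] (see the
      // field layout above), so scale the 64-bit element index by 2.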
33752      Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index);
33753      VPerm2Idx.push_back(Index);
33754    }
33755    V1 = DAG.getBitcast(MaskVT, V1);
33756    V2 = DAG.getBitcast(MaskVT, V2);
33757    SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true);
33758    Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
33759                      DAG.getTargetConstant(M2ZImm, DL, MVT::i8));
33760    return DAG.getBitcast(RootVT, Res);
33761  }
33762
33763  // If we have 3 or more shuffle instructions or a chain involving a variable
33764  // mask, we can replace them with a single PSHUFB instruction profitably.
33765  // Intel's manuals suggest only using PSHUFB if doing so replaces 5
33766  // instructions, but in practice PSHUFB tends to be *very* fast so we're
33767  // more aggressive.
33768  if (UnaryShuffle && AllowVariableMask &&
33769      ((RootVT.is128BitVector() && Subtarget.hasSSSE3()) ||
33770       (RootVT.is256BitVector() && Subtarget.hasAVX2()) ||
33771       (RootVT.is512BitVector() && Subtarget.hasBWI()))) {
33772    SmallVector<SDValue, 16> PSHUFBMask;
33773    int NumBytes = RootVT.getSizeInBits() / 8;
33774    int Ratio = NumBytes / NumMaskElts;
33775    for (int i = 0; i < NumBytes; ++i) {
33776      int M = Mask[i / Ratio];
33777      if (M == SM_SentinelUndef) {
33778        PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
33779        continue;
33780      }
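      // PSHUFB zeroes a destination byte whenever the mask byte's MSB is set,
      // so 255 (0xFF) marks a zeroed element.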
33781      if (M == SM_SentinelZero) {
33782        PSHUFBMask.push_back(DAG.getConstant(255, DL, MVT::i8));
33783        continue;
33784      }
33785      M = Ratio * M + i % Ratio;
33786      assert((M / 16) == (i / 16) && "Lane crossing detected");
33787      PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
33788    }
33789    MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
33790    Res = DAG.getBitcast(ByteVT, V1);
33791    SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
33792    Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
33793    return DAG.getBitcast(RootVT, Res);
33794  }
33795
33796  // With XOP, if we have a 128-bit binary input shuffle we can always combine
33797  // to VPPERM. We match the depth requirement of PSHUFB - VPPERM is never
33798  // slower than PSHUFB on targets that support both.
33799  if (AllowVariableMask && RootVT.is128BitVector() && Subtarget.hasXOP()) {
33800    // VPPERM Mask Operation
33801    // Bits[4:0] - Byte Index (0 - 31)
33802    // Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO)
33803    SmallVector<SDValue, 16> VPPERMMask;
33804    int NumBytes = 16;
33805    int Ratio = NumBytes / NumMaskElts;
33806    for (int i = 0; i < NumBytes; ++i) {
33807      int M = Mask[i / Ratio];
33808      if (M == SM_SentinelUndef) {
33809        VPPERMMask.push_back(DAG.getUNDEF(MVT::i8));
33810        continue;
33811      }
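      // 128 (0x80) sets the permute-op field (bits[7:5]) to 4, i.e. ZERO.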
33812      if (M == SM_SentinelZero) {
33813        VPPERMMask.push_back(DAG.getConstant(128, DL, MVT::i8));
33814        continue;
33815      }
33816      M = Ratio * M + i % Ratio;
33817      VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8));
33818    }
33819    MVT ByteVT = MVT::v16i8;
33820    V1 = DAG.getBitcast(ByteVT, V1);
33821    V2 = DAG.getBitcast(ByteVT, V2);
33822    SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask);
33823    Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp);
33824    return DAG.getBitcast(RootVT, Res);
33825  }
33826
33827  // If that failed and either input is extracted then try to combine as a
33828  // shuffle with the larger type.
33829  if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
33830          Inputs, Root, BaseMask, Depth, HasVariableMask, AllowVariableMask,
33831          DAG, Subtarget))
33832    return WideShuffle;
33833
33834  // If we have a dual input shuffle then lower to VPERMV3.
33835  if (!UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
33836      ((Subtarget.hasAVX512() &&
33837        (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
33838         MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
33839       (Subtarget.hasVLX() &&
33840        (MaskVT == MVT::v2f64 || MaskVT == MVT::v2i64 || MaskVT == MVT::v4f64 ||
33841         MaskVT == MVT::v4i64 || MaskVT == MVT::v4f32 || MaskVT == MVT::v4i32 ||
33842         MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
33843       (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
33844       (Subtarget.hasBWI() && Subtarget.hasVLX() &&
33845        (MaskVT == MVT::v8i16 || MaskVT == MVT::v16i16)) ||
33846       (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
33847       (Subtarget.hasVBMI() && Subtarget.hasVLX() &&
33848        (MaskVT == MVT::v16i8 || MaskVT == MVT::v32i8)))) {
33849    SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
33850    V1 = DAG.getBitcast(MaskVT, V1);
33851    V2 = DAG.getBitcast(MaskVT, V2);
33852    Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
33853    return DAG.getBitcast(RootVT, Res);
33854  }
33855
33856  // Failed to find any combines.
33857  return SDValue();
33858}
33859
33860// Combine an arbitrary chain of shuffles + extract_subvectors into a single
33861// instruction if possible.
33862//
33863// Wrapper for combineX86ShuffleChain that extends the shuffle mask to a larger
33864// type size to attempt to combine:
33865// shuffle(extract_subvector(x,c1),extract_subvector(y,c2),m1)
33866// -->
33867// extract_subvector(shuffle(x,y,m2),0)
33868static SDValue combineX86ShuffleChainWithExtract(
33869    ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
33870    bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
33871    const X86Subtarget &Subtarget) {
33872  unsigned NumMaskElts = BaseMask.size();
33873  unsigned NumInputs = Inputs.size();
33874  if (NumInputs == 0)
33875    return SDValue();
33876
33877  SmallVector<SDValue, 4> WideInputs(Inputs.begin(), Inputs.end());
33878  SmallVector<unsigned, 4> Offsets(NumInputs, 0);
33879
33880  // Peek through subvectors.
33881  // TODO: Support inter-mixed EXTRACT_SUBVECTORs + BITCASTs?
33882  unsigned WideSizeInBits = WideInputs[0].getValueSizeInBits();
33883  for (unsigned i = 0; i != NumInputs; ++i) {
33884    SDValue &Src = WideInputs[i];
33885    unsigned &Offset = Offsets[i];
33886    Src = peekThroughBitcasts(Src);
33887    EVT BaseVT = Src.getValueType();
33888    while (Src.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
33889           isa<ConstantSDNode>(Src.getOperand(1))) {
33890      Offset += Src.getConstantOperandVal(1);
33891      Src = Src.getOperand(0);
33892    }
33893    WideSizeInBits = std::max(WideSizeInBits,
33894                              (unsigned)Src.getValueSizeInBits());
33895    assert((Offset % BaseVT.getVectorNumElements()) == 0 &&
33896           "Unexpected subvector extraction");
33897    Offset /= BaseVT.getVectorNumElements();
33898    Offset *= NumMaskElts;
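    // Offset is now the subvector index scaled to root-mask elements (each
    // extracted subvector spans NumMaskElts positions in the wide mask).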
33899  }
33900
33901  // Bail if we're always extracting from the lowest subvectors;
33902  // combineX86ShuffleChain should match this for the current width.
33903  if (llvm::all_of(Offsets, [](unsigned Offset) { return Offset == 0; }))
33904    return SDValue();
33905
33906  EVT RootVT = Root.getValueType();
33907  unsigned RootSizeInBits = RootVT.getSizeInBits();
33908  unsigned Scale = WideSizeInBits / RootSizeInBits;
33909  assert((WideSizeInBits % RootSizeInBits) == 0 &&
33910         "Unexpected subvector extraction");
33911
33912  // If the src vector types aren't the same, see if we can extend
33913  // them to match each other.
33914  // TODO: Support different scalar types?
33915  EVT WideSVT = WideInputs[0].getValueType().getScalarType();
33916  if (llvm::any_of(WideInputs, [&WideSVT, &DAG](SDValue Op) {
33917        return !DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()) ||
33918               Op.getValueType().getScalarType() != WideSVT;
33919      }))
33920    return SDValue();
33921
33922  for (SDValue &NewInput : WideInputs) {
33923    assert((WideSizeInBits % NewInput.getValueSizeInBits()) == 0 &&
33924           "Shuffle vector size mismatch");
33925    if (WideSizeInBits > NewInput.getValueSizeInBits())
33926      NewInput = widenSubVector(NewInput, false, Subtarget, DAG,
33927                                SDLoc(NewInput), WideSizeInBits);
33928    assert(WideSizeInBits == NewInput.getValueSizeInBits() &&
33929           "Unexpected subvector extraction");
33930  }
33931
33932  // Create new mask for larger type.
33933  for (unsigned i = 1; i != NumInputs; ++i)
33934    Offsets[i] += i * Scale * NumMaskElts;
33935
33936  SmallVector<int, 64> WideMask(BaseMask.begin(), BaseMask.end());
33937  for (int &M : WideMask) {
33938    if (M < 0)
33939      continue;
33940    M = (M % NumMaskElts) + Offsets[M / NumMaskElts];
33941  }
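  // Pad the mask with undefs so it covers the full widened vector width.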
33942  WideMask.append((Scale - 1) * NumMaskElts, SM_SentinelUndef);
33943
33944  // Remove unused/repeated shuffle source ops.
33945  resolveTargetShuffleInputsAndMask(WideInputs, WideMask);
33946  assert(!WideInputs.empty() && "Shuffle with no inputs detected");
33947
33948  if (WideInputs.size() > 2)
33949    return SDValue();
33950
33951  // Increase depth for every upper subvector we've peeked through.
33952  Depth += count_if(Offsets, [](unsigned Offset) { return Offset > 0; });
33953
33954  // Attempt to combine wider chain.
33955  // TODO: Can we use a better Root?
33956  SDValue WideRoot = WideInputs[0];
33957  if (SDValue WideShuffle = combineX86ShuffleChain(
33958          WideInputs, WideRoot, WideMask, Depth, HasVariableMask,
33959          AllowVariableMask, DAG, Subtarget)) {
33960    WideShuffle =
33961        extractSubVector(WideShuffle, 0, DAG, SDLoc(Root), RootSizeInBits);
33962    return DAG.getBitcast(RootVT, WideShuffle);
33963  }
33964  return SDValue();
33965}
33966
33967// Attempt to constant fold all of the constant source ops.
33968// Returns the folded vector if the entire shuffle folds to a constant.
33969// TODO: Extend this to merge multiple constant Ops and update the mask.
33970static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
33971                                           ArrayRef<int> Mask, SDValue Root,
33972                                           bool HasVariableMask,
33973                                           SelectionDAG &DAG,
33974                                           const X86Subtarget &Subtarget) {
33975  MVT VT = Root.getSimpleValueType();
33976
33977  unsigned SizeInBits = VT.getSizeInBits();
33978  unsigned NumMaskElts = Mask.size();
33979  unsigned MaskSizeInBits = SizeInBits / NumMaskElts;
33980  unsigned NumOps = Ops.size();
33981
33982  // Extract constant bits from each source op.
33983  bool OneUseConstantOp = false;
33984  SmallVector<APInt, 16> UndefEltsOps(NumOps);
33985  SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
33986  for (unsigned i = 0; i != NumOps; ++i) {
33987    SDValue SrcOp = Ops[i];
33988    OneUseConstantOp |= SrcOp.hasOneUse();
33989    if (!getTargetConstantBitsFromNode(SrcOp, MaskSizeInBits, UndefEltsOps[i],
33990                                       RawBitsOps[i]))
33991      return SDValue();
33992  }
33993
33994  // Only fold if at least one of the constants has a single use or the
33995  // combined shuffle includes a variable mask shuffle; this avoids constant
33996  // pool bloat.
33997  if (!OneUseConstantOp && !HasVariableMask)
33998    return SDValue();
33999
34000  // Shuffle the constant bits according to the mask.
34001  APInt UndefElts(NumMaskElts, 0);
34002  APInt ZeroElts(NumMaskElts, 0);
34003  APInt ConstantElts(NumMaskElts, 0);
34004  SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
34005                                        APInt::getNullValue(MaskSizeInBits));
34006  for (unsigned i = 0; i != NumMaskElts; ++i) {
34007    int M = Mask[i];
34008    if (M == SM_SentinelUndef) {
34009      UndefElts.setBit(i);
34010      continue;
34011    } else if (M == SM_SentinelZero) {
34012      ZeroElts.setBit(i);
34013      continue;
34014    }
34015    assert(0 <= M && M < (int)(NumMaskElts * NumOps));
34016
34017    unsigned SrcOpIdx = (unsigned)M / NumMaskElts;
34018    unsigned SrcMaskIdx = (unsigned)M % NumMaskElts;
34019
34020    auto &SrcUndefElts = UndefEltsOps[SrcOpIdx];
34021    if (SrcUndefElts[SrcMaskIdx]) {
34022      UndefElts.setBit(i);
34023      continue;
34024    }
34025
34026    auto &SrcEltBits = RawBitsOps[SrcOpIdx];
34027    APInt &Bits = SrcEltBits[SrcMaskIdx];
34028    if (!Bits) {
34029      ZeroElts.setBit(i);
34030      continue;
34031    }
34032
34033    ConstantElts.setBit(i);
34034    ConstantBitData[i] = Bits;
34035  }
34036  assert((UndefElts | ZeroElts | ConstantElts).isAllOnesValue());
34037
34038  // Create the constant data.
34039  MVT MaskSVT;
34040  if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
34041    MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits);
34042  else
34043    MaskSVT = MVT::getIntegerVT(MaskSizeInBits);
34044
34045  MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);
34046
34047  SDLoc DL(Root);
34048  SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
34049  return DAG.getBitcast(VT, CstOp);
34050}
34051
34052/// Fully generic combining of x86 shuffle instructions.
34053///
34054/// This should be the last combine run over the x86 shuffle instructions. Once
34055/// they have been fully optimized, this will recursively consider all chains
34056/// of single-use shuffle instructions, build a generic model of the cumulative
34057/// shuffle operation, and check for simpler instructions which implement this
34058/// operation. We use this primarily for two purposes:
34059///
34060/// 1) Collapse generic shuffles to specialized single instructions when
34061///    equivalent. In most cases, this is just an encoding size win, but
34062///    sometimes we will collapse multiple generic shuffles into a single
34063///    special-purpose shuffle.
34064/// 2) Look for sequences of shuffle instructions with 3 or more total
34065///    instructions, and replace them with the slightly more expensive SSSE3
34066///    PSHUFB instruction if available. We do this as the last combining step
34067///    to ensure we avoid using PSHUFB if we can implement the shuffle with
34068///    a suitable short sequence of other instructions. The PSHUFB will either
34069///    use a register or have to read from memory and so is slightly (but only
34070///    slightly) more expensive than the other shuffle instructions.
34071///
34072/// Because this is inherently a quadratic operation (for each shuffle in
34073/// a chain, we recurse up the chain), the depth is limited to 8 instructions.
34074/// This should never be an issue in practice as the shuffle lowering doesn't
34075/// produce sequences of more than 8 instructions.
34076///
34077/// FIXME: We will currently miss some cases where the redundant shuffling
34078/// would simplify under the threshold for PSHUFB formation because of
34079/// combine-ordering. To fix this, we should do the redundant instruction
34080/// combining in this recursive walk.
34081static SDValue combineX86ShufflesRecursively(
34082    ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root,
34083    ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth,
34084    bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
34085    const X86Subtarget &Subtarget) {
34086  assert(RootMask.size() > 0 &&
34087         (RootMask.size() > 1 || (RootMask[0] == 0 && SrcOpIndex == 0)) &&
34088         "Illegal shuffle root mask");
34089
34090  // Bound the depth of our recursive combine because this is ultimately
34091  // quadratic in nature.
34092  const unsigned MaxRecursionDepth = 8;
34093  if (Depth >= MaxRecursionDepth)
34094    return SDValue();
34095
34096  // Directly rip through bitcasts to find the underlying operand.
34097  SDValue Op = SrcOps[SrcOpIndex];
34098  Op = peekThroughOneUseBitcasts(Op);
34099
34100  MVT VT = Op.getSimpleValueType();
34101  if (!VT.isVector())
34102    return SDValue(); // Bail if we hit a non-vector.
34103
34104  assert(Root.getSimpleValueType().isVector() &&
34105         "Shuffles operate on vector types!");
34106  assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
34107         "Can only combine shuffles of the same vector register size.");
34108
34109  // Extract target shuffle mask and resolve sentinels and inputs.
34110  // TODO - determine Op's demanded elts from RootMask.
34111  SmallVector<int, 64> OpMask;
34112  SmallVector<SDValue, 2> OpInputs;
34113  APInt OpUndef, OpZero;
34114  APInt OpDemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
34115  bool IsOpVariableMask = isTargetShuffleVariableMask(Op.getOpcode());
34116  if (!getTargetShuffleInputs(Op, OpDemandedElts, OpInputs, OpMask, OpUndef,
34117                              OpZero, DAG, Depth, false))
34118    return SDValue();
34119
34120  SmallVector<int, 64> Mask;
34121  SmallVector<SDValue, 16> Ops;
34122
34123  // We don't need to merge masks if the root is empty.
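  // (The entry wrapper starts the recursion with the trivial {0} root mask.)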
34124  bool EmptyRoot = (Depth == 0) && (RootMask.size() == 1);
34125  if (EmptyRoot) {
34126    // Only resolve zeros if it will remove an input; otherwise we might end
34127    // up in an infinite loop.
34128    bool ResolveKnownZeros = true;
34129    if (!OpZero.isNullValue()) {
34130      APInt UsedInputs = APInt::getNullValue(OpInputs.size());
34131      for (int i = 0, e = OpMask.size(); i != e; ++i) {
34132        int M = OpMask[i];
34133        if (OpUndef[i] || OpZero[i] || isUndefOrZero(M))
34134          continue;
34135        UsedInputs.setBit(M / OpMask.size());
34136        if (UsedInputs.isAllOnesValue()) {
34137          ResolveKnownZeros = false;
34138          break;
34139        }
34140      }
34141    }
34142    resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero,
34143                                      ResolveKnownZeros);
34144
34145    Mask = OpMask;
34146    Ops.append(OpInputs.begin(), OpInputs.end());
34147  } else {
34148    resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero);
34149
34150    // Add the inputs to the Ops list, avoiding duplicates.
34151    Ops.append(SrcOps.begin(), SrcOps.end());
34152
34153    auto AddOp = [&Ops](SDValue Input, int InsertionPoint) -> int {
34154      // Attempt to find an existing match.
34155      SDValue InputBC = peekThroughBitcasts(Input);
34156      for (int i = 0, e = Ops.size(); i < e; ++i)
34157        if (InputBC == peekThroughBitcasts(Ops[i]))
34158          return i;
34159      // Match failed - should we replace an existing Op?
34160      if (InsertionPoint >= 0) {
34161        Ops[InsertionPoint] = Input;
34162        return InsertionPoint;
34163      }
34164      // Add to the end of the Ops list.
34165      Ops.push_back(Input);
34166      return Ops.size() - 1;
34167    };
34168
34169    SmallVector<int, 2> OpInputIdx;
34170    for (SDValue OpInput : OpInputs)
34171      OpInputIdx.push_back(
34172          AddOp(OpInput, OpInputIdx.empty() ? SrcOpIndex : -1));
34173
34174    assert(((RootMask.size() > OpMask.size() &&
34175             RootMask.size() % OpMask.size() == 0) ||
34176            (OpMask.size() > RootMask.size() &&
34177             OpMask.size() % RootMask.size() == 0) ||
34178            OpMask.size() == RootMask.size()) &&
34179           "The smaller number of elements must divide the larger.");
34180
34181    // This function can be performance-critical, so we rely on the power-of-2
34182    // knowledge that we have about the mask sizes to replace div/rem ops with
34183    // bit-masks and shifts.
34184    assert(isPowerOf2_32(RootMask.size()) &&
34185           "Non-power-of-2 shuffle mask sizes");
34186    assert(isPowerOf2_32(OpMask.size()) && "Non-power-of-2 shuffle mask sizes");
34187    unsigned RootMaskSizeLog2 = countTrailingZeros(RootMask.size());
34188    unsigned OpMaskSizeLog2 = countTrailingZeros(OpMask.size());
34189
34190    unsigned MaskWidth = std::max<unsigned>(OpMask.size(), RootMask.size());
34191    unsigned RootRatio =
34192        std::max<unsigned>(1, OpMask.size() >> RootMaskSizeLog2);
34193    unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpMaskSizeLog2);
34194    assert((RootRatio == 1 || OpRatio == 1) &&
34195           "Must not have a ratio for both incoming and op masks!");
34196
34197    assert(isPowerOf2_32(MaskWidth) && "Non-power-of-2 shuffle mask sizes");
34198    assert(isPowerOf2_32(RootRatio) && "Non-power-of-2 shuffle mask sizes");
34199    assert(isPowerOf2_32(OpRatio) && "Non-power-of-2 shuffle mask sizes");
34200    unsigned RootRatioLog2 = countTrailingZeros(RootRatio);
34201    unsigned OpRatioLog2 = countTrailingZeros(OpRatio);
34202
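    // Build the merged mask at the granularity of the finer (wider) of the two
    // masks.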
34203    Mask.resize(MaskWidth, SM_SentinelUndef);
34204
34205    // Merge this shuffle operation's mask into our accumulated mask. Note that
34206    // this shuffle's mask will be the first applied to the input, followed by
34207    // the root mask to get us all the way to the root value arrangement. The
34208    // reason for this order is that we are recursing up the operation chain.
34209    for (unsigned i = 0; i < MaskWidth; ++i) {
34210      unsigned RootIdx = i >> RootRatioLog2;
34211      if (RootMask[RootIdx] < 0) {
34212        // This is a zero or undef lane; we're done.
34213        Mask[i] = RootMask[RootIdx];
34214        continue;
34215      }
34216
34217      unsigned RootMaskedIdx =
34218          RootRatio == 1
34219              ? RootMask[RootIdx]
34220              : (RootMask[RootIdx] << RootRatioLog2) + (i & (RootRatio - 1));
34221
34222      // Just insert the scaled root mask value if it references an input other
34223      // than the SrcOp we're currently inserting.
34224      if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) ||
34225          (((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) {
34226        Mask[i] = RootMaskedIdx;
34227        continue;
34228      }
34229
34230      RootMaskedIdx = RootMaskedIdx & (MaskWidth - 1);
34231      unsigned OpIdx = RootMaskedIdx >> OpRatioLog2;
34232      if (OpMask[OpIdx] < 0) {
34233        // The incoming lanes are zero or undef; it doesn't matter which ones we
34234        // are using.
34235        Mask[i] = OpMask[OpIdx];
34236        continue;
34237      }
34238
34239      // Ok, we have non-zero lanes, map them through to one of the Op's inputs.
34240      unsigned OpMaskedIdx = OpRatio == 1 ? OpMask[OpIdx]
34241                                          : (OpMask[OpIdx] << OpRatioLog2) +
34242                                                (RootMaskedIdx & (OpRatio - 1));
34243
34244      OpMaskedIdx = OpMaskedIdx & (MaskWidth - 1);
34245      int InputIdx = OpMask[OpIdx] / (int)OpMask.size();
34246      assert(0 <= OpInputIdx[InputIdx] && "Unknown target shuffle input");
34247      OpMaskedIdx += OpInputIdx[InputIdx] * MaskWidth;
34248
34249      Mask[i] = OpMaskedIdx;
34250    }
34251  }
34252
34253  // Remove unused/repeated shuffle source ops.
34254  resolveTargetShuffleInputsAndMask(Ops, Mask);
34255
34256  // Handle the all undef/zero cases early.
34257  if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; }))
34258    return DAG.getUNDEF(Root.getValueType());
34259
34260  // TODO - should we handle the mixed zero/undef case as well? Just returning
34261  // a zero mask will lose information on undef elements, possibly reducing
34262  // future combine possibilities.
34263  if (all_of(Mask, [](int Idx) { return Idx < 0; }))
34264    return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG,
34265                         SDLoc(Root));
34266
34267  assert(!Ops.empty() && "Shuffle with no inputs detected");
34268  HasVariableMask |= IsOpVariableMask;
34269
34270  // Update the list of shuffle nodes that have been combined so far.
34271  SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(),
34272                                                SrcNodes.end());
34273  CombinedNodes.push_back(Op.getNode());
34274
34275  // See if we can recurse into each shuffle source op (if it's a target
34276  // shuffle). The source op should only be generally combined if it either has
34277  // a single use (i.e. the current Op) or all its users have already been
34278  // combined; if not, we can still combine but should prevent generation of
34279  // variable shuffles to avoid constant pool bloat.
34280  // Don't recurse if we already have more source ops than we can combine in
34281  // the remaining recursion depth.
34282  if (Ops.size() < (MaxRecursionDepth - Depth)) {
34283    for (int i = 0, e = Ops.size(); i < e; ++i) {
34284      // For empty roots, we need to resolve zeroable elements before combining
34285      // them with other shuffles.
34286      SmallVector<int, 64> ResolvedMask = Mask;
34287      if (EmptyRoot)
34288        resolveTargetShuffleFromZeroables(ResolvedMask, OpUndef, OpZero);
34289      bool AllowVar = false;
34290      if (Ops[i].getNode()->hasOneUse() ||
34291          SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode()))
34292        AllowVar = AllowVariableMask;
34293      if (SDValue Res = combineX86ShufflesRecursively(
34294              Ops, i, Root, ResolvedMask, CombinedNodes, Depth + 1,
34295              HasVariableMask, AllowVar, DAG, Subtarget))
34296        return Res;
34297    }
34298  }
34299
34300  // Attempt to constant fold all of the constant source ops.
34301  if (SDValue Cst = combineX86ShufflesConstants(
34302          Ops, Mask, Root, HasVariableMask, DAG, Subtarget))
34303    return Cst;
34304
34305  // We can only combine unary and binary shuffle mask cases.
34306  if (Ops.size() <= 2) {
34307    // Minor canonicalization of the accumulated shuffle mask to make it easier
34308    // to match below. All this does is detect masks with sequential pairs of
34309    // elements, and shrink them to the half-width mask. It does this in a loop
34310    // so it will reduce the size of the mask to the minimal width mask which
34311    // performs an equivalent shuffle.
34312    SmallVector<int, 64> WidenedMask;
34313    while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
34314      Mask = std::move(WidenedMask);
34315    }
34316
34317    // Canonicalization of binary shuffle masks to improve pattern matching by
34318    // commuting the inputs.
34319    if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
34320      ShuffleVectorSDNode::commuteMask(Mask);
34321      std::swap(Ops[0], Ops[1]);
34322    }
34323
34324    // Finally, try to combine into a single shuffle instruction.
34325    return combineX86ShuffleChain(Ops, Root, Mask, Depth, HasVariableMask,
34326                                  AllowVariableMask, DAG, Subtarget);
34327  }
34328
34329  // If that failed and any input is extracted then try to combine as a
34330  // shuffle with the larger type.
34331  return combineX86ShuffleChainWithExtract(Ops, Root, Mask, Depth,
34332                                           HasVariableMask, AllowVariableMask,
34333                                           DAG, Subtarget);
34334}
34335
34336/// Helper entry wrapper to combineX86ShufflesRecursively.
34337static SDValue combineX86ShufflesRecursively(SDValue Op, SelectionDAG &DAG,
34338                                             const X86Subtarget &Subtarget) {
34339  return combineX86ShufflesRecursively({Op}, 0, Op, {0}, {}, /*Depth*/ 0,
34340                                       /*HasVarMask*/ false,
34341                                       /*AllowVarMask*/ true, DAG, Subtarget);
34342}
34343
34344/// Get the PSHUF-style mask from a PSHUF node.
34345///
34346/// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
34347/// PSHUF-style masks that can be reused with such instructions.
34348static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
34349  MVT VT = N.getSimpleValueType();
34350  SmallVector<int, 4> Mask;
34351  SmallVector<SDValue, 2> Ops;
34352  bool IsUnary;
34353  bool HaveMask =
34354      getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask, IsUnary);
34355  (void)HaveMask;
34356  assert(HaveMask);
34357
34358  // If we have more than 128 bits, only the low 128 bits of the shuffle mask
34359  // matter. Check that the upper masks are repeats and remove them.
34360  if (VT.getSizeInBits() > 128) {
34361    int LaneElts = 128 / VT.getScalarSizeInBits();
34362#ifndef NDEBUG
34363    for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
34364      for (int j = 0; j < LaneElts; ++j)
34365        assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
34366               "Mask doesn't repeat in high 128-bit lanes!");
34367#endif
34368    Mask.resize(LaneElts);
34369  }
34370
34371  switch (N.getOpcode()) {
34372  case X86ISD::PSHUFD:
34373    return Mask;
34374  case X86ISD::PSHUFLW:
34375    Mask.resize(4);
34376    return Mask;
34377  case X86ISD::PSHUFHW:
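    // PSHUFHW only permutes the high 4 words; drop the low half of the mask and
    // rebase the remaining indices to 0..3.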
34378    Mask.erase(Mask.begin(), Mask.begin() + 4);
34379    for (int &M : Mask)
34380      M -= 4;
34381    return Mask;
34382  default:
34383    llvm_unreachable("No valid shuffle instruction found!");
34384  }
34385}
34386
34387/// Search for a combinable shuffle across a chain ending in pshufd.
34388///
34389/// We walk up the chain and look for a combinable shuffle, skipping over
34390/// shuffles that we could hoist this shuffle's transformation past without
34391/// altering anything.
34392static SDValue
34393combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
34394                             SelectionDAG &DAG) {
34395  assert(N.getOpcode() == X86ISD::PSHUFD &&
34396         "Called with something other than an x86 128-bit half shuffle!");
34397  SDLoc DL(N);
34398
34399  // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
34400  // of the shuffles in the chain so that we can form a fresh chain to replace
34401  // this one.
34402  SmallVector<SDValue, 8> Chain;
34403  SDValue V = N.getOperand(0);
34404  for (; V.hasOneUse(); V = V.getOperand(0)) {
34405    switch (V.getOpcode()) {
34406    default:
34407      return SDValue(); // Nothing combined!
34408
34409    case ISD::BITCAST:
34410      // Skip bitcasts as we always know the type for the target specific
34411      // instructions.
34412      continue;
34413
34414    case X86ISD::PSHUFD:
34415      // Found another dword shuffle.
34416      break;
34417
34418    case X86ISD::PSHUFLW:
34419      // Check that the low words (being shuffled) are the identity in the
34420      // dword shuffle, and the high words are self-contained.
34421      if (Mask[0] != 0 || Mask[1] != 1 ||
34422          !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
34423        return SDValue();
34424
34425      Chain.push_back(V);
34426      continue;
34427
34428    case X86ISD::PSHUFHW:
34429      // Check that the high words (being shuffled) are the identity in the
34430      // dword shuffle, and the low words are self-contained.
34431      if (Mask[2] != 2 || Mask[3] != 3 ||
34432          !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
34433        return SDValue();
34434
34435      Chain.push_back(V);
34436      continue;
34437
34438    case X86ISD::UNPCKL:
34439    case X86ISD::UNPCKH:
34440      // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
34441      // shuffle into a preceding word shuffle.
34442      if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
34443          V.getSimpleValueType().getVectorElementType() != MVT::i16)
34444        return SDValue();
34445
34446      // Search for a half-shuffle which we can combine with.
34447      unsigned CombineOp =
34448          V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
34449      if (V.getOperand(0) != V.getOperand(1) ||
34450          !V->isOnlyUserOf(V.getOperand(0).getNode()))
34451        return SDValue();
34452      Chain.push_back(V);
34453      V = V.getOperand(0);
34454      do {
34455        switch (V.getOpcode()) {
34456        default:
34457          return SDValue(); // Nothing to combine.
34458
34459        case X86ISD::PSHUFLW:
34460        case X86ISD::PSHUFHW:
34461          if (V.getOpcode() == CombineOp)
34462            break;
34463
34464          Chain.push_back(V);
34465
34466          LLVM_FALLTHROUGH;
34467        case ISD::BITCAST:
34468          V = V.getOperand(0);
34469          continue;
34470        }
34471        break;
34472      } while (V.hasOneUse());
34473      break;
34474    }
34475    // Break out of the loop if we break out of the switch.
34476    break;
34477  }
34478
34479  if (!V.hasOneUse())
34480    // We fell out of the loop without finding a viable combining instruction.
34481    return SDValue();
34482
34483  // Merge this node's mask and our incoming mask.
34484  SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
34485  for (int &M : Mask)
34486    M = VMask[M];
34487  V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
34488                  getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
34489
34490  // Rebuild the chain around this new shuffle.
34491  while (!Chain.empty()) {
34492    SDValue W = Chain.pop_back_val();
34493
34494    if (V.getValueType() != W.getOperand(0).getValueType())
34495      V = DAG.getBitcast(W.getOperand(0).getValueType(), V);
34496
34497    switch (W.getOpcode()) {
34498    default:
34499      llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
34500
34501    case X86ISD::UNPCKL:
34502    case X86ISD::UNPCKH:
34503      V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
34504      break;
34505
34506    case X86ISD::PSHUFD:
34507    case X86ISD::PSHUFLW:
34508    case X86ISD::PSHUFHW:
34509      V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
34510      break;
34511    }
34512  }
34513  if (V.getValueType() != N.getValueType())
34514    V = DAG.getBitcast(N.getValueType(), V);
34515
34516  // Return the new chain to replace N.
34517  return V;
34518}
34519
34520/// Try to combine x86 target specific shuffles.
34521static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
34522                                    TargetLowering::DAGCombinerInfo &DCI,
34523                                    const X86Subtarget &Subtarget) {
34524  SDLoc DL(N);
34525  MVT VT = N.getSimpleValueType();
34526  SmallVector<int, 4> Mask;
34527  unsigned Opcode = N.getOpcode();
34528
34529  // Combine binary shuffle of 2 similar 'Horizontal' instructions into a
34530  // single instruction.
34531  if (VT.getScalarSizeInBits() == 64 &&
34532      (Opcode == X86ISD::MOVSD || Opcode == X86ISD::UNPCKH ||
34533       Opcode == X86ISD::UNPCKL)) {
34534    auto BC0 = peekThroughBitcasts(N.getOperand(0));
34535    auto BC1 = peekThroughBitcasts(N.getOperand(1));
34536    EVT VT0 = BC0.getValueType();
34537    EVT VT1 = BC1.getValueType();
34538    unsigned Opcode0 = BC0.getOpcode();
34539    unsigned Opcode1 = BC1.getOpcode();
34540    if (Opcode0 == Opcode1 && VT0 == VT1 &&
34541        (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
34542         Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB ||
34543         Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS)) {
34544      SDValue Lo, Hi;
34545      if (Opcode == X86ISD::MOVSD) {
34546        Lo = BC1.getOperand(0);
34547        Hi = BC0.getOperand(1);
34548      } else {
34549        Lo = BC0.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0);
34550        Hi = BC1.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0);
34551      }
34552      SDValue Horiz = DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
34553      return DAG.getBitcast(VT, Horiz);
34554    }
34555  }
34556
34557  switch (Opcode) {
34558  case X86ISD::VBROADCAST: {
34559    SDValue Src = N.getOperand(0);
34560    SDValue BC = peekThroughBitcasts(Src);
34561    EVT SrcVT = Src.getValueType();
34562    EVT BCVT = BC.getValueType();
34563
34564    // If broadcasting from another shuffle, attempt to simplify it.
34565    // TODO - we really need a general SimplifyDemandedVectorElts mechanism.
34566    if (isTargetShuffle(BC.getOpcode()) &&
34567        VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) {
34568      unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
34569      SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
34570                                        SM_SentinelUndef);
34571      for (unsigned i = 0; i != Scale; ++i)
34572        DemandedMask[i] = i;
34573      if (SDValue Res = combineX86ShufflesRecursively(
34574              {BC}, 0, BC, DemandedMask, {}, /*Depth*/ 0,
34575              /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
34576        return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
34577                           DAG.getBitcast(SrcVT, Res));
34578    }
34579
34580    // broadcast(bitcast(src)) -> bitcast(broadcast(src))
34581    // 32-bit targets have to bitcast i64 to f64, so better to bitcast upward.
34582    if (Src.getOpcode() == ISD::BITCAST &&
34583        SrcVT.getScalarSizeInBits() == BCVT.getScalarSizeInBits()) {
34584      EVT NewVT = EVT::getVectorVT(*DAG.getContext(), BCVT.getScalarType(),
34585                                   VT.getVectorNumElements());
34586      return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
34587    }
34588
34589    // Reduce broadcast source vector to lowest 128-bits.
34590    if (SrcVT.getSizeInBits() > 128)
34591      return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
34592                         extract128BitVector(Src, 0, DAG, DL));
34593
34594    // broadcast(scalar_to_vector(x)) -> broadcast(x).
34595    if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR)
34596      return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
34597
34598    // Share broadcast with the longest vector and extract low subvector (free).
34599    for (SDNode *User : Src->uses())
34600      if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
34601          User->getValueSizeInBits(0) > VT.getSizeInBits()) {
34602        return extractSubVector(SDValue(User, 0), 0, DAG, DL,
34603                                VT.getSizeInBits());
34604      }
34605
34606    // vbroadcast(scalarload X) -> vbroadcast_load X
34607    // For float loads, extract other uses of the scalar from the broadcast.
34608    if (!SrcVT.isVector() && (Src.hasOneUse() || VT.isFloatingPoint()) &&
34609        ISD::isNormalLoad(Src.getNode())) {
34610      LoadSDNode *LN = cast<LoadSDNode>(Src);
34611      SDVTList Tys = DAG.getVTList(VT, MVT::Other);
34612      SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
34613      SDValue BcastLd =
34614          DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
34615                                  LN->getMemoryVT(), LN->getMemOperand());
34616      // If the load value is used only by N, replace it via CombineTo N.
34617      bool NoReplaceExtract = Src.hasOneUse();
34618      DCI.CombineTo(N.getNode(), BcastLd);
34619      if (NoReplaceExtract) {
34620        DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
34621        DCI.recursivelyDeleteUnusedNodes(LN);
34622      } else {
34623        SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT, BcastLd,
34624                                  DAG.getIntPtrConstant(0, DL));
34625        DCI.CombineTo(LN, Scl, BcastLd.getValue(1));
34626      }
34627      return N; // Return N so it doesn't get rechecked!
34628    }
34629
34630    return SDValue();
34631  }
34632  case X86ISD::BLENDI: {
34633    SDValue N0 = N.getOperand(0);
34634    SDValue N1 = N.getOperand(1);
34635
34636    // blend(bitcast(x),bitcast(y)) -> bitcast(blend(x,y)) to narrower types.
34637    // TODO: Handle MVT::v16i16 repeated blend mask.
34638    if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
34639        N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
34640      MVT SrcVT = N0.getOperand(0).getSimpleValueType();
34641      if ((VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
34642          SrcVT.getScalarSizeInBits() >= 32) {
34643        unsigned BlendMask = N.getConstantOperandVal(2);
34644        unsigned Size = VT.getVectorNumElements();
34645        unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
34646        BlendMask = scaleVectorShuffleBlendMask(BlendMask, Size, Scale);
34647        return DAG.getBitcast(
34648            VT, DAG.getNode(X86ISD::BLENDI, DL, SrcVT, N0.getOperand(0),
34649                            N1.getOperand(0),
34650                            DAG.getTargetConstant(BlendMask, DL, MVT::i8)));
34651      }
34652    }
34653    return SDValue();
34654  }
34655  case X86ISD::VPERMI: {
34656    // vpermi(bitcast(x)) -> bitcast(vpermi(x)) for same number of elements.
34657    // TODO: Remove when we have preferred domains in combineX86ShuffleChain.
34658    SDValue N0 = N.getOperand(0);
34659    SDValue N1 = N.getOperand(1);
34660    unsigned EltSizeInBits = VT.getScalarSizeInBits();
34661    if (N0.getOpcode() == ISD::BITCAST &&
34662        N0.getOperand(0).getScalarValueSizeInBits() == EltSizeInBits) {
34663      SDValue Src = N0.getOperand(0);
34664      EVT SrcVT = Src.getValueType();
34665      SDValue Res = DAG.getNode(X86ISD::VPERMI, DL, SrcVT, Src, N1);
34666      return DAG.getBitcast(VT, Res);
34667    }
34668    return SDValue();
34669  }
34670  case X86ISD::PSHUFD:
34671  case X86ISD::PSHUFLW:
34672  case X86ISD::PSHUFHW:
34673    Mask = getPSHUFShuffleMask(N);
34674    assert(Mask.size() == 4);
34675    break;
34676  case X86ISD::MOVSD:
34677  case X86ISD::MOVSS: {
34678    SDValue N0 = N.getOperand(0);
34679    SDValue N1 = N.getOperand(1);
34680
34681    // Canonicalize scalar FPOps:
34682    // MOVS*(N0, OP(N0, N1)) --> MOVS*(N0, SCALAR_TO_VECTOR(OP(N0[0], N1[0])))
34683    // If commutable, allow OP(N1[0], N0[0]).
34684    unsigned Opcode1 = N1.getOpcode();
34685    if (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL || Opcode1 == ISD::FSUB ||
34686        Opcode1 == ISD::FDIV) {
34687      SDValue N10 = N1.getOperand(0);
34688      SDValue N11 = N1.getOperand(1);
34689      if (N10 == N0 ||
34690          (N11 == N0 && (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL))) {
34691        if (N10 != N0)
34692          std::swap(N10, N11);
34693        MVT SVT = VT.getVectorElementType();
34694        SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
34695        N10 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N10, ZeroIdx);
34696        N11 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N11, ZeroIdx);
34697        SDValue Scl = DAG.getNode(Opcode1, DL, SVT, N10, N11);
34698        SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
34699        return DAG.getNode(Opcode, DL, VT, N0, SclVec);
34700      }
34701    }
34702
34703    return SDValue();
34704  }
34705  case X86ISD::INSERTPS: {
34706    assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
34707    SDValue Op0 = N.getOperand(0);
34708    SDValue Op1 = N.getOperand(1);
34709    SDValue Op2 = N.getOperand(2);
34710    unsigned InsertPSMask = cast<ConstantSDNode>(Op2)->getZExtValue();
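    // INSERTPS imm8: bits[7:6] select the source element, bits[5:4] select the
    // destination element, and bits[3:0] form the zero mask.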
34711    unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
34712    unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
34713    unsigned ZeroMask = InsertPSMask & 0xF;
34714
34715    // If we zero out all elements from Op0 then we don't need to reference it.
34716    if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
34717      return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
34718                         DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
34719
34720    // If we zero out the element from Op1 then we don't need to reference it.
34721    if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
34722      return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
34723                         DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
34724
34725    // Attempt to merge insertps Op1 with an inner target shuffle node.
34726    SmallVector<int, 8> TargetMask1;
34727    SmallVector<SDValue, 2> Ops1;
34728    APInt KnownUndef1, KnownZero1;
34729    if (getTargetShuffleAndZeroables(Op1, TargetMask1, Ops1, KnownUndef1,
34730                                     KnownZero1)) {
34731      if (KnownUndef1[SrcIdx] || KnownZero1[SrcIdx]) {
34732        // Zero/UNDEF insertion - zero out element and remove dependency.
34733        InsertPSMask |= (1u << DstIdx);
34734        return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
34735                           DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
34736      }
34737      // Update insertps mask srcidx and reference the source input directly.
34738      int M = TargetMask1[SrcIdx];
34739      assert(0 <= M && M < 8 && "Shuffle index out of range");
34740      InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
34741      Op1 = Ops1[M < 4 ? 0 : 1];
34742      return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
34743                         DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
34744    }
34745
34746    // Attempt to merge insertps Op0 with an inner target shuffle node.
34747    SmallVector<int, 8> TargetMask0;
34748    SmallVector<SDValue, 2> Ops0;
34749    APInt KnownUndef0, KnownZero0;
34750    if (getTargetShuffleAndZeroables(Op0, TargetMask0, Ops0, KnownUndef0,
34751                                     KnownZero0)) {
34752      bool Updated = false;
34753      bool UseInput00 = false;
34754      bool UseInput01 = false;
34755      for (int i = 0; i != 4; ++i) {
34756        if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
34757          // No change if element is already zero or the inserted element.
34758          continue;
34759        } else if (KnownUndef0[i] || KnownZero0[i]) {
34760          // If the target mask is undef/zero then we must zero the element.
34761          InsertPSMask |= (1u << i);
34762          Updated = true;
34763          continue;
34764        }
34765
34766        // The input vector element must be inline.
34767        int M = TargetMask0[i];
34768        if (M != i && M != (i + 4))
34769          return SDValue();
34770
34771        // Determine which inputs of the target shuffle we're using.
34772        UseInput00 |= (0 <= M && M < 4);
34773        UseInput01 |= (4 <= M);
34774      }
34775
34776      // If we're not using both inputs of the target shuffle then use the
34777      // referenced input directly.
34778      if (UseInput00 && !UseInput01) {
34779        Updated = true;
34780        Op0 = Ops0[0];
34781      } else if (!UseInput00 && UseInput01) {
34782        Updated = true;
34783        Op0 = Ops0[1];
34784      }
34785
34786      if (Updated)
34787        return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
34788                           DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
34789    }
34790
34791    // If we're inserting an element from a vbroadcast load, fold the
34792    // load into the X86insertps instruction. We need to convert the scalar
34793    // load to a vector and clear the source lane of the INSERTPS control.
34794    if (Op1.getOpcode() == X86ISD::VBROADCAST_LOAD && Op1.hasOneUse()) {
34795      auto *MemIntr = cast<MemIntrinsicSDNode>(Op1);
34796      if (MemIntr->getMemoryVT().getScalarSizeInBits() == 32) {
34797        SDValue Load = DAG.getLoad(MVT::f32, DL, MemIntr->getChain(),
34798                                   MemIntr->getBasePtr(),
34799                                   MemIntr->getMemOperand());
34800        SDValue Insert = DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0,
34801                           DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT,
34802                                       Load),
34803                           DAG.getTargetConstant(InsertPSMask & 0x3f, DL, MVT::i8));
34804        DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
34805        return Insert;
34806      }
34807    }
34808
34809    return SDValue();
34810  }
34811  default:
34812    return SDValue();
34813  }
34814
34815  // Nuke no-op shuffles that show up after combining.
34816  if (isNoopShuffleMask(Mask))
34817    return N.getOperand(0);
34818
34819  // Look for simplifications involving one or two shuffle instructions.
34820  SDValue V = N.getOperand(0);
34821  switch (N.getOpcode()) {
34822  default:
34823    break;
34824  case X86ISD::PSHUFLW:
34825  case X86ISD::PSHUFHW:
34826    assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");
34827
34828    // See if this reduces to a PSHUFD which is no more expensive and can
34829    // combine with more operations. Note that it has to at least flip the
34830    // dwords as otherwise it would have been removed as a no-op.
34831    if (makeArrayRef(Mask).equals({2, 3, 0, 1})) {
34832      int DMask[] = {0, 1, 2, 3};
34833      int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
34834      DMask[DOffset + 0] = DOffset + 1;
34835      DMask[DOffset + 1] = DOffset + 0;
34836      MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
34837      V = DAG.getBitcast(DVT, V);
34838      V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
34839                      getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
34840      return DAG.getBitcast(VT, V);
34841    }
34842
34843    // Look for shuffle patterns which can be implemented as a single unpack.
34844    // FIXME: This doesn't handle the location of the PSHUFD generically, and
34845    // only works when we have a PSHUFD followed by two half-shuffles.
34846    if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
34847        (V.getOpcode() == X86ISD::PSHUFLW ||
34848         V.getOpcode() == X86ISD::PSHUFHW) &&
34849        V.getOpcode() != N.getOpcode() &&
34850        V.hasOneUse()) {
34851      SDValue D = peekThroughOneUseBitcasts(V.getOperand(0));
34852      if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
34853        SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
34854        SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
34855        int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
34856        int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
34857        int WordMask[8];
34858        for (int i = 0; i < 4; ++i) {
34859          WordMask[i + NOffset] = Mask[i] + NOffset;
34860          WordMask[i + VOffset] = VMask[i] + VOffset;
34861        }
34862        // Map the word mask through the DWord mask.
34863        int MappedMask[8];
34864        for (int i = 0; i < 8; ++i)
34865          MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
34866        if (makeArrayRef(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
34867            makeArrayRef(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
34868          // We can replace all three shuffles with an unpack.
34869          V = DAG.getBitcast(VT, D.getOperand(0));
34870          return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
34871                                                : X86ISD::UNPCKH,
34872                             DL, VT, V, V);
34873        }
34874      }
34875    }
34876
34877    break;
34878
34879  case X86ISD::PSHUFD:
34880    if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG))
34881      return NewN;
34882
34883    break;
34884  }
34885
34886  return SDValue();
34887}
34888
34889/// Checks if the shuffle mask takes subsequent elements
34890/// alternately from two vectors.
34891/// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
34892static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) {
34893
34894  int ParitySrc[2] = {-1, -1};
34895  unsigned Size = Mask.size();
34896  for (unsigned i = 0; i != Size; ++i) {
34897    int M = Mask[i];
34898    if (M < 0)
34899      continue;
34900
34901    // Make sure we are using the matching element from the input.
34902    if ((M % Size) != i)
34903      return false;
34904
34905    // Make sure we use the same input for all elements of the same parity.
34906    int Src = M / Size;
34907    if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
34908      return false;
34909    ParitySrc[i % 2] = Src;
34910  }
34911
34912  // Make sure each input is used.
34913  if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
34914    return false;
34915
34916  Op0Even = ParitySrc[0] == 0;
34917  return true;
34918}
34919
34920/// Returns true iff the shuffle node \p N can be replaced with ADDSUB(SUBADD)
34921/// operation. If true is returned then the operands of ADDSUB(SUBADD) operation
34922/// are written to the parameters \p Opnd0 and \p Opnd1.
34923///
34924/// We combine shuffle to ADDSUB(SUBADD) directly on the abstract vector
34925/// shuffle nodes so it is easier to match generically. We also insert dummy
34926/// vector shuffle nodes for the operands which explicitly discard the lanes
34927/// that are unused by this operation, so the rest of the combiner can see
34928/// that they are unused.
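/// A minimal illustrative example (A and B are placeholders):
///   (shuffle (fsub A, B), (fadd A, B), <0,5,2,7>) --> (ADDSUB A, B)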
34929static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
34930                             SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1,
34931                             bool &IsSubAdd) {
34932
34933  EVT VT = N->getValueType(0);
34934  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
34935  if (!Subtarget.hasSSE3() || !TLI.isTypeLegal(VT) ||
34936      !VT.getSimpleVT().isFloatingPoint())
34937    return false;
34938
34939  // We only handle target-independent shuffles.
34940  // FIXME: It would be easy and harmless to use the target shuffle mask
34941  // extraction tool to support more.
34942  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
34943    return false;
34944
34945  SDValue V1 = N->getOperand(0);
34946  SDValue V2 = N->getOperand(1);
34947
34948  // Make sure we have an FADD and an FSUB.
34949  if ((V1.getOpcode() != ISD::FADD && V1.getOpcode() != ISD::FSUB) ||
34950      (V2.getOpcode() != ISD::FADD && V2.getOpcode() != ISD::FSUB) ||
34951      V1.getOpcode() == V2.getOpcode())
34952    return false;
34953
34954  // If there are other uses of these operations we can't fold them.
34955  if (!V1->hasOneUse() || !V2->hasOneUse())
34956    return false;
34957
34958  // Ensure that both operations have the same operands. Note that we can
34959  // commute the FADD operands.
34960  SDValue LHS, RHS;
34961  if (V1.getOpcode() == ISD::FSUB) {
34962    LHS = V1->getOperand(0); RHS = V1->getOperand(1);
34963    if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
34964        (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
34965      return false;
34966  } else {
34967    assert(V2.getOpcode() == ISD::FSUB && "Unexpected opcode");
34968    LHS = V2->getOperand(0); RHS = V2->getOperand(1);
34969    if ((V1->getOperand(0) != LHS || V1->getOperand(1) != RHS) &&
34970        (V1->getOperand(0) != RHS || V1->getOperand(1) != LHS))
34971      return false;
34972  }
34973
34974  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
34975  bool Op0Even;
34976  if (!isAddSubOrSubAddMask(Mask, Op0Even))
34977    return false;
34978
34979  // It's a subadd if the vector in the even parity is an FADD.
34980  IsSubAdd = Op0Even ? V1->getOpcode() == ISD::FADD
34981                     : V2->getOpcode() == ISD::FADD;
34982
34983  Opnd0 = LHS;
34984  Opnd1 = RHS;
34985  return true;
34986}
34987
34988/// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
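/// A minimal illustrative example (a, b, c are placeholders):
///   (shuffle (fma a, b, c), (X86Fmsub a, b, c), <0,5,2,7>)
///     --> (FMSUBADD a, b, c)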
34989static SDValue combineShuffleToFMAddSub(SDNode *N,
34990                                        const X86Subtarget &Subtarget,
34991                                        SelectionDAG &DAG) {
34992  // We only handle target-independent shuffles.
34993  // FIXME: It would be easy and harmless to use the target shuffle mask
34994  // extraction tool to support more.
34995  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
34996    return SDValue();
34997
34998  MVT VT = N->getSimpleValueType(0);
34999  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
35000  if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
35001    return SDValue();
35002
35003  // We're trying to match (shuffle fma(a, b, c), X86Fmsub(a, b, c)).
35004  SDValue Op0 = N->getOperand(0);
35005  SDValue Op1 = N->getOperand(1);
35006  SDValue FMAdd = Op0, FMSub = Op1;
35007  if (FMSub.getOpcode() != X86ISD::FMSUB)
35008    std::swap(FMAdd, FMSub);
35009
35010  if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB ||
35011      FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() ||
35012      FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() ||
35013      FMAdd.getOperand(2) != FMSub.getOperand(2))
35014    return SDValue();
35015
35016  // Check for correct shuffle mask.
35017  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
35018  bool Op0Even;
35019  if (!isAddSubOrSubAddMask(Mask, Op0Even))
35020    return SDValue();
35021
35022  // FMAddSub takes zeroth operand from FMSub node.
35023  SDLoc DL(N);
35024  bool IsSubAdd = Op0Even ? Op0 == FMAdd : Op1 == FMAdd;
35025  unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
35026  return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
35027                     FMAdd.getOperand(2));
35028}
35029
35030/// Try to combine a shuffle into a target-specific add-sub or
35031/// mul-add-sub node.
35032static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
35033                                                const X86Subtarget &Subtarget,
35034                                                SelectionDAG &DAG) {
35035  if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
35036    return V;
35037
35038  SDValue Opnd0, Opnd1;
35039  bool IsSubAdd;
35040  if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))
35041    return SDValue();
35042
35043  MVT VT = N->getSimpleValueType(0);
35044  SDLoc DL(N);
35045
35046  // Try to generate X86ISD::FMADDSUB node here.
35047  SDValue Opnd2;
35048  if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2)) {
35049    unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
35050    return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
35051  }
35052
35053  if (IsSubAdd)
35054    return SDValue();
35055
35056  // Do not generate X86ISD::ADDSUB node for 512-bit types even though
35057  // the ADDSUB idiom has been successfully recognized. There are no known
35058  // X86 targets with 512-bit ADDSUB instructions!
35059  if (VT.is512BitVector())
35060    return SDValue();
35061
35062  return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
35063}
35064
35065// We are looking for a shuffle where both sources are concatenated with undef
35066// and have a width that is half of the output's width. AVX2 has VPERMD/Q, so
35067// if we can express this as a single-source shuffle, that's preferable.
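// For example, with v4i32 placeholders t1 and t2:
//   (shuffle (concat t1, undef), (concat t2, undef), <0,1,2,3,8,9,10,11>)
//     --> (shuffle (concat t1, t2), undef, <0,1,2,3,4,5,6,7>)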
35068static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
35069                                           const X86Subtarget &Subtarget) {
35070  if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N))
35071    return SDValue();
35072
35073  EVT VT = N->getValueType(0);
35074
35075  // We only care about shuffles of 128/256-bit vectors of 32/64-bit values.
35076  if (!VT.is128BitVector() && !VT.is256BitVector())
35077    return SDValue();
35078
35079  if (VT.getVectorElementType() != MVT::i32 &&
35080      VT.getVectorElementType() != MVT::i64 &&
35081      VT.getVectorElementType() != MVT::f32 &&
35082      VT.getVectorElementType() != MVT::f64)
35083    return SDValue();
35084
35085  SDValue N0 = N->getOperand(0);
35086  SDValue N1 = N->getOperand(1);
35087
35088  // Check that both sources are concats with undef.
35089  if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
35090      N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
35091      N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
35092      !N1.getOperand(1).isUndef())
35093    return SDValue();
35094
35095  // Construct the new shuffle mask. Elements from the first source retain their
35096  // index, but elements from the second source no longer need to skip an undef.
35097  SmallVector<int, 8> Mask;
35098  int NumElts = VT.getVectorNumElements();
35099
35100  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
35101  for (int Elt : SVOp->getMask())
35102    Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));
35103
35104  SDLoc DL(N);
35105  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0),
35106                               N1.getOperand(0));
35107  return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
35108}
35109
35110/// Eliminate a redundant shuffle of a horizontal math op.
35111static SDValue foldShuffleOfHorizOp(SDNode *N, SelectionDAG &DAG) {
35112  unsigned Opcode = N->getOpcode();
35113  if (Opcode != X86ISD::MOVDDUP && Opcode != X86ISD::VBROADCAST)
35114    if (Opcode != ISD::VECTOR_SHUFFLE || !N->getOperand(1).isUndef())
35115      return SDValue();
35116
35117  // For a broadcast, peek through an extract element of index 0 to find the
35118  // horizontal op: broadcast (ext_vec_elt HOp, 0)
35119  EVT VT = N->getValueType(0);
35120  if (Opcode == X86ISD::VBROADCAST) {
35121    SDValue SrcOp = N->getOperand(0);
35122    if (SrcOp.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
35123        SrcOp.getValueType() == MVT::f64 &&
35124        SrcOp.getOperand(0).getValueType() == VT &&
35125        isNullConstant(SrcOp.getOperand(1)))
35126      N = SrcOp.getNode();
35127  }
35128
35129  SDValue HOp = N->getOperand(0);
35130  if (HOp.getOpcode() != X86ISD::HADD && HOp.getOpcode() != X86ISD::FHADD &&
35131      HOp.getOpcode() != X86ISD::HSUB && HOp.getOpcode() != X86ISD::FHSUB)
35132    return SDValue();
35133
35134  // 128-bit horizontal math instructions are defined to operate on adjacent
35135  // lanes of each operand as:
35136  // v4X32: A[0] + A[1] , A[2] + A[3] , B[0] + B[1] , B[2] + B[3]
35137  // ...similarly for v2f64 and v8i16.
35138  if (!HOp.getOperand(0).isUndef() && !HOp.getOperand(1).isUndef() &&
35139      HOp.getOperand(0) != HOp.getOperand(1))
35140    return SDValue();
35141
35142  // The shuffle that we are eliminating may have allowed the horizontal op to
35143  // have an undemanded (undefined) operand. Duplicate the other (defined)
35144  // operand to ensure that the results are defined across all lanes without the
35145  // shuffle.
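  // For example: (hadd X, undef) is rebuilt as (hadd X, X), so every lane of
  // the result is defined once the shuffle is removed.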
35146  auto updateHOp = [](SDValue HorizOp, SelectionDAG &DAG) {
35147    SDValue X;
35148    if (HorizOp.getOperand(0).isUndef()) {
35149      assert(!HorizOp.getOperand(1).isUndef() && "Not expecting foldable h-op");
35150      X = HorizOp.getOperand(1);
35151    } else if (HorizOp.getOperand(1).isUndef()) {
35152      assert(!HorizOp.getOperand(0).isUndef() && "Not expecting foldable h-op");
35153      X = HorizOp.getOperand(0);
35154    } else {
35155      return HorizOp;
35156    }
35157    return DAG.getNode(HorizOp.getOpcode(), SDLoc(HorizOp),
35158                       HorizOp.getValueType(), X, X);
35159  };
35160
35161  // When the operands of a horizontal math op are identical, the low half of
35162  // the result is the same as the high half. If a target shuffle is also
35163  // replicating low and high halves (and without changing the type/length of
35164  // the vector), we don't need the shuffle.
35165  if (Opcode == X86ISD::MOVDDUP || Opcode == X86ISD::VBROADCAST) {
35166    if (HOp.getScalarValueSizeInBits() == 64 && HOp.getValueType() == VT) {
35167      // movddup (hadd X, X) --> hadd X, X
35168      // broadcast (extract_vec_elt (hadd X, X), 0) --> hadd X, X
35169      assert((HOp.getValueType() == MVT::v2f64 ||
35170              HOp.getValueType() == MVT::v4f64) && "Unexpected type for h-op");
35171      return updateHOp(HOp, DAG);
35172    }
35173    return SDValue();
35174  }
35175
35176  // shuffle (hadd X, X), undef, [low half...high half] --> hadd X, X
35177  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
35178  // TODO: Other mask possibilities like {1,1} and {1,0} could be added here,
35179  // but this should be tied to whatever horizontal op matching and shuffle
35180  // canonicalization are producing.
35181  if (HOp.getValueSizeInBits() == 128 &&
35182      (isTargetShuffleEquivalent(Mask, {0, 0}) ||
35183       isTargetShuffleEquivalent(Mask, {0, 1, 0, 1}) ||
35184       isTargetShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3})))
35185    return updateHOp(HOp, DAG);
35186
35187  if (HOp.getValueSizeInBits() == 256 &&
35188      (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2}) ||
35189       isTargetShuffleEquivalent(Mask, {0, 1, 0, 1, 4, 5, 4, 5}) ||
35190       isTargetShuffleEquivalent(
35191           Mask, {0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 8, 9, 10, 11})))
35192    return updateHOp(HOp, DAG);
35193
35194  return SDValue();
35195}
35196
35197/// If we have a shuffle of AVX/AVX512 (256/512 bit) vectors that only uses the
35198/// low half of each source vector and does not set any high half elements in
35199/// the destination vector, narrow the shuffle to half its original size.
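/// A rough example for v8f32 (X, Y and their low halves are placeholders):
///   (v8f32 shuffle X, Y, <0,8,1,9,u,u,u,u>)
///     --> (concat (v4f32 shuffle Xlo, Ylo, <0,4,1,5>), undef)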
35200static SDValue narrowShuffle(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG) {
35201  if (!Shuf->getValueType(0).isSimple())
35202    return SDValue();
35203  MVT VT = Shuf->getSimpleValueType(0);
35204  if (!VT.is256BitVector() && !VT.is512BitVector())
35205    return SDValue();
35206
35207  // See if we can ignore all of the high elements of the shuffle.
35208  ArrayRef<int> Mask = Shuf->getMask();
35209  if (!isUndefUpperHalf(Mask))
35210    return SDValue();
35211
35212  // Check if the shuffle mask accesses only the low half of each input vector
35213  // (half-index output is 0 or 2).
35214  int HalfIdx1, HalfIdx2;
35215  SmallVector<int, 8> HalfMask(Mask.size() / 2);
35216  if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2) ||
35217      (HalfIdx1 % 2 == 1) || (HalfIdx2 % 2 == 1))
35218    return SDValue();
35219
35220  // Create a half-width shuffle to replace the unnecessarily wide shuffle.
35221  // The trick is knowing that all of the insert/extract are actually free
35222  // subregister (zmm<->ymm or ymm<->xmm) ops. That leaves us with a shuffle
35223  // of narrow inputs into a narrow output, and that is always cheaper than
35224  // the wide shuffle that we started with.
35225  return getShuffleHalfVectors(SDLoc(Shuf), Shuf->getOperand(0),
35226                               Shuf->getOperand(1), HalfMask, HalfIdx1,
35227                               HalfIdx2, false, DAG, /*UseConcat*/true);
35228}
35229
35230static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
35231                              TargetLowering::DAGCombinerInfo &DCI,
35232                              const X86Subtarget &Subtarget) {
35233  if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N))
35234    if (SDValue V = narrowShuffle(Shuf, DAG))
35235      return V;
35236
35237  // If we have legalized the vector types, look for blends of FADD and FSUB
35238  // nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node.
35239  SDLoc dl(N);
35240  EVT VT = N->getValueType(0);
35241  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
35242  if (TLI.isTypeLegal(VT)) {
35243    if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
35244      return AddSub;
35245
35246    if (SDValue HAddSub = foldShuffleOfHorizOp(N, DAG))
35247      return HAddSub;
35248  }
35249
35250  // Attempt to combine into a vector load/broadcast.
35251  if (SDValue LD = combineToConsecutiveLoads(VT, N, dl, DAG, Subtarget, true))
35252    return LD;
35253
35254  // For AVX2, we sometimes want to combine
35255  // (vector_shuffle <mask> (concat_vectors t1, undef)
35256  //                        (concat_vectors t2, undef))
35257  // Into:
35258  // (vector_shuffle <mask> (concat_vectors t1, t2), undef)
35259  // Since the latter can be efficiently lowered with VPERMD/VPERMQ
35260  if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget))
35261    return ShufConcat;
35262
35263  if (isTargetShuffle(N->getOpcode())) {
35264    SDValue Op(N, 0);
35265    if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget))
35266      return Shuffle;
35267
35268    // Try recursively combining arbitrary sequences of x86 shuffle
35269    // instructions into higher-order shuffles. We do this after combining
35270    // specific PSHUF instruction sequences into their minimal form so that we
35271    // can evaluate how many specialized shuffle instructions are involved in
35272    // a particular chain.
35273    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
35274      return Res;
35275
35276    // Simplify source operands based on shuffle mask.
35277    // TODO - merge this into combineX86ShufflesRecursively.
35278    APInt KnownUndef, KnownZero;
35279    APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
35280    if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, DCI))
35281      return SDValue(N, 0);
35282  }
35283
35284  // Look for a v2i64/v2f64 VZEXT_MOVL of a node that already produces zeros
35285  // in the upper 64 bits.
35286  // TODO: Can we generalize this using computeKnownBits.
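  // For example (Y is a placeholder):
  //   (v2i64 vzext_movl (bitcast (v4i32 cvtp2si (v2f64 Y))))
  //     --> (bitcast (v4i32 cvtp2si (v2f64 Y)))
  // because the conversion already zeroes elements 2 and 3.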
35287  if (N->getOpcode() == X86ISD::VZEXT_MOVL &&
35288      (VT == MVT::v2f64 || VT == MVT::v2i64) &&
35289      N->getOperand(0).getOpcode() == ISD::BITCAST &&
35290      (N->getOperand(0).getOperand(0).getValueType() == MVT::v4f32 ||
35291       N->getOperand(0).getOperand(0).getValueType() == MVT::v4i32)) {
35292    SDValue In = N->getOperand(0).getOperand(0);
35293    switch (In.getOpcode()) {
35294    default:
35295      break;
35296    case X86ISD::CVTP2SI:   case X86ISD::CVTP2UI:
35297    case X86ISD::MCVTP2SI:  case X86ISD::MCVTP2UI:
35298    case X86ISD::CVTTP2SI:  case X86ISD::CVTTP2UI:
35299    case X86ISD::MCVTTP2SI: case X86ISD::MCVTTP2UI:
35300    case X86ISD::CVTSI2P:   case X86ISD::CVTUI2P:
35301    case X86ISD::MCVTSI2P:  case X86ISD::MCVTUI2P:
35302    case X86ISD::VFPROUND:  case X86ISD::VMFPROUND:
35303      if (In.getOperand(0).getValueType() == MVT::v2f64 ||
35304          In.getOperand(0).getValueType() == MVT::v2i64)
35305        return N->getOperand(0); // return the bitcast
35306      break;
35307    case X86ISD::STRICT_CVTTP2SI:
35308    case X86ISD::STRICT_CVTTP2UI:
35309    case X86ISD::STRICT_CVTSI2P:
35310    case X86ISD::STRICT_CVTUI2P:
35311    case X86ISD::STRICT_VFPROUND:
35312      if (In.getOperand(1).getValueType() == MVT::v2f64 ||
35313          In.getOperand(1).getValueType() == MVT::v2i64)
35314        return N->getOperand(0);
35315      break;
35316    }
35317  }
35318
35319  // Pull subvector inserts into undef through VZEXT_MOVL by making it an
35320  // insert into a zero vector. This helps get VZEXT_MOVL closer to
35321  // scalar_to_vectors, where 256/512-bit forms are canonicalized to an insert
35322  // of a 128-bit scalar_to_vector. This reduces the number of isel patterns.
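  // For example (X is a 128-bit placeholder):
  //   (vzext_movl (insert_subvector undef, X, 0))
  //     --> (insert_subvector (zero vector), (vzext_movl X), 0)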
35323  if (N->getOpcode() == X86ISD::VZEXT_MOVL && !DCI.isBeforeLegalizeOps() &&
35324      N->getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR &&
35325      N->getOperand(0).hasOneUse() &&
35326      N->getOperand(0).getOperand(0).isUndef() &&
35327      isNullConstant(N->getOperand(0).getOperand(2))) {
35328    SDValue In = N->getOperand(0).getOperand(1);
35329    SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, dl, In.getValueType(), In);
35330    return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT,
35331                       getZeroVector(VT.getSimpleVT(), Subtarget, DAG, dl),
35332                       Movl, N->getOperand(0).getOperand(2));
35333  }
35334
35335  // If this is a vzmovl of a full vector load, replace it with a vzload, unless
35336  // the load is volatile.
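  // For example: (v2i64 vzext_movl (v2i64 load p)) --> (v2i64 vzext_load p),
  // which loads only the low 64 bits and zeroes the upper element (p is a
  // placeholder pointer).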
35337  if (N->getOpcode() == X86ISD::VZEXT_MOVL && N->getOperand(0).hasOneUse() &&
35338      ISD::isNormalLoad(N->getOperand(0).getNode())) {
35339    LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
35340    if (LN->isSimple()) {
35341      SDVTList Tys = DAG.getVTList(VT, MVT::Other);
35342      SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
35343      SDValue VZLoad =
35344          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
35345                                  VT.getVectorElementType(),
35346                                  LN->getPointerInfo(),
35347                                  LN->getAlignment(),
35348                                  MachineMemOperand::MOLoad);
35349      DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
35350      return VZLoad;
35351    }
35352  }
35353
35354  return SDValue();
35355}
35356
35357bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
35358    SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
35359    TargetLoweringOpt &TLO, unsigned Depth) const {
35360  int NumElts = DemandedElts.getBitWidth();
35361  unsigned Opc = Op.getOpcode();
35362  EVT VT = Op.getValueType();
35363
35364  // Handle special case opcodes.
35365  switch (Opc) {
35366  case X86ISD::PMULDQ:
35367  case X86ISD::PMULUDQ: {
35368    APInt LHSUndef, LHSZero;
35369    APInt RHSUndef, RHSZero;
35370    SDValue LHS = Op.getOperand(0);
35371    SDValue RHS = Op.getOperand(1);
35372    if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
35373                                   Depth + 1))
35374      return true;
35375    if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
35376                                   Depth + 1))
35377      return true;
35378    // Multiply by zero.
35379    KnownZero = LHSZero | RHSZero;
35380    break;
35381  }
35382  case X86ISD::VSHL:
35383  case X86ISD::VSRL:
35384  case X86ISD::VSRA: {
35385    // We only need the bottom 64-bits of the (128-bit) shift amount.
35386    SDValue Amt = Op.getOperand(1);
35387    MVT AmtVT = Amt.getSimpleValueType();
35388    assert(AmtVT.is128BitVector() && "Unexpected value type");
35389
35390    // If the shift amount is only ever used as an SSE shift amount then we
35391    // know that only the bottom 64-bits are ever used.
35392    bool AssumeSingleUse = llvm::all_of(Amt->uses(), [&Amt](SDNode *Use) {
35393      unsigned UseOpc = Use->getOpcode();
35394      return (UseOpc == X86ISD::VSHL || UseOpc == X86ISD::VSRL ||
35395              UseOpc == X86ISD::VSRA) &&
35396             Use->getOperand(0) != Amt;
35397    });
35398
35399    APInt AmtUndef, AmtZero;
35400    unsigned NumAmtElts = AmtVT.getVectorNumElements();
35401    APInt AmtElts = APInt::getLowBitsSet(NumAmtElts, NumAmtElts / 2);
35402    if (SimplifyDemandedVectorElts(Amt, AmtElts, AmtUndef, AmtZero, TLO,
35403                                   Depth + 1, AssumeSingleUse))
35404      return true;
35405    LLVM_FALLTHROUGH;
35406  }
35407  case X86ISD::VSHLI:
35408  case X86ISD::VSRLI:
35409  case X86ISD::VSRAI: {
35410    SDValue Src = Op.getOperand(0);
35411    APInt SrcUndef;
35412    if (SimplifyDemandedVectorElts(Src, DemandedElts, SrcUndef, KnownZero, TLO,
35413                                   Depth + 1))
35414      return true;
35415    // TODO convert SrcUndef to KnownUndef.
35416    break;
35417  }
35418  case X86ISD::KSHIFTL: {
35419    SDValue Src = Op.getOperand(0);
35420    auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
35421    assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
35422    unsigned ShiftAmt = Amt->getZExtValue();
35423
35424    if (ShiftAmt == 0)
35425      return TLO.CombineTo(Op, Src);
35426
35427    // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
35428    // single shift.  We can do this if the bottom bits (which are shifted
35429    // out) are never demanded.
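    // For example: (kshiftl (kshiftr X, 2), 3) --> (kshiftl X, 1) when the low
    // three mask elements are not demanded.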
35430    if (Src.getOpcode() == X86ISD::KSHIFTR) {
35431      if (!DemandedElts.intersects(APInt::getLowBitsSet(NumElts, ShiftAmt))) {
35432        unsigned C1 = Src.getConstantOperandVal(1);
35433        unsigned NewOpc = X86ISD::KSHIFTL;
35434        int Diff = ShiftAmt - C1;
35435        if (Diff < 0) {
35436          Diff = -Diff;
35437          NewOpc = X86ISD::KSHIFTR;
35438        }
35439
35440        SDLoc dl(Op);
35441        SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
35442        return TLO.CombineTo(
35443            Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
35444      }
35445    }
35446
35447    APInt DemandedSrc = DemandedElts.lshr(ShiftAmt);
35448    if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
35449                                   Depth + 1))
35450      return true;
35451
35452    KnownUndef <<= ShiftAmt;
35453    KnownZero <<= ShiftAmt;
35454    KnownZero.setLowBits(ShiftAmt);
35455    break;
35456  }
35457  case X86ISD::KSHIFTR: {
35458    SDValue Src = Op.getOperand(0);
35459    auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
35460    assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
35461    unsigned ShiftAmt = Amt->getZExtValue();
35462
35463    if (ShiftAmt == 0)
35464      return TLO.CombineTo(Op, Src);
35465
35466    // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
35467    // single shift.  We can do this if the top bits (which are shifted
35468    // out) are never demanded.
35469    if (Src.getOpcode() == X86ISD::KSHIFTL) {
35470      if (!DemandedElts.intersects(APInt::getHighBitsSet(NumElts, ShiftAmt))) {
35471        unsigned C1 = Src.getConstantOperandVal(1);
35472        unsigned NewOpc = X86ISD::KSHIFTR;
35473        int Diff = ShiftAmt - C1;
35474        if (Diff < 0) {
35475          Diff = -Diff;
35476          NewOpc = X86ISD::KSHIFTL;
35477        }
35478
35479        SDLoc dl(Op);
35480        SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
35481        return TLO.CombineTo(
35482            Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
35483      }
35484    }
35485
35486    APInt DemandedSrc = DemandedElts.shl(ShiftAmt);
35487    if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
35488                                   Depth + 1))
35489      return true;
35490
35491    KnownUndef.lshrInPlace(ShiftAmt);
35492    KnownZero.lshrInPlace(ShiftAmt);
35493    KnownZero.setHighBits(ShiftAmt);
35494    break;
35495  }
35496  case X86ISD::CVTSI2P:
35497  case X86ISD::CVTUI2P: {
35498    SDValue Src = Op.getOperand(0);
35499    MVT SrcVT = Src.getSimpleValueType();
35500    APInt SrcUndef, SrcZero;
35501    APInt SrcElts = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
35502    if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
35503                                   Depth + 1))
35504      return true;
35505    break;
35506  }
35507  case X86ISD::PACKSS:
35508  case X86ISD::PACKUS: {
35509    SDValue N0 = Op.getOperand(0);
35510    SDValue N1 = Op.getOperand(1);
35511
35512    APInt DemandedLHS, DemandedRHS;
35513    getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
35514
35515    APInt SrcUndef, SrcZero;
35516    if (SimplifyDemandedVectorElts(N0, DemandedLHS, SrcUndef, SrcZero, TLO,
35517                                   Depth + 1))
35518      return true;
35519    if (SimplifyDemandedVectorElts(N1, DemandedRHS, SrcUndef, SrcZero, TLO,
35520                                   Depth + 1))
35521      return true;
35522
35523    // Aggressively peek through ops to get at the demanded elts.
35524    // TODO - we should do this for all target/faux shuffle ops.
35525    if (!DemandedElts.isAllOnesValue()) {
35526      APInt DemandedSrcBits =
35527          APInt::getAllOnesValue(N0.getScalarValueSizeInBits());
35528      SDValue NewN0 = SimplifyMultipleUseDemandedBits(
35529          N0, DemandedSrcBits, DemandedLHS, TLO.DAG, Depth + 1);
35530      SDValue NewN1 = SimplifyMultipleUseDemandedBits(
35531          N1, DemandedSrcBits, DemandedRHS, TLO.DAG, Depth + 1);
35532      if (NewN0 || NewN1) {
35533        NewN0 = NewN0 ? NewN0 : N0;
35534        NewN1 = NewN1 ? NewN1 : N1;
35535        return TLO.CombineTo(Op,
35536                             TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
35537      }
35538    }
35539    break;
35540  }
35541  case X86ISD::HADD:
35542  case X86ISD::HSUB:
35543  case X86ISD::FHADD:
35544  case X86ISD::FHSUB: {
35545    APInt DemandedLHS, DemandedRHS;
35546    getHorizDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
35547
35548    APInt LHSUndef, LHSZero;
35549    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, LHSUndef,
35550                                   LHSZero, TLO, Depth + 1))
35551      return true;
35552    APInt RHSUndef, RHSZero;
35553    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, RHSUndef,
35554                                   RHSZero, TLO, Depth + 1))
35555      return true;
35556    break;
35557  }
35558  case X86ISD::VTRUNC:
35559  case X86ISD::VTRUNCS:
35560  case X86ISD::VTRUNCUS: {
35561    SDValue Src = Op.getOperand(0);
35562    MVT SrcVT = Src.getSimpleValueType();
35563    APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
35564    APInt SrcUndef, SrcZero;
35565    if (SimplifyDemandedVectorElts(Src, DemandedSrc, SrcUndef, SrcZero, TLO,
35566                                   Depth + 1))
35567      return true;
35568    KnownZero = SrcZero.zextOrTrunc(NumElts);
35569    KnownUndef = SrcUndef.zextOrTrunc(NumElts);
35570    break;
35571  }
35572  case X86ISD::BLENDV: {
35573    APInt SelUndef, SelZero;
35574    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, SelUndef,
35575                                   SelZero, TLO, Depth + 1))
35576      return true;
35577
35578    // TODO: Use SelZero to adjust LHS/RHS DemandedElts.
35579    APInt LHSUndef, LHSZero;
35580    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, LHSUndef,
35581                                   LHSZero, TLO, Depth + 1))
35582      return true;
35583
35584    APInt RHSUndef, RHSZero;
35585    if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedElts, RHSUndef,
35586                                   RHSZero, TLO, Depth + 1))
35587      return true;
35588
35589    KnownZero = LHSZero & RHSZero;
35590    KnownUndef = LHSUndef & RHSUndef;
35591    break;
35592  }
35593  case X86ISD::VBROADCAST: {
35594    SDValue Src = Op.getOperand(0);
35595    MVT SrcVT = Src.getSimpleValueType();
35596    if (!SrcVT.isVector())
35597      return false;
35598    // Don't bother broadcasting if we just need the 0'th element.
35599    if (DemandedElts == 1) {
35600      if (Src.getValueType() != VT)
35601        Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG,
35602                             SDLoc(Op));
35603      return TLO.CombineTo(Op, Src);
35604    }
35605    APInt SrcUndef, SrcZero;
35606    APInt SrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(), 0);
35607    if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
35608                                   Depth + 1))
35609      return true;
35610    break;
35611  }
35612  case X86ISD::VPERMV: {
35613    SDValue Mask = Op.getOperand(0);
35614    APInt MaskUndef, MaskZero;
35615    if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
35616                                   Depth + 1))
35617      return true;
35618    break;
35619  }
35620  case X86ISD::PSHUFB:
35621  case X86ISD::VPERMV3:
35622  case X86ISD::VPERMILPV: {
35623    SDValue Mask = Op.getOperand(1);
35624    APInt MaskUndef, MaskZero;
35625    if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
35626                                   Depth + 1))
35627      return true;
35628    break;
35629  }
35630  case X86ISD::VPPERM:
35631  case X86ISD::VPERMIL2: {
35632    SDValue Mask = Op.getOperand(2);
35633    APInt MaskUndef, MaskZero;
35634    if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
35635                                   Depth + 1))
35636      return true;
35637    break;
35638  }
35639  }
35640
35641  // For 256/512-bit ops that are 128/256-bit ops glued together, if we do not
35642  // demand any of the high elements, then narrow the op to 128/256-bits: e.g.
35643  // (op ymm0, ymm1) --> insert undef, (op xmm0, xmm1), 0
35644  if ((VT.is256BitVector() || VT.is512BitVector()) &&
35645      DemandedElts.lshr(NumElts / 2) == 0) {
35646    unsigned SizeInBits = VT.getSizeInBits();
35647    unsigned ExtSizeInBits = SizeInBits / 2;
35648
35649    // See if 512-bit ops only use the bottom 128-bits.
35650    if (VT.is512BitVector() && DemandedElts.lshr(NumElts / 4) == 0)
35651      ExtSizeInBits = SizeInBits / 4;
35652
35653    switch (Opc) {
35654      // Zero upper elements.
35655    case X86ISD::VZEXT_MOVL: {
35656      SDLoc DL(Op);
35657      SDValue Ext0 =
35658          extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
35659      SDValue ExtOp =
35660          TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0);
35661      SDValue UndefVec = TLO.DAG.getUNDEF(VT);
35662      SDValue Insert =
35663          insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
35664      return TLO.CombineTo(Op, Insert);
35665    }
35666      // Subvector broadcast.
35667    case X86ISD::SUBV_BROADCAST: {
35668      SDLoc DL(Op);
35669      SDValue Src = Op.getOperand(0);
35670      if (Src.getValueSizeInBits() > ExtSizeInBits)
35671        Src = extractSubVector(Src, 0, TLO.DAG, DL, ExtSizeInBits);
35672      else if (Src.getValueSizeInBits() < ExtSizeInBits) {
35673        MVT SrcSVT = Src.getSimpleValueType().getScalarType();
35674        MVT SrcVT =
35675            MVT::getVectorVT(SrcSVT, ExtSizeInBits / SrcSVT.getSizeInBits());
35676        Src = TLO.DAG.getNode(X86ISD::SUBV_BROADCAST, DL, SrcVT, Src);
35677      }
35678      return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Src, 0,
35679                                               TLO.DAG, DL, ExtSizeInBits));
35680    }
35681      // Byte shifts by immediate.
35682    case X86ISD::VSHLDQ:
35683    case X86ISD::VSRLDQ:
35684      // Shift by uniform.
35685    case X86ISD::VSHL:
35686    case X86ISD::VSRL:
35687    case X86ISD::VSRA:
35688      // Shift by immediate.
35689    case X86ISD::VSHLI:
35690    case X86ISD::VSRLI:
35691    case X86ISD::VSRAI: {
35692      SDLoc DL(Op);
35693      SDValue Ext0 =
35694          extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
35695      SDValue ExtOp =
35696          TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0, Op.getOperand(1));
35697      SDValue UndefVec = TLO.DAG.getUNDEF(VT);
35698      SDValue Insert =
35699          insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
35700      return TLO.CombineTo(Op, Insert);
35701    }
35702    case X86ISD::VPERMI: {
35703      // Simplify PERMPD/PERMQ to extract_subvector.
35704      // TODO: This should be done in shuffle combining.
35705      if (VT == MVT::v4f64 || VT == MVT::v4i64) {
35706        SmallVector<int, 4> Mask;
35707        DecodeVPERMMask(NumElts, Op.getConstantOperandVal(1), Mask);
35708        if (isUndefOrEqual(Mask[0], 2) && isUndefOrEqual(Mask[1], 3)) {
35709          SDLoc DL(Op);
35710          SDValue Ext = extractSubVector(Op.getOperand(0), 2, TLO.DAG, DL, 128);
35711          SDValue UndefVec = TLO.DAG.getUNDEF(VT);
35712          SDValue Insert = insertSubVector(UndefVec, Ext, 0, TLO.DAG, DL, 128);
35713          return TLO.CombineTo(Op, Insert);
35714        }
35715      }
35716      break;
35717    }
35718      // Target Shuffles.
35719    case X86ISD::PSHUFB:
35720    case X86ISD::UNPCKL:
35721    case X86ISD::UNPCKH:
35722      // Saturated Packs.
35723    case X86ISD::PACKSS:
35724    case X86ISD::PACKUS:
35725      // Horizontal Ops.
35726    case X86ISD::HADD:
35727    case X86ISD::HSUB:
35728    case X86ISD::FHADD:
35729    case X86ISD::FHSUB: {
35730      SDLoc DL(Op);
35731      MVT ExtVT = VT.getSimpleVT();
35732      ExtVT = MVT::getVectorVT(ExtVT.getScalarType(),
35733                               ExtSizeInBits / ExtVT.getScalarSizeInBits());
35734      SDValue Ext0 =
35735          extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
35736      SDValue Ext1 =
35737          extractSubVector(Op.getOperand(1), 0, TLO.DAG, DL, ExtSizeInBits);
35738      SDValue ExtOp = TLO.DAG.getNode(Opc, DL, ExtVT, Ext0, Ext1);
35739      SDValue UndefVec = TLO.DAG.getUNDEF(VT);
35740      SDValue Insert =
35741          insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
35742      return TLO.CombineTo(Op, Insert);
35743    }
35744    }
35745  }
35746
35747  // Get target/faux shuffle mask.
35748  APInt OpUndef, OpZero;
35749  SmallVector<int, 64> OpMask;
35750  SmallVector<SDValue, 2> OpInputs;
35751  if (!getTargetShuffleInputs(Op, DemandedElts, OpInputs, OpMask, OpUndef,
35752                              OpZero, TLO.DAG, Depth, false))
35753    return false;
35754
35755  // Shuffle inputs must be the same size as the result.
35756  if (OpMask.size() != (unsigned)NumElts ||
35757      llvm::any_of(OpInputs, [VT](SDValue V) {
35758        return VT.getSizeInBits() != V.getValueSizeInBits() ||
35759               !V.getValueType().isVector();
35760      }))
35761    return false;
35762
35763  KnownZero = OpZero;
35764  KnownUndef = OpUndef;
35765
35766  // Check if shuffle mask can be simplified to undef/zero/identity.
35767  int NumSrcs = OpInputs.size();
35768  for (int i = 0; i != NumElts; ++i)
35769    if (!DemandedElts[i])
35770      OpMask[i] = SM_SentinelUndef;
35771
35772  if (isUndefInRange(OpMask, 0, NumElts)) {
35773    KnownUndef.setAllBits();
35774    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
35775  }
35776  if (isUndefOrZeroInRange(OpMask, 0, NumElts)) {
35777    KnownZero.setAllBits();
35778    return TLO.CombineTo(
35779        Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
35780  }
35781  for (int Src = 0; Src != NumSrcs; ++Src)
35782    if (isSequentialOrUndefInRange(OpMask, 0, NumElts, Src * NumElts))
35783      return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, OpInputs[Src]));
35784
35785  // Attempt to simplify inputs.
35786  for (int Src = 0; Src != NumSrcs; ++Src) {
35787    // TODO: Support inputs of different types.
35788    if (OpInputs[Src].getValueType() != VT)
35789      continue;
35790
35791    int Lo = Src * NumElts;
35792    APInt SrcElts = APInt::getNullValue(NumElts);
35793    for (int i = 0; i != NumElts; ++i)
35794      if (DemandedElts[i]) {
35795        int M = OpMask[i] - Lo;
35796        if (0 <= M && M < NumElts)
35797          SrcElts.setBit(M);
35798      }
35799
35800    // TODO - Propagate input undef/zero elts.
35801    APInt SrcUndef, SrcZero;
35802    if (SimplifyDemandedVectorElts(OpInputs[Src], SrcElts, SrcUndef, SrcZero,
35803                                   TLO, Depth + 1))
35804      return true;
35805  }
35806
35807  // If we don't demand all elements, then attempt to combine to a simpler
35808  // shuffle.
35809  // TODO: Handle other depths, but first we need to handle the fact that
35810  // it might combine to the same shuffle.
35811  if (!DemandedElts.isAllOnesValue() && Depth == 0) {
35812    SmallVector<int, 64> DemandedMask(NumElts, SM_SentinelUndef);
35813    for (int i = 0; i != NumElts; ++i)
35814      if (DemandedElts[i])
35815        DemandedMask[i] = i;
35816
35817    SDValue NewShuffle = combineX86ShufflesRecursively(
35818        {Op}, 0, Op, DemandedMask, {}, Depth, /*HasVarMask*/ false,
35819        /*AllowVarMask*/ true, TLO.DAG, Subtarget);
35820    if (NewShuffle)
35821      return TLO.CombineTo(Op, NewShuffle);
35822  }
35823
35824  return false;
35825}
35826
35827bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
35828    SDValue Op, const APInt &OriginalDemandedBits,
35829    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
35830    unsigned Depth) const {
35831  EVT VT = Op.getValueType();
35832  unsigned BitWidth = OriginalDemandedBits.getBitWidth();
35833  unsigned Opc = Op.getOpcode();
35834  switch(Opc) {
35835  case X86ISD::PMULDQ:
35836  case X86ISD::PMULUDQ: {
35837    // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
35838    KnownBits KnownOp;
35839    SDValue LHS = Op.getOperand(0);
35840    SDValue RHS = Op.getOperand(1);
35841    // FIXME: Can we bound this better?
35842    APInt DemandedMask = APInt::getLowBitsSet(64, 32);
35843    if (SimplifyDemandedBits(LHS, DemandedMask, OriginalDemandedElts, KnownOp,
35844                             TLO, Depth + 1))
35845      return true;
35846    if (SimplifyDemandedBits(RHS, DemandedMask, OriginalDemandedElts, KnownOp,
35847                             TLO, Depth + 1))
35848      return true;
35849
35850    // Aggressively peek through ops to get at the demanded low bits.
35851    SDValue DemandedLHS = SimplifyMultipleUseDemandedBits(
35852        LHS, DemandedMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
35853    SDValue DemandedRHS = SimplifyMultipleUseDemandedBits(
35854        RHS, DemandedMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
35855    if (DemandedLHS || DemandedRHS) {
35856      DemandedLHS = DemandedLHS ? DemandedLHS : LHS;
35857      DemandedRHS = DemandedRHS ? DemandedRHS : RHS;
35858      return TLO.CombineTo(
35859          Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, DemandedLHS, DemandedRHS));
35860    }
35861    break;
35862  }
35863  case X86ISD::VSHLI: {
35864    SDValue Op0 = Op.getOperand(0);
35865
35866    unsigned ShAmt = Op.getConstantOperandVal(1);
35867    if (ShAmt >= BitWidth)
35868      break;
35869
35870    APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt);
35871
35872    // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
35873    // single shift.  We can do this if the bottom bits (which are shifted
35874    // out) are never demanded.
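    // For example: (vshli (vsrli X, 8), 4) --> (vsrli X, 4) when the low 4
    // bits of each element are not demanded.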
35875    if (Op0.getOpcode() == X86ISD::VSRLI &&
35876        OriginalDemandedBits.countTrailingZeros() >= ShAmt) {
35877      unsigned Shift2Amt = Op0.getConstantOperandVal(1);
35878      if (Shift2Amt < BitWidth) {
35879        int Diff = ShAmt - Shift2Amt;
35880        if (Diff == 0)
35881          return TLO.CombineTo(Op, Op0.getOperand(0));
35882
35883        unsigned NewOpc = Diff < 0 ? X86ISD::VSRLI : X86ISD::VSHLI;
35884        SDValue NewShift = TLO.DAG.getNode(
35885            NewOpc, SDLoc(Op), VT, Op0.getOperand(0),
35886            TLO.DAG.getTargetConstant(std::abs(Diff), SDLoc(Op), MVT::i8));
35887        return TLO.CombineTo(Op, NewShift);
35888      }
35889    }
35890
35891    if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
35892                             TLO, Depth + 1))
35893      return true;
35894
35895    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
35896    Known.Zero <<= ShAmt;
35897    Known.One <<= ShAmt;
35898
35899    // Low bits known zero.
35900    Known.Zero.setLowBits(ShAmt);
35901    break;
35902  }
35903  case X86ISD::VSRLI: {
35904    unsigned ShAmt = Op.getConstantOperandVal(1);
35905    if (ShAmt >= BitWidth)
35906      break;
35907
35908    APInt DemandedMask = OriginalDemandedBits << ShAmt;
35909
35910    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
35911                             OriginalDemandedElts, Known, TLO, Depth + 1))
35912      return true;
35913
35914    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
35915    Known.Zero.lshrInPlace(ShAmt);
35916    Known.One.lshrInPlace(ShAmt);
35917
35918    // High bits known zero.
35919    Known.Zero.setHighBits(ShAmt);
35920    break;
35921  }
35922  case X86ISD::VSRAI: {
35923    SDValue Op0 = Op.getOperand(0);
35924    SDValue Op1 = Op.getOperand(1);
35925
35926    unsigned ShAmt = cast<ConstantSDNode>(Op1)->getZExtValue();
35927    if (ShAmt >= BitWidth)
35928      break;
35929
35930    APInt DemandedMask = OriginalDemandedBits << ShAmt;
35931
35932    // If we just want the sign bit then we don't need to shift it.
35933    if (OriginalDemandedBits.isSignMask())
35934      return TLO.CombineTo(Op, Op0);
35935
35936    // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
35937    if (Op0.getOpcode() == X86ISD::VSHLI &&
35938        Op.getOperand(1) == Op0.getOperand(1)) {
35939      SDValue Op00 = Op0.getOperand(0);
35940      unsigned NumSignBits =
35941          TLO.DAG.ComputeNumSignBits(Op00, OriginalDemandedElts);
35942      if (ShAmt < NumSignBits)
35943        return TLO.CombineTo(Op, Op00);
35944    }
35945
35946    // If any of the demanded bits are produced by the sign extension, we also
35947    // demand the input sign bit.
35948    if (OriginalDemandedBits.countLeadingZeros() < ShAmt)
35949      DemandedMask.setSignBit();
35950
35951    if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
35952                             TLO, Depth + 1))
35953      return true;
35954
35955    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
35956    Known.Zero.lshrInPlace(ShAmt);
35957    Known.One.lshrInPlace(ShAmt);
35958
35959    // If the input sign bit is known to be zero, or if none of the top bits
35960    // are demanded, turn this into an unsigned shift right.
35961    if (Known.Zero[BitWidth - ShAmt - 1] ||
35962        OriginalDemandedBits.countLeadingZeros() >= ShAmt)
35963      return TLO.CombineTo(
35964          Op, TLO.DAG.getNode(X86ISD::VSRLI, SDLoc(Op), VT, Op0, Op1));
35965
35966    // High bits are known one.
35967    if (Known.One[BitWidth - ShAmt - 1])
35968      Known.One.setHighBits(ShAmt);
35969    break;
35970  }
35971  case X86ISD::PEXTRB:
35972  case X86ISD::PEXTRW: {
35973    SDValue Vec = Op.getOperand(0);
35974    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
35975    MVT VecVT = Vec.getSimpleValueType();
35976    unsigned NumVecElts = VecVT.getVectorNumElements();
35977
35978    if (CIdx && CIdx->getAPIntValue().ult(NumVecElts)) {
35979      unsigned Idx = CIdx->getZExtValue();
35980      unsigned VecBitWidth = VecVT.getScalarSizeInBits();
35981
35982      // If we demand no bits from the vector then we must have demanded
35983      // bits from the implicit zext - simplify to zero.
35984      APInt DemandedVecBits = OriginalDemandedBits.trunc(VecBitWidth);
35985      if (DemandedVecBits == 0)
35986        return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
35987
35988      APInt KnownUndef, KnownZero;
35989      APInt DemandedVecElts = APInt::getOneBitSet(NumVecElts, Idx);
35990      if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
35991                                     KnownZero, TLO, Depth + 1))
35992        return true;
35993
35994      KnownBits KnownVec;
35995      if (SimplifyDemandedBits(Vec, DemandedVecBits, DemandedVecElts,
35996                               KnownVec, TLO, Depth + 1))
35997        return true;
35998
35999      if (SDValue V = SimplifyMultipleUseDemandedBits(
36000              Vec, DemandedVecBits, DemandedVecElts, TLO.DAG, Depth + 1))
36001        return TLO.CombineTo(
36002            Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, V, Op.getOperand(1)));
36003
36004      Known = KnownVec.zext(BitWidth, true);
36005      return false;
36006    }
36007    break;
36008  }
36009  case X86ISD::PINSRB:
36010  case X86ISD::PINSRW: {
36011    SDValue Vec = Op.getOperand(0);
36012    SDValue Scl = Op.getOperand(1);
36013    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
36014    MVT VecVT = Vec.getSimpleValueType();
36015
36016    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
36017      unsigned Idx = CIdx->getZExtValue();
36018      if (!OriginalDemandedElts[Idx])
36019        return TLO.CombineTo(Op, Vec);
36020
36021      KnownBits KnownVec;
36022      APInt DemandedVecElts(OriginalDemandedElts);
36023      DemandedVecElts.clearBit(Idx);
36024      if (SimplifyDemandedBits(Vec, OriginalDemandedBits, DemandedVecElts,
36025                               KnownVec, TLO, Depth + 1))
36026        return true;
36027
36028      KnownBits KnownScl;
36029      unsigned NumSclBits = Scl.getScalarValueSizeInBits();
36030      APInt DemandedSclBits = OriginalDemandedBits.zext(NumSclBits);
36031      if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
36032        return true;
36033
36034      KnownScl = KnownScl.trunc(VecVT.getScalarSizeInBits());
36035      Known.One = KnownVec.One & KnownScl.One;
36036      Known.Zero = KnownVec.Zero & KnownScl.Zero;
36037      return false;
36038    }
36039    break;
36040  }
36041  case X86ISD::PACKSS:
36042    // PACKSS saturates to MIN/MAX integer values. So if we just want the
36043    // sign bit then we can just ask for the source operands' sign bits.
36044    // TODO - add known bits handling.
36045    if (OriginalDemandedBits.isSignMask()) {
36046      APInt DemandedLHS, DemandedRHS;
36047      getPackDemandedElts(VT, OriginalDemandedElts, DemandedLHS, DemandedRHS);
36048
36049      KnownBits KnownLHS, KnownRHS;
36050      APInt SignMask = APInt::getSignMask(BitWidth * 2);
36051      if (SimplifyDemandedBits(Op.getOperand(0), SignMask, DemandedLHS,
36052                               KnownLHS, TLO, Depth + 1))
36053        return true;
36054      if (SimplifyDemandedBits(Op.getOperand(1), SignMask, DemandedRHS,
36055                               KnownRHS, TLO, Depth + 1))
36056        return true;
36057    }
36058    // TODO - add general PACKSS/PACKUS SimplifyDemandedBits support.
36059    break;
36060  case X86ISD::PCMPGT:
36061    // icmp sgt(0, R) == ashr(R, BitWidth-1).
36062    // iff we only need the sign bit then we can use R directly.
36063    if (OriginalDemandedBits.isSignMask() &&
36064        ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
36065      return TLO.CombineTo(Op, Op.getOperand(1));
36066    break;
36067  case X86ISD::MOVMSK: {
36068    SDValue Src = Op.getOperand(0);
36069    MVT SrcVT = Src.getSimpleValueType();
36070    unsigned SrcBits = SrcVT.getScalarSizeInBits();
36071    unsigned NumElts = SrcVT.getVectorNumElements();
36072
36073    // If we don't need the sign bits at all just return zero.
36074    if (OriginalDemandedBits.countTrailingZeros() >= NumElts)
36075      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
36076
36077    // Only demand the vector elements of the sign bits we need.
36078    APInt KnownUndef, KnownZero;
36079    APInt DemandedElts = OriginalDemandedBits.zextOrTrunc(NumElts);
36080    if (SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero,
36081                                   TLO, Depth + 1))
36082      return true;
36083
36084    Known.Zero = KnownZero.zextOrSelf(BitWidth);
36085    Known.Zero.setHighBits(BitWidth - NumElts);
36086
36087    // MOVMSK only uses the MSB from each vector element.
36088    KnownBits KnownSrc;
36089    if (SimplifyDemandedBits(Src, APInt::getSignMask(SrcBits), DemandedElts,
36090                             KnownSrc, TLO, Depth + 1))
36091      return true;
36092
36093    if (KnownSrc.One[SrcBits - 1])
36094      Known.One.setLowBits(NumElts);
36095    else if (KnownSrc.Zero[SrcBits - 1])
36096      Known.Zero.setLowBits(NumElts);
36097    return false;
36098  }
36099  }
36100
36101  return TargetLowering::SimplifyDemandedBitsForTargetNode(
36102      Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
36103}
36104
36105SDValue X86TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
36106    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
36107    SelectionDAG &DAG, unsigned Depth) const {
36108  int NumElts = DemandedElts.getBitWidth();
36109  unsigned Opc = Op.getOpcode();
36110  EVT VT = Op.getValueType();
36111
36112  switch (Opc) {
36113  case X86ISD::PINSRB:
36114  case X86ISD::PINSRW: {
36115    // If we don't demand the inserted element, return the base vector.
36116    SDValue Vec = Op.getOperand(0);
36117    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
36118    MVT VecVT = Vec.getSimpleValueType();
36119    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
36120        !DemandedElts[CIdx->getZExtValue()])
36121      return Vec;
36122    break;
36123  }
36124  case X86ISD::PCMPGT:
36125    // icmp sgt(0, R) == ashr(R, BitWidth-1).
36126    // iff we only need the sign bit then we can use R directly.
36127    if (DemandedBits.isSignMask() &&
36128        ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
36129      return Op.getOperand(1);
36130    break;
36131  }
36132
36133  APInt ShuffleUndef, ShuffleZero;
36134  SmallVector<int, 16> ShuffleMask;
36135  SmallVector<SDValue, 2> ShuffleOps;
36136  if (getTargetShuffleInputs(Op, DemandedElts, ShuffleOps, ShuffleMask,
36137                             ShuffleUndef, ShuffleZero, DAG, Depth, false)) {
36138    // If all the demanded elts are from one operand and are inline,
36139    // then we can use the operand directly.
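    // For example, if only elements 0 and 2 are demanded and the mask maps
    // them to elements 0 and 2 of the same ShuffleOps entry, a bitcast of that
    // operand can be returned directly.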
36140    int NumOps = ShuffleOps.size();
36141    if (ShuffleMask.size() == (unsigned)NumElts &&
36142        llvm::all_of(ShuffleOps, [VT](SDValue V) {
36143          return VT.getSizeInBits() == V.getValueSizeInBits();
36144        })) {
36145
36146      if (DemandedElts.isSubsetOf(ShuffleUndef))
36147        return DAG.getUNDEF(VT);
36148      if (DemandedElts.isSubsetOf(ShuffleUndef | ShuffleZero))
36149        return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(Op));
36150
36151      // Bitmask that indicates which ops have only been accessed 'inline'.
36152      APInt IdentityOp = APInt::getAllOnesValue(NumOps);
36153      for (int i = 0; i != NumElts; ++i) {
36154        int M = ShuffleMask[i];
36155        if (!DemandedElts[i] || ShuffleUndef[i])
36156          continue;
36157        int Op = M / NumElts;
36158        int Index = M % NumElts;
36159        if (M < 0 || Index != i) {
36160          IdentityOp.clearAllBits();
36161          break;
36162        }
36163        IdentityOp &= APInt::getOneBitSet(NumOps, Op);
36164        if (IdentityOp == 0)
36165          break;
36166      }
36167      assert((IdentityOp == 0 || IdentityOp.countPopulation() == 1) &&
36168             "Multiple identity shuffles detected");
36169
36170      if (IdentityOp != 0)
36171        return DAG.getBitcast(VT, ShuffleOps[IdentityOp.countTrailingZeros()]);
36172    }
36173  }
36174
36175  return TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
36176      Op, DemandedBits, DemandedElts, DAG, Depth);
36177}
36178
36179// Helper to peek through bitops/setcc to determine size of source vector.
36180// Allows combineBitcastvxi1 to determine what size vector generated a <X x i1>.
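// For example, an (xor (setcc A, B, cc), (setcc C, D, cc)) where A..D are
// v8i64 placeholders reports a source vector size of 512 bits.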
36181static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size) {
36182  switch (Src.getOpcode()) {
36183  case ISD::SETCC:
36184    return Src.getOperand(0).getValueSizeInBits() == Size;
36185  case ISD::AND:
36186  case ISD::XOR:
36187  case ISD::OR:
36188    return checkBitcastSrcVectorSize(Src.getOperand(0), Size) &&
36189           checkBitcastSrcVectorSize(Src.getOperand(1), Size);
36190  }
36191  return false;
36192}
36193
36194// Helper to push sign extension of vXi1 SETCC result through bitops.
36195static SDValue signExtendBitcastSrcVector(SelectionDAG &DAG, EVT SExtVT,
36196                                          SDValue Src, const SDLoc &DL) {
36197  switch (Src.getOpcode()) {
36198  case ISD::SETCC:
36199    return DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
36200  case ISD::AND:
36201  case ISD::XOR:
36202  case ISD::OR:
36203    return DAG.getNode(
36204        Src.getOpcode(), DL, SExtVT,
36205        signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(0), DL),
36206        signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL));
36207  }
36208  llvm_unreachable("Unexpected node type for vXi1 sign extension");
36209}
36210
36211// Try to match patterns such as
36212// (i16 bitcast (v16i1 x))
36213// ->
36214// (i16 movmsk (16i8 sext (v16i1 x)))
36215// before the illegal vector is scalarized on subtargets that don't have legal
36216// vxi1 types.
36217static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src,
36218                                  const SDLoc &DL,
36219                                  const X86Subtarget &Subtarget) {
36220  EVT SrcVT = Src.getValueType();
36221  if (!SrcVT.isSimple() || SrcVT.getScalarType() != MVT::i1)
36222    return SDValue();
36223
36224  // If the input is a truncate from v16i8 or v32i8 go ahead and use a
36225  // movmskb even with avx512. This will be better than truncating to vXi1 and
36226  // using a kmov. This can especially help KNL if the input is a v16i8/v32i8
36227  // vpcmpeqb/vpcmpgtb.
36228  bool IsTruncated = Src.getOpcode() == ISD::TRUNCATE && Src.hasOneUse() &&
36229                     (Src.getOperand(0).getValueType() == MVT::v16i8 ||
36230                      Src.getOperand(0).getValueType() == MVT::v32i8 ||
36231                      Src.getOperand(0).getValueType() == MVT::v64i8);
36232
36233  // With AVX512 vxi1 types are legal and we prefer using k-regs.
36234  // MOVMSK is supported in SSE2 or later.
36235  if (!Subtarget.hasSSE2() || (Subtarget.hasAVX512() && !IsTruncated))
36236    return SDValue();
36237
36238  // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v4f64 and
36239  // v8f64. So all legal 128-bit and 256-bit vectors are covered except for
36240  // v8i16 and v16i16.
36241  // For these two cases, we can shuffle the upper element bytes to a
36242  // consecutive sequence at the start of the vector and treat the results as
36243  // v16i8 or v32i8, and for v16i8 this is the preferable solution. However,
36244  // for v16i16 this is not the case, because the shuffle is expensive, so we
36245  // avoid sign-extending to this type entirely.
36246  // For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
36247  // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
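  // For instance (illustrative), the v8i1 path below becomes:
  //   t0 = (v8i16 sext X); t1 = (v16i8 PACKSS t0, undef); t2 = (MOVMSK t1)
  // and the low 8 bits of t2 hold the original mask.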
36248  MVT SExtVT;
36249  bool PropagateSExt = false;
36250  switch (SrcVT.getSimpleVT().SimpleTy) {
36251  default:
36252    return SDValue();
36253  case MVT::v2i1:
36254    SExtVT = MVT::v2i64;
36255    break;
36256  case MVT::v4i1:
36257    SExtVT = MVT::v4i32;
36258    // For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
36259    // sign-extend to a 256-bit operation to avoid truncation.
36260    if (Subtarget.hasAVX() && checkBitcastSrcVectorSize(Src, 256)) {
36261      SExtVT = MVT::v4i64;
36262      PropagateSExt = true;
36263    }
36264    break;
36265  case MVT::v8i1:
36266    SExtVT = MVT::v8i16;
36267    // For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)),
36268    // sign-extend to a 256-bit operation to match the compare.
36269    // If the setcc operand is 128-bit, prefer sign-extending to 128-bit over
36270    // 256-bit because the shuffle is cheaper than sign extending the result of
36271    // the compare.
36272    if (Subtarget.hasAVX() && (checkBitcastSrcVectorSize(Src, 256) ||
36273                               checkBitcastSrcVectorSize(Src, 512))) {
36274      SExtVT = MVT::v8i32;
36275      PropagateSExt = true;
36276    }
36277    break;
36278  case MVT::v16i1:
36279    SExtVT = MVT::v16i8;
36280    // For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
36281    // it is not profitable to sign-extend to 256-bit because this will
36282    // require an extra cross-lane shuffle which is more expensive than
36283    // truncating the result of the compare to 128-bits.
36284    break;
36285  case MVT::v32i1:
36286    SExtVT = MVT::v32i8;
36287    break;
36288  case MVT::v64i1:
36289    // If we have AVX512F, but not AVX512BW and the input is truncated from
36290    // v64i8 checked earlier. Then split the input and make two pmovmskbs.
36291    if (Subtarget.hasAVX512() && !Subtarget.hasBWI()) {
36292      SExtVT = MVT::v64i8;
36293      break;
36294    }
36295    return SDValue();
36296  };
36297
36298  SDValue V = PropagateSExt ? signExtendBitcastSrcVector(DAG, SExtVT, Src, DL)
36299                            : DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
36300
36301  if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8 || SExtVT == MVT::v64i8) {
36302    V = getPMOVMSKB(DL, V, DAG, Subtarget);
36303  } else {
36304    if (SExtVT == MVT::v8i16)
36305      V = DAG.getNode(X86ISD::PACKSS, DL, MVT::v16i8, V,
36306                      DAG.getUNDEF(MVT::v8i16));
36307    V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
36308  }
36309
36310  EVT IntVT =
36311      EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements());
36312  V = DAG.getZExtOrTrunc(V, DL, IntVT);
36313  return DAG.getBitcast(VT, V);
36314}
36315
36316// Convert a vXi1 constant build vector to the same width scalar integer.
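// e.g. (illustrative) (v4i1 <1,0,1,1>) becomes (i4 0b1101), with element 0
// mapping to bit 0.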
36317static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
36318  EVT SrcVT = Op.getValueType();
36319  assert(SrcVT.getVectorElementType() == MVT::i1 &&
36320         "Expected a vXi1 vector");
36321  assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
36322         "Expected a constant build vector");
36323
36324  APInt Imm(SrcVT.getVectorNumElements(), 0);
36325  for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
36326    SDValue In = Op.getOperand(Idx);
36327    if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1))
36328      Imm.setBit(Idx);
36329  }
36330  EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
36331  return DAG.getConstant(Imm, SDLoc(Op), IntVT);
36332}
36333
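// Fold bitcasts surrounding vXi1 logic ops so the arithmetic stays in one
// domain, e.g. (illustrative, for an AVX512 target):
//   (i16 bitcast (xor (v16i1 bitcast X:i16), Y:v16i1))
//     -> (xor X, (i16 bitcast Y))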
36334static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG,
36335                                           TargetLowering::DAGCombinerInfo &DCI,
36336                                           const X86Subtarget &Subtarget) {
36337  assert(N->getOpcode() == ISD::BITCAST && "Expected a bitcast");
36338
36339  if (!DCI.isBeforeLegalizeOps())
36340    return SDValue();
36341
36342  // Only do this if we have k-registers.
36343  if (!Subtarget.hasAVX512())
36344    return SDValue();
36345
36346  EVT DstVT = N->getValueType(0);
36347  SDValue Op = N->getOperand(0);
36348  EVT SrcVT = Op.getValueType();
36349
36350  if (!Op.hasOneUse())
36351    return SDValue();
36352
36353  // Look for logic ops.
36354  if (Op.getOpcode() != ISD::AND &&
36355      Op.getOpcode() != ISD::OR &&
36356      Op.getOpcode() != ISD::XOR)
36357    return SDValue();
36358
36359  // Make sure we have a bitcast between mask registers and a scalar type.
36360  if (!(SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
36361        DstVT.isScalarInteger()) &&
36362      !(DstVT.isVector() && DstVT.getVectorElementType() == MVT::i1 &&
36363        SrcVT.isScalarInteger()))
36364    return SDValue();
36365
36366  SDValue LHS = Op.getOperand(0);
36367  SDValue RHS = Op.getOperand(1);
36368
36369  if (LHS.hasOneUse() && LHS.getOpcode() == ISD::BITCAST &&
36370      LHS.getOperand(0).getValueType() == DstVT)
36371    return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, LHS.getOperand(0),
36372                       DAG.getBitcast(DstVT, RHS));
36373
36374  if (RHS.hasOneUse() && RHS.getOpcode() == ISD::BITCAST &&
36375      RHS.getOperand(0).getValueType() == DstVT)
36376    return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
36377                       DAG.getBitcast(DstVT, LHS), RHS.getOperand(0));
36378
36379  // If the RHS is a vXi1 build vector, this is a good reason to flip too.
36380  // Most of these have to move a constant from the scalar domain anyway.
36381  if (ISD::isBuildVectorOfConstantSDNodes(RHS.getNode())) {
36382    RHS = combinevXi1ConstantToInteger(RHS, DAG);
36383    return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
36384                       DAG.getBitcast(DstVT, LHS), RHS);
36385  }
36386
36387  return SDValue();
36388}
36389
36390static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
36391                                    const X86Subtarget &Subtarget) {
36392  SDLoc DL(BV);
36393  unsigned NumElts = BV->getNumOperands();
36394  SDValue Splat = BV->getSplatValue();
36395
36396  // Build MMX element from integer GPR or SSE float values.
36397  auto CreateMMXElement = [&](SDValue V) {
36398    if (V.isUndef())
36399      return DAG.getUNDEF(MVT::x86mmx);
36400    if (V.getValueType().isFloatingPoint()) {
36401      if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
36402        V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, V);
36403        V = DAG.getBitcast(MVT::v2i64, V);
36404        return DAG.getNode(X86ISD::MOVDQ2Q, DL, MVT::x86mmx, V);
36405      }
36406      V = DAG.getBitcast(MVT::i32, V);
36407    } else {
36408      V = DAG.getAnyExtOrTrunc(V, DL, MVT::i32);
36409    }
36410    return DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, V);
36411  };
36412
36413  // Convert build vector ops to MMX data in the bottom elements.
36414  SmallVector<SDValue, 8> Ops;
36415
36416  // Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
36417  if (Splat) {
36418    if (Splat.isUndef())
36419      return DAG.getUNDEF(MVT::x86mmx);
36420
36421    Splat = CreateMMXElement(Splat);
36422
36423    if (Subtarget.hasSSE1()) {
36424      // Unpack v8i8 to splat i8 elements to lowest 16-bits.
36425      if (NumElts == 8)
36426        Splat = DAG.getNode(
36427            ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
36428            DAG.getConstant(Intrinsic::x86_mmx_punpcklbw, DL, MVT::i32), Splat,
36429            Splat);
36430
36431      // Use PSHUFW to repeat 16-bit elements.
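      // Mask 0x00 copies 16-bit element 0 into every lane; 0x44 (fields
      // 01,00,01,00) repeats the {0,1} pair, i.e. the low 32 bits, which is
      // used for 2-element splats (illustrative).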
36432      unsigned ShufMask = (NumElts > 2 ? 0 : 0x44);
36433      return DAG.getNode(
36434          ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
36435          DAG.getTargetConstant(Intrinsic::x86_sse_pshuf_w, DL, MVT::i32),
36436          Splat, DAG.getTargetConstant(ShufMask, DL, MVT::i8));
36437    }
36438    Ops.append(NumElts, Splat);
36439  } else {
36440    for (unsigned i = 0; i != NumElts; ++i)
36441      Ops.push_back(CreateMMXElement(BV->getOperand(i)));
36442  }
36443
36444  // Use tree of PUNPCKLs to build up general MMX vector.
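  // e.g. (illustrative) for v4i16 {a,b,c,d}: PUNPCKLWD(a,b) and PUNPCKLWD(c,d)
  // interleave the low words, then PUNPCKLDQ of those two results yields the
  // full 64-bit {a,b,c,d} value.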
36445  while (Ops.size() > 1) {
36446    unsigned NumOps = Ops.size();
36447    unsigned IntrinOp =
36448        (NumOps == 2 ? Intrinsic::x86_mmx_punpckldq
36449                     : (NumOps == 4 ? Intrinsic::x86_mmx_punpcklwd
36450                                    : Intrinsic::x86_mmx_punpcklbw));
36451    SDValue Intrin = DAG.getConstant(IntrinOp, DL, MVT::i32);
36452    for (unsigned i = 0; i != NumOps; i += 2)
36453      Ops[i / 2] = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, Intrin,
36454                               Ops[i], Ops[i + 1]);
36455    Ops.resize(NumOps / 2);
36456  }
36457
36458  return Ops[0];
36459}
36460
36461static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
36462                              TargetLowering::DAGCombinerInfo &DCI,
36463                              const X86Subtarget &Subtarget) {
36464  SDValue N0 = N->getOperand(0);
36465  EVT VT = N->getValueType(0);
36466  EVT SrcVT = N0.getValueType();
36467
36468  // Try to match patterns such as
36469  // (i16 bitcast (v16i1 x))
36470  // ->
36471  // (i16 movmsk (16i8 sext (v16i1 x)))
36472  // before the setcc result is scalarized on subtargets that don't have legal
36473  // vxi1 types.
36474  if (DCI.isBeforeLegalize()) {
36475    SDLoc dl(N);
36476    if (SDValue V = combineBitcastvxi1(DAG, VT, N0, dl, Subtarget))
36477      return V;
36478
36479    // Recognize the IR pattern for the movmsk intrinsic under SSE1 befoer type
36480    // legalization destroys the v4i32 type.
36481    if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && SrcVT == MVT::v4i1 &&
36482        VT.isScalarInteger() && N0.getOpcode() == ISD::SETCC &&
36483        N0.getOperand(0).getValueType() == MVT::v4i32 &&
36484        ISD::isBuildVectorAllZeros(N0.getOperand(1).getNode()) &&
36485        cast<CondCodeSDNode>(N0.getOperand(2))->get() == ISD::SETLT) {
36486      SDValue N00 = N0.getOperand(0);
36487      // Only do this if we can avoid scalarizing the input.
36488      if (ISD::isNormalLoad(N00.getNode()) ||
36489          (N00.getOpcode() == ISD::BITCAST &&
36490           N00.getOperand(0).getValueType() == MVT::v4f32)) {
36491        SDValue V = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32,
36492                                DAG.getBitcast(MVT::v4f32, N00));
36493        return DAG.getZExtOrTrunc(V, dl, VT);
36494      }
36495    }
36496
36497    // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
36498    // type, widen both sides to avoid a trip through memory.
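    // e.g. (illustrative) (v4i1 bitcast X:i4) is handled by any-extending X to
    // i8, bitcasting that to v8i1 and extracting the low v4i1 subvector.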
36499    if ((VT == MVT::v4i1 || VT == MVT::v2i1) && SrcVT.isScalarInteger() &&
36500        Subtarget.hasAVX512()) {
36501      N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, N0);
36502      N0 = DAG.getBitcast(MVT::v8i1, N0);
36503      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, N0,
36504                         DAG.getIntPtrConstant(0, dl));
36505    }
36506
36507    // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
36508    // type, widen both sides to avoid a trip through memory.
36509    if ((SrcVT == MVT::v4i1 || SrcVT == MVT::v2i1) && VT.isScalarInteger() &&
36510        Subtarget.hasAVX512()) {
36511      // Use zeros for the widening if we already have some zeroes. This can
36512      // allow SimplifyDemandedBits to remove scalar ANDs that may be down
36513      // stream of this.
36514      // FIXME: It might make sense to detect a concat_vectors with a mix of
36515      // zeroes and undef and turn it into insert_subvector for i1 vectors as
36516      // a separate combine. What we can't do is canonicalize the operands of
36517      // such a concat or we'll get into a loop with SimplifyDemandedBits.
36518      if (N0.getOpcode() == ISD::CONCAT_VECTORS) {
36519        SDValue LastOp = N0.getOperand(N0.getNumOperands() - 1);
36520        if (ISD::isBuildVectorAllZeros(LastOp.getNode())) {
36521          SrcVT = LastOp.getValueType();
36522          unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
36523          SmallVector<SDValue, 4> Ops(N0->op_begin(), N0->op_end());
36524          Ops.resize(NumConcats, DAG.getConstant(0, dl, SrcVT));
36525          N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
36526          N0 = DAG.getBitcast(MVT::i8, N0);
36527          return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
36528        }
36529      }
36530
36531      unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
36532      SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
36533      Ops[0] = N0;
36534      N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
36535      N0 = DAG.getBitcast(MVT::i8, N0);
36536      return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
36537    }
36538  }
36539
36540  // Look for (i8 (bitcast (v8i1 (extract_subvector (v16i1 X), 0)))) and
36541  // replace with (i8 (trunc (i16 (bitcast (v16i1 X))))). This can occur
36542  // due to insert_subvector legalization on KNL. By promoting the copy to i16
36543  // we can help with known bits propagation from the vXi1 domain to the
36544  // scalar domain.
36545  if (VT == MVT::i8 && SrcVT == MVT::v8i1 && Subtarget.hasAVX512() &&
36546      !Subtarget.hasDQI() && N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
36547      N0.getOperand(0).getValueType() == MVT::v16i1 &&
36548      isNullConstant(N0.getOperand(1)))
36549    return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT,
36550                       DAG.getBitcast(MVT::i16, N0.getOperand(0)));
36551
36552  // Combine (bitcast (vbroadcast_load)) -> (vbroadcast_load). The memory VT
36553  // determines the number of bits loaded. Remaining bits are zero.
36554  if (N0.getOpcode() == X86ISD::VBROADCAST_LOAD && N0.hasOneUse() &&
36555      VT.getScalarSizeInBits() == SrcVT.getScalarSizeInBits()) {
36556    auto *BCast = cast<MemIntrinsicSDNode>(N0);
36557    SDVTList Tys = DAG.getVTList(VT, MVT::Other);
36558    SDValue Ops[] = { BCast->getChain(), BCast->getBasePtr() };
36559    SDValue ResNode =
36560        DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
36561                                VT.getVectorElementType(),
36562                                BCast->getMemOperand());
36563    DAG.ReplaceAllUsesOfValueWith(SDValue(BCast, 1), ResNode.getValue(1));
36564    return ResNode;
36565  }
36566
36567  // Since MMX types are special and don't usually play with other vector types,
36568  // it's better to handle them early to be sure we emit efficient code by
36569  // avoiding store-load conversions.
36570  if (VT == MVT::x86mmx) {
36571    // Detect MMX constant vectors.
36572    APInt UndefElts;
36573    SmallVector<APInt, 1> EltBits;
36574    if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
36575      SDLoc DL(N0);
36576      // Handle zero-extension of i32 with MOVD.
36577      if (EltBits[0].countLeadingZeros() >= 32)
36578        return DAG.getNode(X86ISD::MMX_MOVW2D, DL, VT,
36579                           DAG.getConstant(EltBits[0].trunc(32), DL, MVT::i32));
36580      // Else, bitcast to a double.
36581      // TODO - investigate supporting sext 32-bit immediates on x86_64.
36582      APFloat F64(APFloat::IEEEdouble(), EltBits[0]);
36583      return DAG.getBitcast(VT, DAG.getConstantFP(F64, DL, MVT::f64));
36584    }
36585
36586    // Detect bitcasts to x86mmx low word.
36587    if (N0.getOpcode() == ISD::BUILD_VECTOR &&
36588        (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) &&
36589        N0.getOperand(0).getValueType() == SrcVT.getScalarType()) {
36590      bool LowUndef = true, AllUndefOrZero = true;
36591      for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
36592        SDValue Op = N0.getOperand(i);
36593        LowUndef &= Op.isUndef() || (i >= e/2);
36594        AllUndefOrZero &= (Op.isUndef() || isNullConstant(Op));
36595      }
36596      if (AllUndefOrZero) {
36597        SDValue N00 = N0.getOperand(0);
36598        SDLoc dl(N00);
36599        N00 = LowUndef ? DAG.getAnyExtOrTrunc(N00, dl, MVT::i32)
36600                       : DAG.getZExtOrTrunc(N00, dl, MVT::i32);
36601        return DAG.getNode(X86ISD::MMX_MOVW2D, dl, VT, N00);
36602      }
36603    }
36604
36605    // Detect bitcasts of 64-bit build vectors and convert to a
36606    // MMX UNPCK/PSHUFW which takes MMX type inputs with the value in the
36607    // lowest element.
36608    if (N0.getOpcode() == ISD::BUILD_VECTOR &&
36609        (SrcVT == MVT::v2f32 || SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 ||
36610         SrcVT == MVT::v8i8))
36611      return createMMXBuildVector(cast<BuildVectorSDNode>(N0), DAG, Subtarget);
36612
36613    // Detect bitcasts between element or subvector extraction to x86mmx.
36614    if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
36615         N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) &&
36616        isNullConstant(N0.getOperand(1))) {
36617      SDValue N00 = N0.getOperand(0);
36618      if (N00.getValueType().is128BitVector())
36619        return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT,
36620                           DAG.getBitcast(MVT::v2i64, N00));
36621    }
36622
36623    // Detect bitcasts from FP_TO_SINT to x86mmx.
36624    if (SrcVT == MVT::v2i32 && N0.getOpcode() == ISD::FP_TO_SINT) {
36625      SDLoc DL(N0);
36626      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
36627                                DAG.getUNDEF(MVT::v2i32));
36628      return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT,
36629                         DAG.getBitcast(MVT::v2i64, Res));
36630    }
36631  }
36632
36633  // Try to remove a bitcast of constant vXi1 vector. We have to legalize
36634  // most of these to scalar anyway.
36635  if (Subtarget.hasAVX512() && VT.isScalarInteger() &&
36636      SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
36637      ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
36638    return combinevXi1ConstantToInteger(N0, DAG);
36639  }
36640
36641  if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
36642      VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
36643      isa<ConstantSDNode>(N0)) {
36644    auto *C = cast<ConstantSDNode>(N0);
36645    if (C->isAllOnesValue())
36646      return DAG.getConstant(1, SDLoc(N0), VT);
36647    if (C->isNullValue())
36648      return DAG.getConstant(0, SDLoc(N0), VT);
36649  }
36650
36651  // Try to remove bitcasts from input and output of mask arithmetic to
36652  // remove GPR<->K-register crossings.
36653  if (SDValue V = combineCastedMaskArithmetic(N, DAG, DCI, Subtarget))
36654    return V;
36655
36656  // Convert a bitcasted integer logic operation that has one bitcasted
36657  // floating-point operand into a floating-point logic operation. This may
36658  // create a load of a constant, but that is cheaper than materializing the
36659  // constant in an integer register and transferring it to an SSE register or
36660  // transferring the SSE operand to integer register and back.
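  // e.g. (illustrative) (f32 bitcast (and (i32 bitcast X:f32), Y:i32))
  //   -> (FAND X, (f32 bitcast Y))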
36661  unsigned FPOpcode;
36662  switch (N0.getOpcode()) {
36663    case ISD::AND: FPOpcode = X86ISD::FAND; break;
36664    case ISD::OR:  FPOpcode = X86ISD::FOR;  break;
36665    case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
36666    default: return SDValue();
36667  }
36668
36669  if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
36670        (Subtarget.hasSSE2() && VT == MVT::f64)))
36671    return SDValue();
36672
36673  SDValue LogicOp0 = N0.getOperand(0);
36674  SDValue LogicOp1 = N0.getOperand(1);
36675  SDLoc DL0(N0);
36676
36677  // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
36678  if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST &&
36679      LogicOp0.hasOneUse() && LogicOp0.getOperand(0).getValueType() == VT &&
36680      !isa<ConstantSDNode>(LogicOp0.getOperand(0))) {
36681    SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1);
36682    return DAG.getNode(FPOpcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1);
36683  }
36684  // bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y)
36685  if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST &&
36686      LogicOp1.hasOneUse() && LogicOp1.getOperand(0).getValueType() == VT &&
36687      !isa<ConstantSDNode>(LogicOp1.getOperand(0))) {
36688    SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
36689    return DAG.getNode(FPOpcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
36690  }
36691
36692  return SDValue();
36693}
36694
36695// Given a ABS node, detect the following pattern:
36696// (ABS (SUB (ZERO_EXTEND a), (ZERO_EXTEND b))).
36697// This is useful as it is the input into a SAD pattern.
36698static bool detectZextAbsDiff(const SDValue &Abs, SDValue &Op0, SDValue &Op1) {
36699  SDValue AbsOp1 = Abs->getOperand(0);
36700  if (AbsOp1.getOpcode() != ISD::SUB)
36701    return false;
36702
36703  Op0 = AbsOp1.getOperand(0);
36704  Op1 = AbsOp1.getOperand(1);
36705
36706  // Check if the operands of the sub are zero-extended from vectors of i8.
36707  if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
36708      Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
36709      Op1.getOpcode() != ISD::ZERO_EXTEND ||
36710      Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
36711    return false;
36712
36713  return true;
36714}
36715
36716// Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs
36717// to these zexts.
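// e.g. (illustrative) for <4 x i8> inputs, each operand is widened to v16i8 by
// concatenating zero vectors, and a single v2i64 PSADBW is emitted.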
36718static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
36719                            const SDValue &Zext1, const SDLoc &DL,
36720                            const X86Subtarget &Subtarget) {
36721  // Find the appropriate width for the PSADBW.
36722  EVT InVT = Zext0.getOperand(0).getValueType();
36723  unsigned RegSize = std::max(128u, (unsigned)InVT.getSizeInBits());
36724
36725  // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
36726  // fill in the missing vector elements with 0.
36727  unsigned NumConcat = RegSize / InVT.getSizeInBits();
36728  SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
36729  Ops[0] = Zext0.getOperand(0);
36730  MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
36731  SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
36732  Ops[0] = Zext1.getOperand(0);
36733  SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
36734
36735  // Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
36736  auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
36737                          ArrayRef<SDValue> Ops) {
36738    MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
36739    return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops);
36740  };
36741  MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
36742  return SplitOpsAndApply(DAG, Subtarget, DL, SadVT, { SadOp0, SadOp1 },
36743                          PSADBWBuilder);
36744}
36745
36746// Attempt to replace a min/max v8i16/v16i8 horizontal reduction with
36747// PHMINPOSUW.
36748static SDValue combineHorizontalMinMaxResult(SDNode *Extract, SelectionDAG &DAG,
36749                                             const X86Subtarget &Subtarget) {
36750  // Bail without SSE41.
36751  if (!Subtarget.hasSSE41())
36752    return SDValue();
36753
36754  EVT ExtractVT = Extract->getValueType(0);
36755  if (ExtractVT != MVT::i16 && ExtractVT != MVT::i8)
36756    return SDValue();
36757
36758  // Check for SMAX/SMIN/UMAX/UMIN horizontal reduction patterns.
36759  ISD::NodeType BinOp;
36760  SDValue Src = DAG.matchBinOpReduction(
36761      Extract, BinOp, {ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN}, true);
36762  if (!Src)
36763    return SDValue();
36764
36765  EVT SrcVT = Src.getValueType();
36766  EVT SrcSVT = SrcVT.getScalarType();
36767  if (SrcSVT != ExtractVT || (SrcVT.getSizeInBits() % 128) != 0)
36768    return SDValue();
36769
36770  SDLoc DL(Extract);
36771  SDValue MinPos = Src;
36772
36773  // First, reduce the source down to 128-bit, applying BinOp to lo/hi.
36774  while (SrcVT.getSizeInBits() > 128) {
36775    unsigned NumElts = SrcVT.getVectorNumElements();
36776    unsigned NumSubElts = NumElts / 2;
36777    SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcSVT, NumSubElts);
36778    unsigned SubSizeInBits = SrcVT.getSizeInBits();
36779    SDValue Lo = extractSubVector(MinPos, 0, DAG, DL, SubSizeInBits);
36780    SDValue Hi = extractSubVector(MinPos, NumSubElts, DAG, DL, SubSizeInBits);
36781    MinPos = DAG.getNode(BinOp, DL, SrcVT, Lo, Hi);
36782  }
36783  assert(((SrcVT == MVT::v8i16 && ExtractVT == MVT::i16) ||
36784          (SrcVT == MVT::v16i8 && ExtractVT == MVT::i8)) &&
36785         "Unexpected value type");
36786
36787  // PHMINPOSUW applies to UMIN(v8i16), for SMIN/SMAX/UMAX we must apply a mask
36788  // to flip the value accordingly.
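  // e.g. (illustrative) for SMAX we XOR each element with 0x7FFF, which maps
  // the signed-largest value to the unsigned-smallest one, run PHMINPOSUW,
  // then XOR with the same mask afterwards to recover the original value.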
36789  SDValue Mask;
36790  unsigned MaskEltsBits = ExtractVT.getSizeInBits();
36791  if (BinOp == ISD::SMAX)
36792    Mask = DAG.getConstant(APInt::getSignedMaxValue(MaskEltsBits), DL, SrcVT);
36793  else if (BinOp == ISD::SMIN)
36794    Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT);
36795  else if (BinOp == ISD::UMAX)
36796    Mask = DAG.getConstant(APInt::getAllOnesValue(MaskEltsBits), DL, SrcVT);
36797
36798  if (Mask)
36799    MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
36800
36801  // For v16i8 cases we need to perform UMIN on pairs of byte elements,
36802  // shuffling each upper element down and insert zeros. This means that the
36803  // v16i8 UMIN will leave the upper element as zero, performing zero-extension
36804  // ready for the PHMINPOS.
36805  if (ExtractVT == MVT::i8) {
36806    SDValue Upper = DAG.getVectorShuffle(
36807        SrcVT, DL, MinPos, DAG.getConstant(0, DL, MVT::v16i8),
36808        {1, 16, 3, 16, 5, 16, 7, 16, 9, 16, 11, 16, 13, 16, 15, 16});
36809    MinPos = DAG.getNode(ISD::UMIN, DL, SrcVT, MinPos, Upper);
36810  }
36811
36812  // Perform the PHMINPOS on a v8i16 vector.
36813  MinPos = DAG.getBitcast(MVT::v8i16, MinPos);
36814  MinPos = DAG.getNode(X86ISD::PHMINPOS, DL, MVT::v8i16, MinPos);
36815  MinPos = DAG.getBitcast(SrcVT, MinPos);
36816
36817  if (Mask)
36818    MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
36819
36820  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, MinPos,
36821                     DAG.getIntPtrConstant(0, DL));
36822}
36823
36824// Attempt to replace an all_of/any_of/parity style horizontal reduction with a MOVMSK.
36825static SDValue combineHorizontalPredicateResult(SDNode *Extract,
36826                                                SelectionDAG &DAG,
36827                                                const X86Subtarget &Subtarget) {
36828  // Bail without SSE2.
36829  if (!Subtarget.hasSSE2())
36830    return SDValue();
36831
36832  EVT ExtractVT = Extract->getValueType(0);
36833  unsigned BitWidth = ExtractVT.getSizeInBits();
36834  if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 &&
36835      ExtractVT != MVT::i8 && ExtractVT != MVT::i1)
36836    return SDValue();
36837
36838  // Check for OR(any_of)/AND(all_of)/XOR(parity) horizontal reduction patterns.
36839  ISD::NodeType BinOp;
36840  SDValue Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::OR, ISD::AND});
36841  if (!Match && ExtractVT == MVT::i1)
36842    Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::XOR});
36843  if (!Match)
36844    return SDValue();
36845
36846  // EXTRACT_VECTOR_ELT can require implicit extension of the vector element
36847  // which we can't support here for now.
36848  if (Match.getScalarValueSizeInBits() != BitWidth)
36849    return SDValue();
36850
36851  SDValue Movmsk;
36852  SDLoc DL(Extract);
36853  EVT MatchVT = Match.getValueType();
36854  unsigned NumElts = MatchVT.getVectorNumElements();
36855  unsigned MaxElts = Subtarget.hasInt256() ? 32 : 16;
36856  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
36857
36858  if (ExtractVT == MVT::i1) {
36859    // Special case for (pre-legalization) vXi1 reductions.
36860    if (NumElts > 64 || !isPowerOf2_32(NumElts))
36861      return SDValue();
36862    if (TLI.isTypeLegal(MatchVT)) {
36863      // If this is a legal AVX512 predicate type then we can just bitcast.
36864      EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
36865      Movmsk = DAG.getBitcast(MovmskVT, Match);
36866    } else {
36867      // Use combineBitcastvxi1 to create the MOVMSK.
36868      while (NumElts > MaxElts) {
36869        SDValue Lo, Hi;
36870        std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
36871        Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
36872        NumElts /= 2;
36873      }
36874      EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
36875      Movmsk = combineBitcastvxi1(DAG, MovmskVT, Match, DL, Subtarget);
36876    }
36877    if (!Movmsk)
36878      return SDValue();
36879    Movmsk = DAG.getZExtOrTrunc(Movmsk, DL, NumElts > 32 ? MVT::i64 : MVT::i32);
36880  } else {
36881    // Bail with AVX512VL (which uses predicate registers).
36882    if (Subtarget.hasVLX())
36883      return SDValue();
36884
36885    unsigned MatchSizeInBits = Match.getValueSizeInBits();
36886    if (!(MatchSizeInBits == 128 ||
36887          (MatchSizeInBits == 256 && Subtarget.hasAVX())))
36888      return SDValue();
36889
36890    // Make sure this isn't a vector of 1 element. The perf win from using
36891    // MOVMSK diminishes with fewer elements in the reduction, but it is
36892    // generally better to get the comparison over to the GPRs as soon as
36893    // possible to reduce the number of vector ops.
36894    if (Match.getValueType().getVectorNumElements() < 2)
36895      return SDValue();
36896
36897    // Check that we are extracting a reduction of all sign bits.
36898    if (DAG.ComputeNumSignBits(Match) != BitWidth)
36899      return SDValue();
36900
36901    if (MatchSizeInBits == 256 && BitWidth < 32 && !Subtarget.hasInt256()) {
36902      SDValue Lo, Hi;
36903      std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
36904      Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
36905      MatchSizeInBits = Match.getValueSizeInBits();
36906    }
36907
36908    // For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB.
36909    MVT MaskSrcVT;
36910    if (64 == BitWidth || 32 == BitWidth)
36911      MaskSrcVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth),
36912                                   MatchSizeInBits / BitWidth);
36913    else
36914      MaskSrcVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8);
36915
36916    SDValue BitcastLogicOp = DAG.getBitcast(MaskSrcVT, Match);
36917    Movmsk = getPMOVMSKB(DL, BitcastLogicOp, DAG, Subtarget);
36918    NumElts = MaskSrcVT.getVectorNumElements();
36919  }
36920  assert((NumElts <= 32 || NumElts == 64) &&
36921         "Not expecting more than 64 elements");
36922
36923  MVT CmpVT = NumElts == 64 ? MVT::i64 : MVT::i32;
36924  if (BinOp == ISD::XOR) {
36925    // parity -> (AND (CTPOP(MOVMSK X)), 1)
36926    SDValue Mask = DAG.getConstant(1, DL, CmpVT);
36927    SDValue Result = DAG.getNode(ISD::CTPOP, DL, CmpVT, Movmsk);
36928    Result = DAG.getNode(ISD::AND, DL, CmpVT, Result, Mask);
36929    return DAG.getZExtOrTrunc(Result, DL, ExtractVT);
36930  }
36931
36932  SDValue CmpC;
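  // e.g. (illustrative) for a v4i32 any_of on SSE2, MOVMSKPS yields a 4-bit
  // mask and the reduction becomes (setcc (movmsk X), 0, ne); all_of compares
  // the mask against 0b1111 instead.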
36933  ISD::CondCode CondCode;
36934  if (BinOp == ISD::OR) {
36935    // any_of -> MOVMSK != 0
36936    CmpC = DAG.getConstant(0, DL, CmpVT);
36937    CondCode = ISD::CondCode::SETNE;
36938  } else {
36939    // all_of -> MOVMSK == ((1 << NumElts) - 1)
36940    CmpC = DAG.getConstant(APInt::getLowBitsSet(CmpVT.getSizeInBits(), NumElts),
36941                           DL, CmpVT);
36942    CondCode = ISD::CondCode::SETEQ;
36943  }
36944
36945  // The setcc produces an i8 of 0/1, so extend that to the result width and
36946  // negate to get the final 0/-1 mask value.
36947  EVT SetccVT =
36948      TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT);
36949  SDValue Setcc = DAG.getSetCC(DL, SetccVT, Movmsk, CmpC, CondCode);
36950  SDValue Zext = DAG.getZExtOrTrunc(Setcc, DL, ExtractVT);
36951  SDValue Zero = DAG.getConstant(0, DL, ExtractVT);
36952  return DAG.getNode(ISD::SUB, DL, ExtractVT, Zero, Zext);
36953}
36954
36955static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
36956                                      const X86Subtarget &Subtarget) {
36957  // PSADBW is only supported on SSE2 and up.
36958  if (!Subtarget.hasSSE2())
36959    return SDValue();
36960
36961  // Verify the type we're extracting from is any integer type above i16.
36962  EVT VT = Extract->getOperand(0).getValueType();
36963  if (!VT.isSimple() || !(VT.getVectorElementType().getSizeInBits() > 16))
36964    return SDValue();
36965
36966  unsigned RegSize = 128;
36967  if (Subtarget.useBWIRegs())
36968    RegSize = 512;
36969  else if (Subtarget.hasAVX())
36970    RegSize = 256;
36971
36972  // We handle up to v16i* for SSE2 / v32i* for AVX / v64i* for AVX512.
36973  // TODO: We should be able to handle larger vectors by splitting them before
36974  // feeding them into several SADs, and then reducing over those.
36975  if (RegSize / VT.getVectorNumElements() < 8)
36976    return SDValue();
36977
36978  // Match shuffle + add pyramid.
36979  ISD::NodeType BinOp;
36980  SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
36981
36982  // The operand is expected to be zero extended from i8
36983  // (verified in detectZextAbsDiff).
36984  // In order to convert to i64 and above, additional any/zero/sign
36985  // extend is expected.
36986  // The zero extend from 32 bit has no mathematical effect on the result.
36987  // Also the sign extend is basically zero extend
36988  // (extends the sign bit which is zero).
36989  // So it is correct to skip the sign/zero extend instruction.
36990  if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
36991    Root.getOpcode() == ISD::ZERO_EXTEND ||
36992    Root.getOpcode() == ISD::ANY_EXTEND))
36993    Root = Root.getOperand(0);
36994
36995  // If there was a match, we want Root to be a select that is the root of an
36996  // abs-diff pattern.
36997  if (!Root || Root.getOpcode() != ISD::ABS)
36998    return SDValue();
36999
37000  // Check whether we have an abs-diff pattern feeding into the ABS.
37001  SDValue Zext0, Zext1;
37002  if (!detectZextAbsDiff(Root, Zext0, Zext1))
37003    return SDValue();
37004
37005  // Create the SAD instruction.
37006  SDLoc DL(Extract);
37007  SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL, Subtarget);
37008
37009  // If the original vector was wider than 8 elements, sum over the results
37010  // in the SAD vector.
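  // e.g. (illustrative) a multi-lane SAD result is reduced by repeatedly
  // shuffling its upper lanes down and adding, until the total is in lane 0.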
37011  unsigned Stages = Log2_32(VT.getVectorNumElements());
37012  MVT SadVT = SAD.getSimpleValueType();
37013  if (Stages > 3) {
37014    unsigned SadElems = SadVT.getVectorNumElements();
37015
37016    for (unsigned i = Stages - 3; i > 0; --i) {
37017      SmallVector<int, 16> Mask(SadElems, -1);
37018      for (unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
37019        Mask[j] = MaskEnd + j;
37020
37021      SDValue Shuffle =
37022          DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask);
37023      SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle);
37024    }
37025  }
37026
37027  MVT Type = Extract->getSimpleValueType(0);
37028  unsigned TypeSizeInBits = Type.getSizeInBits();
37029  // Return the lowest TypeSizeInBits bits.
37030  MVT ResVT = MVT::getVectorVT(Type, SadVT.getSizeInBits() / TypeSizeInBits);
37031  SAD = DAG.getBitcast(ResVT, SAD);
37032  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Type, SAD,
37033                     Extract->getOperand(1));
37034}
37035
37036// Attempt to peek through a target shuffle and extract the scalar from the
37037// source.
37038static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
37039                                         TargetLowering::DAGCombinerInfo &DCI,
37040                                         const X86Subtarget &Subtarget) {
37041  if (DCI.isBeforeLegalizeOps())
37042    return SDValue();
37043
37044  SDLoc dl(N);
37045  SDValue Src = N->getOperand(0);
37046  SDValue Idx = N->getOperand(1);
37047
37048  EVT VT = N->getValueType(0);
37049  EVT SrcVT = Src.getValueType();
37050  EVT SrcSVT = SrcVT.getVectorElementType();
37051  unsigned NumSrcElts = SrcVT.getVectorNumElements();
37052
37053  // Don't attempt this for boolean mask vectors or unknown extraction indices.
37054  if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx))
37055    return SDValue();
37056
37057  SDValue SrcBC = peekThroughBitcasts(Src);
37058
37059  // Handle extract(broadcast(scalar_value)), it doesn't matter what index is.
37060  if (X86ISD::VBROADCAST == SrcBC.getOpcode()) {
37061    SDValue SrcOp = SrcBC.getOperand(0);
37062    if (SrcOp.getValueSizeInBits() == VT.getSizeInBits())
37063      return DAG.getBitcast(VT, SrcOp);
37064  }
37065
37066  // If we're extracting a single element from a broadcast load and there are
37067  // no other users, just create a single load.
37068  if (SrcBC.getOpcode() == X86ISD::VBROADCAST_LOAD && SrcBC.hasOneUse()) {
37069    auto *MemIntr = cast<MemIntrinsicSDNode>(SrcBC);
37070    unsigned SrcBCWidth = SrcBC.getScalarValueSizeInBits();
37071    if (MemIntr->getMemoryVT().getSizeInBits() == SrcBCWidth &&
37072        VT.getSizeInBits() == SrcBCWidth) {
37073      SDValue Load = DAG.getLoad(VT, dl, MemIntr->getChain(),
37074                                 MemIntr->getBasePtr(),
37075                                 MemIntr->getPointerInfo(),
37076                                 MemIntr->getAlignment(),
37077                                 MemIntr->getMemOperand()->getFlags());
37078      DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
37079      return Load;
37080    }
37081  }
37082
37083  // Handle extract(truncate(x)) for 0'th index.
37084  // TODO: Treat this as a faux shuffle?
37085  // TODO: When can we use this for general indices?
37086  if (ISD::TRUNCATE == Src.getOpcode() && SrcVT.is128BitVector() &&
37087      isNullConstant(Idx)) {
37088    Src = extract128BitVector(Src.getOperand(0), 0, DAG, dl);
37089    Src = DAG.getBitcast(SrcVT, Src);
37090    return DAG.getNode(N->getOpcode(), dl, VT, Src, Idx);
37091  }
37092
37093  // Resolve the target shuffle inputs and mask.
37094  SmallVector<int, 16> Mask;
37095  SmallVector<SDValue, 2> Ops;
37096  if (!getTargetShuffleInputs(SrcBC, Ops, Mask, DAG))
37097    return SDValue();
37098
37099  // Attempt to narrow/widen the shuffle mask to the correct size.
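  // e.g. (illustrative) a 4-entry mask over an 8-element source is scaled by 2
  // so every source element gets a mask entry; a mask with too many entries
  // keeps only those covering the extracted index and is widened pairwise
  // until the counts match.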
37100  if (Mask.size() != NumSrcElts) {
37101    if ((NumSrcElts % Mask.size()) == 0) {
37102      SmallVector<int, 16> ScaledMask;
37103      int Scale = NumSrcElts / Mask.size();
37104      scaleShuffleMask<int>(Scale, Mask, ScaledMask);
37105      Mask = std::move(ScaledMask);
37106    } else if ((Mask.size() % NumSrcElts) == 0) {
37107      // Simplify Mask based on demanded element.
37108      int ExtractIdx = (int)N->getConstantOperandVal(1);
37109      int Scale = Mask.size() / NumSrcElts;
37110      int Lo = Scale * ExtractIdx;
37111      int Hi = Scale * (ExtractIdx + 1);
37112      for (int i = 0, e = (int)Mask.size(); i != e; ++i)
37113        if (i < Lo || Hi <= i)
37114          Mask[i] = SM_SentinelUndef;
37115
37116      SmallVector<int, 16> WidenedMask;
37117      while (Mask.size() > NumSrcElts &&
37118             canWidenShuffleElements(Mask, WidenedMask))
37119        Mask = std::move(WidenedMask);
37120      // TODO - investigate support for wider shuffle masks with known upper
37121      // undef/zero elements for implicit zero-extension.
37122    }
37123  }
37124
37125  // Check if narrowing/widening failed.
37126  if (Mask.size() != NumSrcElts)
37127    return SDValue();
37128
37129  int SrcIdx = Mask[N->getConstantOperandVal(1)];
37130
37131  // If the shuffle source element is undef/zero then we can just accept it.
37132  if (SrcIdx == SM_SentinelUndef)
37133    return DAG.getUNDEF(VT);
37134
37135  if (SrcIdx == SM_SentinelZero)
37136    return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
37137                                : DAG.getConstant(0, dl, VT);
37138
37139  SDValue SrcOp = Ops[SrcIdx / Mask.size()];
37140  SrcIdx = SrcIdx % Mask.size();
37141
37142  // We can only extract other elements from 128-bit vectors and in certain
37143  // circumstances, depending on SSE-level.
37144  // TODO: Investigate using extract_subvector for larger vectors.
37145  // TODO: Investigate float/double extraction if it will be just stored.
37146  if ((SrcVT == MVT::v4i32 || SrcVT == MVT::v2i64) &&
37147      ((SrcIdx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
37148    assert(SrcSVT == VT && "Unexpected extraction type");
37149    SrcOp = DAG.getBitcast(SrcVT, SrcOp);
37150    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SrcSVT, SrcOp,
37151                       DAG.getIntPtrConstant(SrcIdx, dl));
37152  }
37153
37154  if ((SrcVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
37155      (SrcVT == MVT::v16i8 && Subtarget.hasSSE41())) {
37156    assert(VT.getSizeInBits() >= SrcSVT.getSizeInBits() &&
37157           "Unexpected extraction type");
37158    unsigned OpCode = (SrcVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
37159    SrcOp = DAG.getBitcast(SrcVT, SrcOp);
37160    SDValue ExtOp = DAG.getNode(OpCode, dl, MVT::i32, SrcOp,
37161                                DAG.getIntPtrConstant(SrcIdx, dl));
37162    return DAG.getZExtOrTrunc(ExtOp, dl, VT);
37163  }
37164
37165  return SDValue();
37166}
37167
37168/// Extracting a scalar FP value from vector element 0 is free, so extract each
37169/// operand first, then perform the math as a scalar op.
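/// e.g. (illustrative) extractelt (fadd X, Y), 0
///        --> fadd (extractelt X, 0), (extractelt Y, 0)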
37170static SDValue scalarizeExtEltFP(SDNode *ExtElt, SelectionDAG &DAG) {
37171  assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Expected extract");
37172  SDValue Vec = ExtElt->getOperand(0);
37173  SDValue Index = ExtElt->getOperand(1);
37174  EVT VT = ExtElt->getValueType(0);
37175  EVT VecVT = Vec.getValueType();
37176
37177  // TODO: If this is a unary/expensive/expand op, allow extraction from a
37178  // non-zero element because the shuffle+scalar op will be cheaper?
37179  if (!Vec.hasOneUse() || !isNullConstant(Index) || VecVT.getScalarType() != VT)
37180    return SDValue();
37181
37182  // Vector FP compares don't fit the pattern of FP math ops (propagate, not
37183  // extract, the condition code), so deal with those as a special-case.
37184  if (Vec.getOpcode() == ISD::SETCC && VT == MVT::i1) {
37185    EVT OpVT = Vec.getOperand(0).getValueType().getScalarType();
37186    if (OpVT != MVT::f32 && OpVT != MVT::f64)
37187      return SDValue();
37188
37189    // extract (setcc X, Y, CC), 0 --> setcc (extract X, 0), (extract Y, 0), CC
37190    SDLoc DL(ExtElt);
37191    SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
37192                               Vec.getOperand(0), Index);
37193    SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
37194                               Vec.getOperand(1), Index);
37195    return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1, Vec.getOperand(2));
37196  }
37197
37198  if (VT != MVT::f32 && VT != MVT::f64)
37199    return SDValue();
37200
37201  // Vector FP selects don't fit the pattern of FP math ops (because the
37202  // condition has a different type and we have to change the opcode), so deal
37203  // with those here.
37204  // FIXME: This is restricted to pre type legalization by ensuring the setcc
37205  // has i1 elements. If we loosen this we need to convert vector bool to a
37206  // scalar bool.
37207  if (Vec.getOpcode() == ISD::VSELECT &&
37208      Vec.getOperand(0).getOpcode() == ISD::SETCC &&
37209      Vec.getOperand(0).getValueType().getScalarType() == MVT::i1 &&
37210      Vec.getOperand(0).getOperand(0).getValueType() == VecVT) {
37211    // ext (sel Cond, X, Y), 0 --> sel (ext Cond, 0), (ext X, 0), (ext Y, 0)
37212    SDLoc DL(ExtElt);
37213    SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
37214                               Vec.getOperand(0).getValueType().getScalarType(),
37215                               Vec.getOperand(0), Index);
37216    SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
37217                               Vec.getOperand(1), Index);
37218    SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
37219                               Vec.getOperand(2), Index);
37220    return DAG.getNode(ISD::SELECT, DL, VT, Ext0, Ext1, Ext2);
37221  }
37222
37223  // TODO: This switch could include FNEG and the x86-specific FP logic ops
37224  // (FAND, FANDN, FOR, FXOR). But that may require enhancements to avoid
37225  // missed load folding and fma+fneg combining.
37226  switch (Vec.getOpcode()) {
37227  case ISD::FMA: // Begin 3 operands
37228  case ISD::FMAD:
37229  case ISD::FADD: // Begin 2 operands
37230  case ISD::FSUB:
37231  case ISD::FMUL:
37232  case ISD::FDIV:
37233  case ISD::FREM:
37234  case ISD::FCOPYSIGN:
37235  case ISD::FMINNUM:
37236  case ISD::FMAXNUM:
37237  case ISD::FMINNUM_IEEE:
37238  case ISD::FMAXNUM_IEEE:
37239  case ISD::FMAXIMUM:
37240  case ISD::FMINIMUM:
37241  case X86ISD::FMAX:
37242  case X86ISD::FMIN:
37243  case ISD::FABS: // Begin 1 operand
37244  case ISD::FSQRT:
37245  case ISD::FRINT:
37246  case ISD::FCEIL:
37247  case ISD::FTRUNC:
37248  case ISD::FNEARBYINT:
37249  case ISD::FROUND:
37250  case ISD::FFLOOR:
37251  case X86ISD::FRCP:
37252  case X86ISD::FRSQRT: {
37253    // extract (fp X, Y, ...), 0 --> fp (extract X, 0), (extract Y, 0), ...
37254    SDLoc DL(ExtElt);
37255    SmallVector<SDValue, 4> ExtOps;
37256    for (SDValue Op : Vec->ops())
37257      ExtOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op, Index));
37258    return DAG.getNode(Vec.getOpcode(), DL, VT, ExtOps);
37259  }
37260  default:
37261    return SDValue();
37262  }
37263  llvm_unreachable("All opcodes should return within switch");
37264}
37265
37266/// Try to convert a vector reduction sequence composed of binops and shuffles
37267/// into horizontal ops.
37268static SDValue combineReductionToHorizontal(SDNode *ExtElt, SelectionDAG &DAG,
37269                                            const X86Subtarget &Subtarget) {
37270  assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unexpected caller");
37271
37272  // We need at least SSE2 to anything here.
37273  if (!Subtarget.hasSSE2())
37274    return SDValue();
37275
37276  ISD::NodeType Opc;
37277  SDValue Rdx =
37278      DAG.matchBinOpReduction(ExtElt, Opc, {ISD::ADD, ISD::FADD}, true);
37279  if (!Rdx)
37280    return SDValue();
37281
37282  SDValue Index = ExtElt->getOperand(1);
37283  assert(isNullConstant(Index) &&
37284         "Reduction doesn't end in an extract from index 0");
37285
37286  EVT VT = ExtElt->getValueType(0);
37287  EVT VecVT = Rdx.getValueType();
37288  if (VecVT.getScalarType() != VT)
37289    return SDValue();
37290
37291  SDLoc DL(ExtElt);
37292
37293  // vXi8 reduction - sub-128-bit vector.
37294  if (VecVT == MVT::v4i8 || VecVT == MVT::v8i8) {
37295    if (VecVT == MVT::v4i8) {
37296      // Pad with zero.
37297      if (Subtarget.hasSSE41()) {
37298        Rdx = DAG.getBitcast(MVT::i32, Rdx);
37299        Rdx = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4i32,
37300                          DAG.getConstant(0, DL, MVT::v4i32), Rdx,
37301                          DAG.getIntPtrConstant(0, DL));
37302        Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
37303      } else {
37304        Rdx = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i8, Rdx,
37305                          DAG.getConstant(0, DL, VecVT));
37306      }
37307    }
37308    if (Rdx.getValueType() == MVT::v8i8) {
37309      // Pad with undef.
37310      Rdx = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, Rdx,
37311                        DAG.getUNDEF(MVT::v8i8));
37312    }
37313    Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
37314                      DAG.getConstant(0, DL, MVT::v16i8));
37315    Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
37316    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
37317  }
37318
37319  // Must be a >=128-bit vector with pow2 elements.
37320  if ((VecVT.getSizeInBits() % 128) != 0 ||
37321      !isPowerOf2_32(VecVT.getVectorNumElements()))
37322    return SDValue();
37323
37324  // vXi8 reduction - sum lo/hi halves then use PSADBW.
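  // PSADBW against zero sums each group of 8 bytes into a 64-bit lane, so
  // after folding to 128 bits (and adding the high half onto the low half) the
  // byte total ends up in the low byte of lane 0 (illustrative).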
37325  if (VT == MVT::i8) {
37326    while (Rdx.getValueSizeInBits() > 128) {
37327      unsigned HalfSize = VecVT.getSizeInBits() / 2;
37328      unsigned HalfElts = VecVT.getVectorNumElements() / 2;
37329      SDValue Lo = extractSubVector(Rdx, 0, DAG, DL, HalfSize);
37330      SDValue Hi = extractSubVector(Rdx, HalfElts, DAG, DL, HalfSize);
37331      Rdx = DAG.getNode(ISD::ADD, DL, Lo.getValueType(), Lo, Hi);
37332      VecVT = Rdx.getValueType();
37333    }
37334    assert(VecVT == MVT::v16i8 && "v16i8 reduction expected");
37335
37336    SDValue Hi = DAG.getVectorShuffle(
37337        MVT::v16i8, DL, Rdx, Rdx,
37338        {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
37339    Rdx = DAG.getNode(ISD::ADD, DL, MVT::v16i8, Rdx, Hi);
37340    Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
37341                      getZeroVector(MVT::v16i8, Subtarget, DAG, DL));
37342    Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
37343    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
37344  }
37345
37346  // Only use (F)HADD opcodes if they aren't microcoded or when minimizing size.
37347  bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
37348  if (!Subtarget.hasFastHorizontalOps() && !OptForSize)
37349    return SDValue();
37350
37351  unsigned HorizOpcode = Opc == ISD::ADD ? X86ISD::HADD : X86ISD::FHADD;
37352
37353  // 256-bit horizontal instructions operate on 128-bit chunks rather than
37354  // across the whole vector, so we need an extract + hop preliminary stage.
37355  // This is the only step where the operands of the hop are not the same value.
37356  // TODO: We could extend this to handle 512-bit or even longer vectors.
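  // e.g. (illustrative) for v8i32 we HADD the high and low 128-bit halves
  // first, then two HADDs of the v4i32 result against itself finish the sum.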
37357  if (((VecVT == MVT::v16i16 || VecVT == MVT::v8i32) && Subtarget.hasSSSE3()) ||
37358      ((VecVT == MVT::v8f32 || VecVT == MVT::v4f64) && Subtarget.hasSSE3())) {
37359    unsigned NumElts = VecVT.getVectorNumElements();
37360    SDValue Hi = extract128BitVector(Rdx, NumElts / 2, DAG, DL);
37361    SDValue Lo = extract128BitVector(Rdx, 0, DAG, DL);
37362    Rdx = DAG.getNode(HorizOpcode, DL, Lo.getValueType(), Hi, Lo);
37363    VecVT = Rdx.getValueType();
37364  }
37365  if (!((VecVT == MVT::v8i16 || VecVT == MVT::v4i32) && Subtarget.hasSSSE3()) &&
37366      !((VecVT == MVT::v4f32 || VecVT == MVT::v2f64) && Subtarget.hasSSE3()))
37367    return SDValue();
37368
37369  // extract (add (shuf X), X), 0 --> extract (hadd X, X), 0
37370  unsigned ReductionSteps = Log2_32(VecVT.getVectorNumElements());
37371  for (unsigned i = 0; i != ReductionSteps; ++i)
37372    Rdx = DAG.getNode(HorizOpcode, DL, VecVT, Rdx, Rdx);
37373
37374  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
37375}
37376
37377/// Detect vector gather/scatter index generation and convert it from being a
37378/// bunch of shuffles and extracts into a somewhat faster sequence.
37379/// For i686, the best sequence is apparently storing the value and loading
37380/// scalars back, while for x64 we should use 64-bit extracts and shifts.
37381static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
37382                                       TargetLowering::DAGCombinerInfo &DCI,
37383                                       const X86Subtarget &Subtarget) {
37384  if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget))
37385    return NewOp;
37386
37387  SDValue InputVector = N->getOperand(0);
37388  SDValue EltIdx = N->getOperand(1);
37389  auto *CIdx = dyn_cast<ConstantSDNode>(EltIdx);
37390
37391  EVT SrcVT = InputVector.getValueType();
37392  EVT VT = N->getValueType(0);
37393  SDLoc dl(InputVector);
37394  bool IsPextr = N->getOpcode() != ISD::EXTRACT_VECTOR_ELT;
37395  unsigned NumSrcElts = SrcVT.getVectorNumElements();
37396
37397  if (CIdx && CIdx->getAPIntValue().uge(NumSrcElts))
37398    return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
37399
37400  // Integer Constant Folding.
37401  if (CIdx && VT.isInteger()) {
37402    APInt UndefVecElts;
37403    SmallVector<APInt, 16> EltBits;
37404    unsigned VecEltBitWidth = SrcVT.getScalarSizeInBits();
37405    if (getTargetConstantBitsFromNode(InputVector, VecEltBitWidth, UndefVecElts,
37406                                      EltBits, true, false)) {
37407      uint64_t Idx = CIdx->getZExtValue();
37408      if (UndefVecElts[Idx])
37409        return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
37410      return DAG.getConstant(EltBits[Idx].zextOrSelf(VT.getScalarSizeInBits()),
37411                             dl, VT);
37412    }
37413  }
37414
37415  if (IsPextr) {
37416    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
37417    if (TLI.SimplifyDemandedBits(
37418            SDValue(N, 0), APInt::getAllOnesValue(VT.getSizeInBits()), DCI))
37419      return SDValue(N, 0);
37420
37421    // PEXTR*(PINSR*(v, s, c), c) -> s (with implicit zext handling).
37422    if ((InputVector.getOpcode() == X86ISD::PINSRB ||
37423         InputVector.getOpcode() == X86ISD::PINSRW) &&
37424        InputVector.getOperand(2) == EltIdx) {
37425      assert(SrcVT == InputVector.getOperand(0).getValueType() &&
37426             "Vector type mismatch");
37427      SDValue Scl = InputVector.getOperand(1);
37428      Scl = DAG.getNode(ISD::TRUNCATE, dl, SrcVT.getScalarType(), Scl);
37429      return DAG.getZExtOrTrunc(Scl, dl, VT);
37430    }
37431
37432    // TODO - Remove this once we can handle the implicit zero-extension of
37433    // X86ISD::PEXTRW/X86ISD::PEXTRB in combineHorizontalPredicateResult and
37434    // combineBasicSADPattern.
37435    return SDValue();
37436  }
37437
37438  // Detect mmx extraction of all bits as an i64. It works better as a bitcast.
37439  if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
37440      VT == MVT::i64 && SrcVT == MVT::v1i64 && isNullConstant(EltIdx)) {
37441    SDValue MMXSrc = InputVector.getOperand(0);
37442
37443    // The bitcast source is a direct mmx result.
37444    if (MMXSrc.getValueType() == MVT::x86mmx)
37445      return DAG.getBitcast(VT, InputVector);
37446  }
37447
37448  // Detect mmx to i32 conversion through a v2i32 elt extract.
37449  if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
37450      VT == MVT::i32 && SrcVT == MVT::v2i32 && isNullConstant(EltIdx)) {
37451    SDValue MMXSrc = InputVector.getOperand(0);
37452
37453    // The bitcast source is a direct mmx result.
37454    if (MMXSrc.getValueType() == MVT::x86mmx)
37455      return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32, MMXSrc);
37456  }
37457
37458  // Check whether this extract is the root of a sum of absolute differences
37459  // pattern. This has to be done here because we really want it to happen
37460  // pre-legalization.
37461  if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
37462    return SAD;
37463
37464  // Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
37465  if (SDValue Cmp = combineHorizontalPredicateResult(N, DAG, Subtarget))
37466    return Cmp;
37467
37468  // Attempt to replace min/max v8i16/v16i8 reductions with PHMINPOSUW.
37469  if (SDValue MinMax = combineHorizontalMinMaxResult(N, DAG, Subtarget))
37470    return MinMax;
37471
37472  if (SDValue V = combineReductionToHorizontal(N, DAG, Subtarget))
37473    return V;
37474
37475  if (SDValue V = scalarizeExtEltFP(N, DAG))
37476    return V;
37477
37478  // Attempt to extract an i1 element by using MOVMSK to extract the sign bits
37479  // and then testing the relevant element.
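  // e.g. two i1 extracts from a v4i1 source at indices 0 and 2 can share one
  // vXi1->iN bitcast and become ((Bits & 1) == 1) and ((Bits & 4) == 4).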
37480  if (CIdx && SrcVT.getScalarType() == MVT::i1) {
37481    SmallVector<SDNode *, 16> BoolExtracts;
37482    auto IsBoolExtract = [&BoolExtracts](SDNode *Use) {
37483      if (Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
37484          isa<ConstantSDNode>(Use->getOperand(1)) &&
37485          Use->getValueType(0) == MVT::i1) {
37486        BoolExtracts.push_back(Use);
37487        return true;
37488      }
37489      return false;
37490    };
37491    if (all_of(InputVector->uses(), IsBoolExtract) &&
37492        BoolExtracts.size() > 1) {
37493      EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcElts);
37494      if (SDValue BC =
37495              combineBitcastvxi1(DAG, BCVT, InputVector, dl, Subtarget)) {
37496        for (SDNode *Use : BoolExtracts) {
37497          // extractelement vXi1 X, MaskIdx --> ((movmsk X) & Mask) == Mask
37498          unsigned MaskIdx = Use->getConstantOperandVal(1);
37499          APInt MaskBit = APInt::getOneBitSet(NumSrcElts, MaskIdx);
37500          SDValue Mask = DAG.getConstant(MaskBit, dl, BCVT);
37501          SDValue Res = DAG.getNode(ISD::AND, dl, BCVT, BC, Mask);
37502          Res = DAG.getSetCC(dl, MVT::i1, Res, Mask, ISD::SETEQ);
37503          DCI.CombineTo(Use, Res);
37504        }
37505        return SDValue(N, 0);
37506      }
37507    }
37508  }
37509
37510  return SDValue();
37511}
37512
37513/// If a vector select has an operand that is -1 or 0, try to simplify the
37514/// select to a bitwise logic operation.
37515/// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
37516static SDValue
37517combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
37518                                 TargetLowering::DAGCombinerInfo &DCI,
37519                                 const X86Subtarget &Subtarget) {
37520  SDValue Cond = N->getOperand(0);
37521  SDValue LHS = N->getOperand(1);
37522  SDValue RHS = N->getOperand(2);
37523  EVT VT = LHS.getValueType();
37524  EVT CondVT = Cond.getValueType();
37525  SDLoc DL(N);
37526  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
37527
37528  if (N->getOpcode() != ISD::VSELECT)
37529    return SDValue();
37530
37531  assert(CondVT.isVector() && "Vector select expects a vector selector!");
37532
37533  // Check if the first operand is all zeros and Cond type is vXi1.
37534  // This situation only applies to avx512.
37535  // TODO: Use isNullOrNullSplat() to distinguish constants with undefs?
37536  // TODO: Can we assert that both operands are not zeros (because that should
37537  //       get simplified at node creation time)?
37538  bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
37539  bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
37540
37541  // If both inputs are 0/undef, create a complete zero vector.
37542  // FIXME: As noted above this should be handled by DAGCombiner/getNode.
37543  if (TValIsAllZeros && FValIsAllZeros) {
37544    if (VT.isFloatingPoint())
37545      return DAG.getConstantFP(0.0, DL, VT);
37546    return DAG.getConstant(0, DL, VT);
37547  }
37548
37549  if (TValIsAllZeros && !FValIsAllZeros && Subtarget.hasAVX512() &&
37550      Cond.hasOneUse() && CondVT.getVectorElementType() == MVT::i1) {
37551    // Invert the cond to not(cond) : xor(op,allones)=not(op)
37552    SDValue CondNew = DAG.getNOT(DL, Cond, CondVT);
37553    // Vselect cond, op1, op2 = Vselect not(cond), op2, op1
37554    return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
37555  }
37556
37557  // To use the condition operand as a bitwise mask, it must have elements that
37558  // are the same size as the select elements. I.e., the condition operand must
37559  // have already been promoted from the IR select condition type <N x i1>.
37560  // Don't check if the types themselves are equal because that excludes
37561  // vector floating-point selects.
37562  if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
37563    return SDValue();
37564
37565  // Try to invert the condition if true value is not all 1s and false value is
37566  // not all 0s. Only do this if the condition has one use.
37567  bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
37568  if (!TValIsAllOnes && !FValIsAllZeros && Cond.hasOneUse() &&
37569      // Check if the selector will be produced by CMPP*/PCMP*.
37570      Cond.getOpcode() == ISD::SETCC &&
37571      // Check if SETCC has already been promoted.
37572      TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
37573          CondVT) {
37574    bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
37575
37576    if (TValIsAllZeros || FValIsAllOnes) {
37577      SDValue CC = Cond.getOperand(2);
37578      ISD::CondCode NewCC = ISD::getSetCCInverse(
37579          cast<CondCodeSDNode>(CC)->get(), Cond.getOperand(0).getValueType());
37580      Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
37581                          NewCC);
37582      std::swap(LHS, RHS);
37583      TValIsAllOnes = FValIsAllOnes;
37584      FValIsAllZeros = TValIsAllZeros;
37585    }
37586  }
37587
37588  // Cond value must be 'sign splat' to be converted to a logical op.
37589  if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
37590    return SDValue();
37591
37592  // vselect Cond, 111..., 000... -> Cond
37593  if (TValIsAllOnes && FValIsAllZeros)
37594    return DAG.getBitcast(VT, Cond);
37595
37596  if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(CondVT))
37597    return SDValue();
37598
37599  // vselect Cond, 111..., X -> or Cond, X
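  // This is safe because every Cond lane is all-ones or all-zeros (checked by
  // the sign-splat test above): all-ones lanes yield the all-ones true value,
  // zero lanes pass X through untouched.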
37600  if (TValIsAllOnes) {
37601    SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
37602    SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
37603    return DAG.getBitcast(VT, Or);
37604  }
37605
37606  // vselect Cond, X, 000... -> and Cond, X
37607  if (FValIsAllZeros) {
37608    SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
37609    SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
37610    return DAG.getBitcast(VT, And);
37611  }
37612
37613  // vselect Cond, 000..., X -> andn Cond, X
37614  if (TValIsAllZeros) {
37615    MVT AndNVT = MVT::getVectorVT(MVT::i64, CondVT.getSizeInBits() / 64);
37616    SDValue CastCond = DAG.getBitcast(AndNVT, Cond);
37617    SDValue CastRHS = DAG.getBitcast(AndNVT, RHS);
37618    SDValue AndN = DAG.getNode(X86ISD::ANDNP, DL, AndNVT, CastCond, CastRHS);
37619    return DAG.getBitcast(VT, AndN);
37620  }
37621
37622  return SDValue();
37623}
37624
37625/// If both arms of a vector select are concatenated vectors, split the select,
37626/// and concatenate the result to eliminate a wide (256-bit) vector instruction:
37627///   vselect Cond, (concat T0, T1), (concat F0, F1) -->
37628///   concat (vselect (split Cond), T0, F0), (vselect (split Cond), T1, F1)
37629static SDValue narrowVectorSelect(SDNode *N, SelectionDAG &DAG,
37630                                  const X86Subtarget &Subtarget) {
37631  unsigned Opcode = N->getOpcode();
37632  if (Opcode != X86ISD::BLENDV && Opcode != ISD::VSELECT)
37633    return SDValue();
37634
37635  // TODO: Split 512-bit vectors too?
37636  EVT VT = N->getValueType(0);
37637  if (!VT.is256BitVector())
37638    return SDValue();
37639
37640  // TODO: Split as long as any 2 of the 3 operands are concatenated?
37641  SDValue Cond = N->getOperand(0);
37642  SDValue TVal = N->getOperand(1);
37643  SDValue FVal = N->getOperand(2);
37644  SmallVector<SDValue, 4> CatOpsT, CatOpsF;
37645  if (!TVal.hasOneUse() || !FVal.hasOneUse() ||
37646      !collectConcatOps(TVal.getNode(), CatOpsT) ||
37647      !collectConcatOps(FVal.getNode(), CatOpsF))
37648    return SDValue();
37649
37650  auto makeBlend = [Opcode](SelectionDAG &DAG, const SDLoc &DL,
37651                            ArrayRef<SDValue> Ops) {
37652    return DAG.getNode(Opcode, DL, Ops[1].getValueType(), Ops);
37653  };
37654  return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { Cond, TVal, FVal },
37655                          makeBlend, /*CheckBWI*/ false);
37656}
37657
37658static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
37659  SDValue Cond = N->getOperand(0);
37660  SDValue LHS = N->getOperand(1);
37661  SDValue RHS = N->getOperand(2);
37662  SDLoc DL(N);
37663
37664  auto *TrueC = dyn_cast<ConstantSDNode>(LHS);
37665  auto *FalseC = dyn_cast<ConstantSDNode>(RHS);
37666  if (!TrueC || !FalseC)
37667    return SDValue();
37668
37669  // Don't do this for crazy integer types.
37670  EVT VT = N->getValueType(0);
37671  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
37672    return SDValue();
37673
37674  // We're going to use the condition bit in math or logic ops. We could allow
37675  // this with a wider condition value (post-legalization it becomes an i8),
37676  // but if nothing is creating selects that late, it doesn't matter.
37677  if (Cond.getValueType() != MVT::i1)
37678    return SDValue();
37679
37680  // A power-of-2 multiply is just a shift. LEA also cheaply handles multiply by
37681  // 3, 5, or 9 with i32/i64, so those get transformed too.
37682  // TODO: For constants that overflow or do not differ by power-of-2 or small
37683  // multiplier, convert to 'and' + 'add'.
37684  const APInt &TrueVal = TrueC->getAPIntValue();
37685  const APInt &FalseVal = FalseC->getAPIntValue();
37686  bool OV;
37687  APInt Diff = TrueVal.ssub_ov(FalseVal, OV);
37688  if (OV)
37689    return SDValue();
37690
37691  APInt AbsDiff = Diff.abs();
37692  if (AbsDiff.isPowerOf2() ||
37693      ((VT == MVT::i32 || VT == MVT::i64) &&
37694       (AbsDiff == 3 || AbsDiff == 5 || AbsDiff == 9))) {
37695
37696    // We need a positive multiplier constant for shift/LEA codegen. The 'not'
37697    // of the condition can usually be folded into a compare predicate, but even
37698    // without that, the sequence should be cheaper than a CMOV alternative.
37699    if (TrueVal.slt(FalseVal)) {
37700      Cond = DAG.getNOT(DL, Cond, MVT::i1);
37701      std::swap(TrueC, FalseC);
37702    }
37703
37704    // select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC
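    // e.g. select i1 %c, i32 9, i32 1 --> ((zext %c) * 8) + 1, where the
    // multiply by 8 later becomes a shift.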
37705    SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
37706
37707    // Multiply condition by the difference if non-one.
37708    if (!AbsDiff.isOneValue())
37709      R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT));
37710
37711    // Add the base if non-zero.
37712    if (!FalseC->isNullValue())
37713      R = DAG.getNode(ISD::ADD, DL, VT, R, SDValue(FalseC, 0));
37714
37715    return R;
37716  }
37717
37718  return SDValue();
37719}
37720
37721/// If this is a *dynamic* select (non-constant condition) and we can match
37722/// this node with one of the variable blend instructions, restructure the
37723/// condition so that blends can use the high (sign) bit of each element.
37724/// This function will also call SimplifyDemandedBits on already created
37725/// BLENDV to perform additional simplifications.
37726static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
37727                                           TargetLowering::DAGCombinerInfo &DCI,
37728                                           const X86Subtarget &Subtarget) {
37729  SDValue Cond = N->getOperand(0);
37730  if ((N->getOpcode() != ISD::VSELECT &&
37731       N->getOpcode() != X86ISD::BLENDV) ||
37732      ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
37733    return SDValue();
37734
37735  // Don't optimize before the condition has been transformed to a legal type
37736  // and don't ever optimize vector selects that map to AVX512 mask-registers.
37737  unsigned BitWidth = Cond.getScalarValueSizeInBits();
37738  if (BitWidth < 8 || BitWidth > 64)
37739    return SDValue();
37740
37741  // We can only handle the cases where VSELECT is directly legal on the
37742  // subtarget. We custom lower VSELECT nodes with constant conditions and
37743  // this makes it hard to see whether a dynamic VSELECT will correctly
37744  // lower, so we both check the operation's status and explicitly handle the
37745  // cases where a *dynamic* blend will fail even though a constant-condition
37746  // blend could be custom lowered.
37747  // FIXME: We should find a better way to handle this class of problems.
37748  // Potentially, we should combine constant-condition vselect nodes
37749  // pre-legalization into shuffles and not mark as many types as custom
37750  // lowered.
37751  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
37752  EVT VT = N->getValueType(0);
37753  if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
37754    return SDValue();
37755  // FIXME: We don't support i16-element blends currently. We could and
37756  // should support them by making *all* the bits in the condition be set
37757  // rather than just the high bit and using an i8-element blend.
37758  if (VT.getVectorElementType() == MVT::i16)
37759    return SDValue();
37760  // Dynamic blending was only available from SSE4.1 onward.
37761  if (VT.is128BitVector() && !Subtarget.hasSSE41())
37762    return SDValue();
37763  // Byte blends are only available in AVX2
37764  if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
37765    return SDValue();
37766  // There are no 512-bit blend instructions that use sign bits.
37767  if (VT.is512BitVector())
37768    return SDValue();
37769
37770  auto OnlyUsedAsSelectCond = [](SDValue Cond) {
37771    for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
37772         UI != UE; ++UI)
37773      if ((UI->getOpcode() != ISD::VSELECT &&
37774           UI->getOpcode() != X86ISD::BLENDV) ||
37775          UI.getOperandNo() != 0)
37776        return false;
37777
37778    return true;
37779  };
37780
37781  if (OnlyUsedAsSelectCond(Cond)) {
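    // Variable blend instructions only read the sign bit of each condition
    // element, so only the sign bit of Cond is demanded here; anything that
    // merely computes the lower bits can be simplified away.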
37782    APInt DemandedMask(APInt::getSignMask(BitWidth));
37783    KnownBits Known;
37784    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
37785                                          !DCI.isBeforeLegalizeOps());
37786    if (!TLI.SimplifyDemandedBits(Cond, DemandedMask, Known, TLO, 0, true))
37787      return SDValue();
37788
37789    // If we changed the computation somewhere in the DAG, this change will
37790    // affect all users of Cond. Update all the nodes so that we do not use
37791    // the generic VSELECT anymore. Otherwise, we may perform wrong
37792    // optimizations as we messed with the actual expectation for the vector
37793    // boolean values.
37794    for (SDNode *U : Cond->uses()) {
37795      if (U->getOpcode() == X86ISD::BLENDV)
37796        continue;
37797
37798      SDValue SB = DAG.getNode(X86ISD::BLENDV, SDLoc(U), U->getValueType(0),
37799                               Cond, U->getOperand(1), U->getOperand(2));
37800      DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
37801      DCI.AddToWorklist(U);
37802    }
37803    DCI.CommitTargetLoweringOpt(TLO);
37804    return SDValue(N, 0);
37805  }
37806
37807  // Otherwise we can still at least try to simplify multiple use bits.
37808  APInt DemandedMask(APInt::getSignMask(BitWidth));
37809  APInt DemandedElts(APInt::getAllOnesValue(VT.getVectorNumElements()));
37810  KnownBits Known;
37811  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
37812                                        !DCI.isBeforeLegalizeOps());
37813  if (SDValue V = TLI.SimplifyMultipleUseDemandedBits(Cond, DemandedMask,
37814                                                      DemandedElts, DAG, 0))
37815    return DAG.getNode(X86ISD::BLENDV, SDLoc(N), N->getValueType(0),
37816                       V, N->getOperand(1), N->getOperand(2));
37817
37818  return SDValue();
37819}
37820
37821// Try to match:
37822  //   (or (and M, (sub 0, X)), (pandn M, X))
37823// which is a special case of:
37824//   (select M, (sub 0, X), X)
37825// Per:
37826// http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
37827// We know that, if fNegate is 0 or 1:
37828//   (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
37829//
37830// Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
37831//   ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
37832//   ( M      ? -X : X) == ((X ^   M     ) + (M & 1))
37833// This lets us transform our vselect to:
37834//   (add (xor X, M), (and M, 1))
37835// And further to:
37836//   (sub (xor X, M), M)
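// e.g. with M all-ones, (xor X, M) = ~X and (sub ~X, M) = ~X - (-1) = -X;
// with M zero, both the xor and the sub are no-ops and X is unchanged.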
37837static SDValue combineLogicBlendIntoConditionalNegate(
37838    EVT VT, SDValue Mask, SDValue X, SDValue Y, const SDLoc &DL,
37839    SelectionDAG &DAG, const X86Subtarget &Subtarget) {
37840  EVT MaskVT = Mask.getValueType();
37841  assert(MaskVT.isInteger() &&
37842         DAG.ComputeNumSignBits(Mask) == MaskVT.getScalarSizeInBits() &&
37843         "Mask must be zero/all-bits");
37844
37845  if (X.getValueType() != MaskVT || Y.getValueType() != MaskVT)
37846    return SDValue();
37847  if (!DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT))
37848    return SDValue();
37849
37850  auto IsNegV = [](SDNode *N, SDValue V) {
37851    return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
37852           ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
37853  };
37854
37855  SDValue V;
37856  if (IsNegV(Y.getNode(), X))
37857    V = X;
37858  else if (IsNegV(X.getNode(), Y))
37859    V = Y;
37860  else
37861    return SDValue();
37862
37863  SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
37864  SDValue SubOp2 = Mask;
37865
37866  // If the negate was on the false side of the select, then
37867  // the operands of the SUB need to be swapped. PR 27251.
37868  // This is because the pattern being matched above is
37869  // (vselect M, (sub 0, X), X) -> (sub (xor X, M), M)
37870  // but if the pattern matched was
37871  // (vselect M, X, (sub 0, X)), that is really negation of the pattern
37872  // above, -(vselect M, (sub 0, X), X), and therefore the replacement
37873  // pattern also needs to be a negation of the replacement pattern above.
37874  // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
37875  // sub accomplishes the negation of the replacement pattern.
37876  if (V == Y)
37877    std::swap(SubOp1, SubOp2);
37878
37879  SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
37880  return DAG.getBitcast(VT, Res);
37881}
37882
37883/// Do target-specific dag combines on SELECT and VSELECT nodes.
37884static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
37885                             TargetLowering::DAGCombinerInfo &DCI,
37886                             const X86Subtarget &Subtarget) {
37887  SDLoc DL(N);
37888  SDValue Cond = N->getOperand(0);
37889  SDValue LHS = N->getOperand(1);
37890  SDValue RHS = N->getOperand(2);
37891
37892  // Try simplification again because we use this function to optimize
37893  // BLENDV nodes that are not handled by the generic combiner.
37894  if (SDValue V = DAG.simplifySelect(Cond, LHS, RHS))
37895    return V;
37896
37897  EVT VT = LHS.getValueType();
37898  EVT CondVT = Cond.getValueType();
37899  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
37900  bool CondConstantVector = ISD::isBuildVectorOfConstantSDNodes(Cond.getNode());
37901
37902  // Attempt to combine (select M, (sub 0, X), X) -> (sub (xor X, M), M).
37903  // Limit this to cases of non-constant masks that createShuffleMaskFromVSELECT
37904  // can't catch, plus vXi8 cases where we'd likely end up with BLENDV.
37905  if (CondVT.isVector() && CondVT.isInteger() &&
37906      CondVT.getScalarSizeInBits() == VT.getScalarSizeInBits() &&
37907      (!CondConstantVector || CondVT.getScalarType() == MVT::i8) &&
37908      DAG.ComputeNumSignBits(Cond) == CondVT.getScalarSizeInBits())
37909    if (SDValue V = combineLogicBlendIntoConditionalNegate(VT, Cond, RHS, LHS,
37910                                                           DL, DAG, Subtarget))
37911      return V;
37912
37913  // Convert vselects with constant condition into shuffles.
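  // e.g. (vselect <-1,0,0,-1>, X, Y) --> (shuffle X, Y, <0,5,6,3>), with true
  // lanes indexing into X and false lanes into Y.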
37914  if (CondConstantVector && DCI.isBeforeLegalizeOps()) {
37915    SmallVector<int, 64> Mask;
37916    if (createShuffleMaskFromVSELECT(Mask, Cond))
37917      return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask);
37918  }
37919
37920  // If we have SSE[12] support, try to form min/max nodes. SSE min/max
37921  // instructions match the semantics of the common C idiom x<y?x:y but not
37922  // x<=y?x:y, because of how they handle negative zero (which can be
37923  // ignored in unsafe-math mode).
37924  // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
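  // e.g. (select (setolt X, Y), X, Y) --> (X86ISD::FMIN X, Y); the case
  // analysis below guards against the NaN and signed-zero behavior of
  // MINPS/MAXPS for the other predicates.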
37925  if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
37926      VT != MVT::f80 && VT != MVT::f128 &&
37927      (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
37928      (Subtarget.hasSSE2() ||
37929       (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
37930    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
37931
37932    unsigned Opcode = 0;
37933    // Check for x CC y ? x : y.
37934    if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
37935        DAG.isEqualTo(RHS, Cond.getOperand(1))) {
37936      switch (CC) {
37937      default: break;
37938      case ISD::SETULT:
37939        // Converting this to a min would handle NaNs incorrectly, and swapping
37940        // the operands would cause it to handle comparisons between positive
37941        // and negative zero incorrectly.
37942        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
37943          if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
37944              !(DAG.isKnownNeverZeroFloat(LHS) ||
37945                DAG.isKnownNeverZeroFloat(RHS)))
37946            break;
37947          std::swap(LHS, RHS);
37948        }
37949        Opcode = X86ISD::FMIN;
37950        break;
37951      case ISD::SETOLE:
37952        // Converting this to a min would handle comparisons between positive
37953        // and negative zero incorrectly.
37954        if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
37955            !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
37956          break;
37957        Opcode = X86ISD::FMIN;
37958        break;
37959      case ISD::SETULE:
37960        // Converting this to a min would handle both negative zeros and NaNs
37961        // incorrectly, but we can swap the operands to fix both.
37962        std::swap(LHS, RHS);
37963        LLVM_FALLTHROUGH;
37964      case ISD::SETOLT:
37965      case ISD::SETLT:
37966      case ISD::SETLE:
37967        Opcode = X86ISD::FMIN;
37968        break;
37969
37970      case ISD::SETOGE:
37971        // Converting this to a max would handle comparisons between positive
37972        // and negative zero incorrectly.
37973        if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
37974            !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
37975          break;
37976        Opcode = X86ISD::FMAX;
37977        break;
37978      case ISD::SETUGT:
37979        // Converting this to a max would handle NaNs incorrectly, and swapping
37980        // the operands would cause it to handle comparisons between positive
37981        // and negative zero incorrectly.
37982        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
37983          if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
37984              !(DAG.isKnownNeverZeroFloat(LHS) ||
37985                DAG.isKnownNeverZeroFloat(RHS)))
37986            break;
37987          std::swap(LHS, RHS);
37988        }
37989        Opcode = X86ISD::FMAX;
37990        break;
37991      case ISD::SETUGE:
37992        // Converting this to a max would handle both negative zeros and NaNs
37993        // incorrectly, but we can swap the operands to fix both.
37994        std::swap(LHS, RHS);
37995        LLVM_FALLTHROUGH;
37996      case ISD::SETOGT:
37997      case ISD::SETGT:
37998      case ISD::SETGE:
37999        Opcode = X86ISD::FMAX;
38000        break;
38001      }
38002    // Check for x CC y ? y : x -- a min/max with reversed arms.
38003    } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
38004               DAG.isEqualTo(RHS, Cond.getOperand(0))) {
38005      switch (CC) {
38006      default: break;
38007      case ISD::SETOGE:
38008        // Converting this to a min would handle comparisons between positive
38009        // and negative zero incorrectly, and swapping the operands would
38010        // cause it to handle NaNs incorrectly.
38011        if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
38012            !(DAG.isKnownNeverZeroFloat(LHS) ||
38013              DAG.isKnownNeverZeroFloat(RHS))) {
38014          if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
38015            break;
38016          std::swap(LHS, RHS);
38017        }
38018        Opcode = X86ISD::FMIN;
38019        break;
38020      case ISD::SETUGT:
38021        // Converting this to a min would handle NaNs incorrectly.
38022        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
38023          break;
38024        Opcode = X86ISD::FMIN;
38025        break;
38026      case ISD::SETUGE:
38027        // Converting this to a min would handle both negative zeros and NaNs
38028        // incorrectly, but we can swap the operands to fix both.
38029        std::swap(LHS, RHS);
38030        LLVM_FALLTHROUGH;
38031      case ISD::SETOGT:
38032      case ISD::SETGT:
38033      case ISD::SETGE:
38034        Opcode = X86ISD::FMIN;
38035        break;
38036
38037      case ISD::SETULT:
38038        // Converting this to a max would handle NaNs incorrectly.
38039        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
38040          break;
38041        Opcode = X86ISD::FMAX;
38042        break;
38043      case ISD::SETOLE:
38044        // Converting this to a max would handle comparisons between positive
38045        // and negative zero incorrectly, and swapping the operands would
38046        // cause it to handle NaNs incorrectly.
38047        if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
38048            !DAG.isKnownNeverZeroFloat(LHS) &&
38049            !DAG.isKnownNeverZeroFloat(RHS)) {
38050          if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
38051            break;
38052          std::swap(LHS, RHS);
38053        }
38054        Opcode = X86ISD::FMAX;
38055        break;
38056      case ISD::SETULE:
38057        // Converting this to a max would handle both negative zeros and NaNs
38058        // incorrectly, but we can swap the operands to fix both.
38059        std::swap(LHS, RHS);
38060        LLVM_FALLTHROUGH;
38061      case ISD::SETOLT:
38062      case ISD::SETLT:
38063      case ISD::SETLE:
38064        Opcode = X86ISD::FMAX;
38065        break;
38066      }
38067    }
38068
38069    if (Opcode)
38070      return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
38071  }
38072
38073  // Some mask scalar intrinsics rely on checking if only one bit is set
38074  // and implement it in C code like this:
38075  // A[0] = (U & 1) ? A[0] : W[0];
38076  // This creates some redundant instructions that break pattern matching.
38077  // fold (select (setcc (and X, 1), 0, seteq), Y, Z) -> (select (and X, 1), Z, Y)
38078  if (Subtarget.hasAVX512() && N->getOpcode() == ISD::SELECT &&
38079      Cond.getOpcode() == ISD::SETCC && (VT == MVT::f32 || VT == MVT::f64)) {
38080    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
38081    SDValue AndNode = Cond.getOperand(0);
38082    if (AndNode.getOpcode() == ISD::AND && CC == ISD::SETEQ &&
38083        isNullConstant(Cond.getOperand(1)) &&
38084        isOneConstant(AndNode.getOperand(1))) {
38085      // LHS and RHS swapped due to
38086      // setcc outputting 1 when AND resulted in 0 and vice versa.
38087      AndNode = DAG.getZExtOrTrunc(AndNode, DL, MVT::i8);
38088      return DAG.getNode(ISD::SELECT, DL, VT, AndNode, RHS, LHS);
38089    }
38090  }
38091
38092  // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
38093  // lowering on KNL. In this case we convert it to
38094  // v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
38095  // The same applies to all i8 and i16 element vectors without BWI.
38096  // Make sure we extend these even before type legalization gets a chance to
38097  // split wide vectors.
38098  // Since SKX these selects have a proper lowering.
38099  if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && CondVT.isVector() &&
38100      CondVT.getVectorElementType() == MVT::i1 &&
38101      (VT.getVectorElementType() == MVT::i8 ||
38102       VT.getVectorElementType() == MVT::i16)) {
38103    Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
38104    return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
38105  }
38106
38107  // AVX512 - Extend select with zero to merge with target shuffle.
38108  // select(mask, extract_subvector(shuffle(x)), zero) -->
38109  // extract_subvector(select(insert_subvector(mask), shuffle(x), zero))
38110  // TODO - support non target shuffles as well.
38111  if (Subtarget.hasAVX512() && CondVT.isVector() &&
38112      CondVT.getVectorElementType() == MVT::i1) {
38113    auto SelectableOp = [&TLI](SDValue Op) {
38114      return Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
38115             isTargetShuffle(Op.getOperand(0).getOpcode()) &&
38116             isNullConstant(Op.getOperand(1)) &&
38117             TLI.isTypeLegal(Op.getOperand(0).getValueType()) &&
38118             Op.hasOneUse() && Op.getOperand(0).hasOneUse();
38119    };
38120
38121    bool SelectableLHS = SelectableOp(LHS);
38122    bool SelectableRHS = SelectableOp(RHS);
38123    bool ZeroLHS = ISD::isBuildVectorAllZeros(LHS.getNode());
38124    bool ZeroRHS = ISD::isBuildVectorAllZeros(RHS.getNode());
38125
38126    if ((SelectableLHS && ZeroRHS) || (SelectableRHS && ZeroLHS)) {
38127      EVT SrcVT = SelectableLHS ? LHS.getOperand(0).getValueType()
38128                                : RHS.getOperand(0).getValueType();
38129      unsigned NumSrcElts = SrcVT.getVectorNumElements();
38130      EVT SrcCondVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumSrcElts);
38131      LHS = insertSubVector(DAG.getUNDEF(SrcVT), LHS, 0, DAG, DL,
38132                            VT.getSizeInBits());
38133      RHS = insertSubVector(DAG.getUNDEF(SrcVT), RHS, 0, DAG, DL,
38134                            VT.getSizeInBits());
38135      Cond = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcCondVT,
38136                         DAG.getUNDEF(SrcCondVT), Cond,
38137                         DAG.getIntPtrConstant(0, DL));
38138      SDValue Res = DAG.getSelect(DL, SrcVT, Cond, LHS, RHS);
38139      return extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
38140    }
38141  }
38142
38143  if (SDValue V = combineSelectOfTwoConstants(N, DAG))
38144    return V;
38145
38146  // Canonicalize max and min:
38147  // (x > y) ? x : y -> (x >= y) ? x : y
38148  // (x < y) ? x : y -> (x <= y) ? x : y
38149  // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
38150  // the need for an extra compare
38151  // against zero. e.g.
38152  // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
38153  // subl   %esi, %edi
38154  // testl  %edi, %edi
38155  // movl   $0, %eax
38156  // cmovgl %edi, %eax
38157  // =>
38158  // xorl   %eax, %eax
38159  // subl   %esi, %edi
38160  // cmovsl %eax, %edi
38161  if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
38162      Cond.hasOneUse() &&
38163      DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
38164      DAG.isEqualTo(RHS, Cond.getOperand(1))) {
38165    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
38166    switch (CC) {
38167    default: break;
38168    case ISD::SETLT:
38169    case ISD::SETGT: {
38170      ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
38171      Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
38172                          Cond.getOperand(0), Cond.getOperand(1), NewCC);
38173      return DAG.getSelect(DL, VT, Cond, LHS, RHS);
38174    }
38175    }
38176  }
38177
38178  // Match VSELECTs into subs with unsigned saturation.
38179  if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
38180      // psubus is available in SSE2 for i8 and i16 vectors.
38181      Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
38182      isPowerOf2_32(VT.getVectorNumElements()) &&
38183      (VT.getVectorElementType() == MVT::i8 ||
38184       VT.getVectorElementType() == MVT::i16)) {
38185    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
38186
38187    // Check if one of the arms of the VSELECT is a zero vector. If it's on the
38188    // left side invert the predicate to simplify logic below.
38189    SDValue Other;
38190    if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
38191      Other = RHS;
38192      CC = ISD::getSetCCInverse(CC, VT.getVectorElementType());
38193    } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
38194      Other = LHS;
38195    }
38196
38197    if (Other.getNode() && Other->getNumOperands() == 2 &&
38198        Other->getOperand(0) == Cond.getOperand(0)) {
38199      SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
38200      SDValue CondRHS = Cond->getOperand(1);
38201
38202      // Look for a general sub with unsigned saturation first.
38203      // x >= y ? x-y : 0 --> subus x, y
38204      // x >  y ? x-y : 0 --> subus x, y
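      // Both forms are safe: when the compare fails the saturating subtract
      // also yields 0, and when it succeeds the subtraction cannot saturate.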
38205      if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
38206          Other->getOpcode() == ISD::SUB && OpRHS == CondRHS)
38207        return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
38208
38209      if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS)) {
38210        if (isa<BuildVectorSDNode>(CondRHS)) {
38211          // If the RHS is a constant we have to reverse the const
38212          // canonicalization.
38213          // x > C-1 ? x+(-C) : 0 --> subus x, C
38214          auto MatchUSUBSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
38215            return (!Op && !Cond) ||
38216                   (Op && Cond &&
38217                    Cond->getAPIntValue() == (-Op->getAPIntValue() - 1));
38218          };
38219          if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
38220              ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUSUBSAT,
38221                                        /*AllowUndefs*/ true)) {
38222            OpRHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
38223                                OpRHS);
38224            return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
38225          }
38226
38227          // Another special case: If C was a sign bit, the sub has been
38228          // canonicalized into a xor.
38229          // FIXME: Would it be better to use computeKnownBits to determine
38230          //        whether it's safe to decanonicalize the xor?
38231          // x s< 0 ? x^C : 0 --> subus x, C
38232          if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
38233            if (CC == ISD::SETLT && Other.getOpcode() == ISD::XOR &&
38234                ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
38235                OpRHSConst->getAPIntValue().isSignMask()) {
38236              // Note that we have to rebuild the RHS constant here to ensure we
38237              // don't rely on particular values of undef lanes.
38238              OpRHS = DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT);
38239              return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
38240            }
38241          }
38242        }
38243      }
38244    }
38245  }
38246
38247  // Match VSELECTs into add with unsigned saturation.
38248  if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
38249      // paddus is available in SSE2 for i8 and i16 vectors.
38250      Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
38251      isPowerOf2_32(VT.getVectorNumElements()) &&
38252      (VT.getVectorElementType() == MVT::i8 ||
38253       VT.getVectorElementType() == MVT::i16)) {
38254    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
38255
38256    SDValue CondLHS = Cond->getOperand(0);
38257    SDValue CondRHS = Cond->getOperand(1);
38258
38259    // Check if one of the arms of the VSELECT is vector with all bits set.
38260    // If it's on the left side invert the predicate to simplify logic below.
38261    SDValue Other;
38262    if (ISD::isBuildVectorAllOnes(LHS.getNode())) {
38263      Other = RHS;
38264      CC = ISD::getSetCCInverse(CC, VT.getVectorElementType());
38265    } else if (ISD::isBuildVectorAllOnes(RHS.getNode())) {
38266      Other = LHS;
38267    }
38268
38269    if (Other.getNode() && Other.getOpcode() == ISD::ADD) {
38270      SDValue OpLHS = Other.getOperand(0), OpRHS = Other.getOperand(1);
38271
38272      // Canonicalize condition operands.
38273      if (CC == ISD::SETUGE) {
38274        std::swap(CondLHS, CondRHS);
38275        CC = ISD::SETULE;
38276      }
38277
38278      // We can test against either of the addition operands.
38279      // x <= x+y ? x+y : ~0 --> addus x, y
38280      // x+y >= x ? x+y : ~0 --> addus x, y
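      // The unsigned compare x <= x+y only fails when the addition wrapped,
      // which is exactly when the saturating add returns all-ones.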
38281      if (CC == ISD::SETULE && Other == CondRHS &&
38282          (OpLHS == CondLHS || OpRHS == CondLHS))
38283        return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
38284
38285      if (isa<BuildVectorSDNode>(OpRHS) && isa<BuildVectorSDNode>(CondRHS) &&
38286          CondLHS == OpLHS) {
38287        // If the RHS is a constant we have to reverse the const
38288        // canonicalization.
38289        // x > ~C ? x+C : ~0 --> addus x, C
38290        auto MatchUADDSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
38291          return Cond->getAPIntValue() == ~Op->getAPIntValue();
38292        };
38293        if (CC == ISD::SETULE &&
38294            ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUADDSAT))
38295          return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
38296      }
38297    }
38298  }
38299
38300  // Early exit check
38301  if (!TLI.isTypeLegal(VT))
38302    return SDValue();
38303
38304  if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
38305    return V;
38306
38307  if (SDValue V = combineVSelectToBLENDV(N, DAG, DCI, Subtarget))
38308    return V;
38309
38310  if (SDValue V = narrowVectorSelect(N, DAG, Subtarget))
38311    return V;
38312
38313  // select(~Cond, X, Y) -> select(Cond, Y, X)
38314  if (CondVT.getScalarType() != MVT::i1)
38315    if (SDValue CondNot = IsNOT(Cond, DAG))
38316      return DAG.getNode(N->getOpcode(), DL, VT,
38317                         DAG.getBitcast(CondVT, CondNot), RHS, LHS);
38318
38319  // Custom action for SELECT MMX
38320  if (VT == MVT::x86mmx) {
38321    LHS = DAG.getBitcast(MVT::i64, LHS);
38322    RHS = DAG.getBitcast(MVT::i64, RHS);
38323    SDValue newSelect = DAG.getNode(ISD::SELECT, DL, MVT::i64, Cond, LHS, RHS);
38324    return DAG.getBitcast(VT, newSelect);
38325  }
38326
38327  return SDValue();
38328}
38329
38330/// Combine:
38331///   (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
38332/// to:
38333///   (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
38334/// i.e., reusing the EFLAGS produced by the LOCKed instruction.
38335/// Note that this is only legal for some op/cc combinations.
38336static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
38337                                       SelectionDAG &DAG,
38338                                       const X86Subtarget &Subtarget) {
38339  // This combine only operates on CMP-like nodes.
38340  if (!(Cmp.getOpcode() == X86ISD::CMP ||
38341        (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
38342    return SDValue();
38343
38344  // Can't replace the cmp if it has more uses than the one we're looking at.
38345  // FIXME: We would like to be able to handle this, but would need to make sure
38346  // all uses were updated.
38347  if (!Cmp.hasOneUse())
38348    return SDValue();
38349
38350  // This only applies to variations of the common case:
38351  //   (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
38352  //   (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
38353  //   (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
38354  //   (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
38355  // Using the proper condcodes (see below), overflow is checked for.
38356
38357  // FIXME: We can generalize both constraints:
38358  // - XOR/OR/AND (if they were made to survive AtomicExpand)
38359  // - LHS != 1
38360  // if the result is compared.
38361
38362  SDValue CmpLHS = Cmp.getOperand(0);
38363  SDValue CmpRHS = Cmp.getOperand(1);
38364
38365  if (!CmpLHS.hasOneUse())
38366    return SDValue();
38367
38368  unsigned Opc = CmpLHS.getOpcode();
38369  if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
38370    return SDValue();
38371
38372  SDValue OpRHS = CmpLHS.getOperand(2);
38373  auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
38374  if (!OpRHSC)
38375    return SDValue();
38376
38377  APInt Addend = OpRHSC->getAPIntValue();
38378  if (Opc == ISD::ATOMIC_LOAD_SUB)
38379    Addend = -Addend;
38380
38381  auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
38382  if (!CmpRHSC)
38383    return SDValue();
38384
38385  APInt Comparison = CmpRHSC->getAPIntValue();
38386
38387  // If the addend is the negation of the comparison value, then we can do
38388  // a full comparison by emitting the atomic arithmetic as a locked sub.
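  // e.g. (cmp (atomic_load_add x, -5), 5) is rewritten as a locked "sub x, 5"
  // whose EFLAGS feed the original condition code unchanged.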
38389  if (Comparison == -Addend) {
38390    // The CC is fine, but we need to rewrite the LHS of the comparison as an
38391    // atomic sub.
38392    auto *AN = cast<AtomicSDNode>(CmpLHS.getNode());
38393    auto AtomicSub = DAG.getAtomic(
38394        ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpLHS.getValueType(),
38395        /*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
38396        /*RHS*/ DAG.getConstant(-Addend, SDLoc(CmpRHS), CmpRHS.getValueType()),
38397        AN->getMemOperand());
38398    auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget);
38399    DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
38400                                  DAG.getUNDEF(CmpLHS.getValueType()));
38401    DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
38402    return LockOp;
38403  }
38404
38405  // We can handle comparisons with zero in a number of cases by manipulating
38406  // the CC used.
38407  if (!Comparison.isNullValue())
38408    return SDValue();
38409
38410  if (CC == X86::COND_S && Addend == 1)
38411    CC = X86::COND_LE;
38412  else if (CC == X86::COND_NS && Addend == 1)
38413    CC = X86::COND_G;
38414  else if (CC == X86::COND_G && Addend == -1)
38415    CC = X86::COND_GE;
38416  else if (CC == X86::COND_LE && Addend == -1)
38417    CC = X86::COND_L;
38418  else
38419    return SDValue();
38420
38421  SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG, Subtarget);
38422  DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
38423                                DAG.getUNDEF(CmpLHS.getValueType()));
38424  DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
38425  return LockOp;
38426}
38427
38428// Check whether a boolean test is testing a boolean value generated by
38429// X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
38430// code.
38431//
38432// Simplify the following patterns:
38433// (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
38434// (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
38435// to (Op EFLAGS Cond)
38436//
38437// (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
38438// (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
38439// to (Op EFLAGS !Cond)
38440//
38441// where Op could be BRCOND or CMOV.
38442//
38443static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
38444  // This combine only operates on CMP-like nodes.
38445  if (!(Cmp.getOpcode() == X86ISD::CMP ||
38446        (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
38447    return SDValue();
38448
38449  // Quit if not used as a boolean value.
38450  if (CC != X86::COND_E && CC != X86::COND_NE)
38451    return SDValue();
38452
38453  // Check CMP operands. One of them should be 0 or 1 and the other should be
38454  // a SetCC or extended from it.
38455  SDValue Op1 = Cmp.getOperand(0);
38456  SDValue Op2 = Cmp.getOperand(1);
38457
38458  SDValue SetCC;
38459  const ConstantSDNode* C = nullptr;
38460  bool needOppositeCond = (CC == X86::COND_E);
38461  bool checkAgainstTrue = false; // Is it a comparison against 1?
38462
38463  if ((C = dyn_cast<ConstantSDNode>(Op1)))
38464    SetCC = Op2;
38465  else if ((C = dyn_cast<ConstantSDNode>(Op2)))
38466    SetCC = Op1;
38467  else // Quit if neither operand is a constant.
38468    return SDValue();
38469
38470  if (C->getZExtValue() == 1) {
38471    needOppositeCond = !needOppositeCond;
38472    checkAgainstTrue = true;
38473  } else if (C->getZExtValue() != 0)
38474    // Quit if the constant is neither 0 nor 1.
38475    return SDValue();
38476
38477  bool truncatedToBoolWithAnd = false;
38478  // Skip (zext $x), (trunc $x), or (and $x, 1) node.
38479  while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
38480         SetCC.getOpcode() == ISD::TRUNCATE ||
38481         SetCC.getOpcode() == ISD::AND) {
38482    if (SetCC.getOpcode() == ISD::AND) {
38483      int OpIdx = -1;
38484      if (isOneConstant(SetCC.getOperand(0)))
38485        OpIdx = 1;
38486      if (isOneConstant(SetCC.getOperand(1)))
38487        OpIdx = 0;
38488      if (OpIdx < 0)
38489        break;
38490      SetCC = SetCC.getOperand(OpIdx);
38491      truncatedToBoolWithAnd = true;
38492    } else
38493      SetCC = SetCC.getOperand(0);
38494  }
38495
38496  switch (SetCC.getOpcode()) {
38497  case X86ISD::SETCC_CARRY:
38498    // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
38499    // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
38500    // i.e. it's a comparison against true but the result of SETCC_CARRY is not
38501    // truncated to i1 using 'and'.
38502    if (checkAgainstTrue && !truncatedToBoolWithAnd)
38503      break;
38504    assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
38505           "Invalid use of SETCC_CARRY!");
38506    LLVM_FALLTHROUGH;
38507  case X86ISD::SETCC:
38508    // Set the condition code or opposite one if necessary.
38509    CC = X86::CondCode(SetCC.getConstantOperandVal(0));
38510    if (needOppositeCond)
38511      CC = X86::GetOppositeBranchCondition(CC);
38512    return SetCC.getOperand(1);
38513  case X86ISD::CMOV: {
38514    // Check whether false/true value has canonical one, i.e. 0 or 1.
38515    ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
38516    ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
38517    // Quit if true value is not a constant.
38518    if (!TVal)
38519      return SDValue();
38520    // Quit if false value is not a constant.
38521    if (!FVal) {
38522      SDValue Op = SetCC.getOperand(0);
38523      // Skip 'zext' or 'trunc' node.
38524      if (Op.getOpcode() == ISD::ZERO_EXTEND ||
38525          Op.getOpcode() == ISD::TRUNCATE)
38526        Op = Op.getOperand(0);
38527      // A special case for rdrand/rdseed, where the value produced is 0 when
38528      // the false condition is found.
38529      if ((Op.getOpcode() != X86ISD::RDRAND &&
38530           Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
38531        return SDValue();
38532    }
38533    // Quit if false value is not the constant 0 or 1.
38534    bool FValIsFalse = true;
38535    if (FVal && FVal->getZExtValue() != 0) {
38536      if (FVal->getZExtValue() != 1)
38537        return SDValue();
38538      // If FVal is 1, opposite cond is needed.
38539      needOppositeCond = !needOppositeCond;
38540      FValIsFalse = false;
38541    }
38542    // Quit if TVal is not the constant opposite of FVal.
38543    if (FValIsFalse && TVal->getZExtValue() != 1)
38544      return SDValue();
38545    if (!FValIsFalse && TVal->getZExtValue() != 0)
38546      return SDValue();
38547    CC = X86::CondCode(SetCC.getConstantOperandVal(2));
38548    if (needOppositeCond)
38549      CC = X86::GetOppositeBranchCondition(CC);
38550    return SetCC.getOperand(3);
38551  }
38552  }
38553
38554  return SDValue();
38555}
38556
38557/// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
38558/// Match:
38559///   (X86or (X86setcc) (X86setcc))
38560///   (X86cmp (and (X86setcc) (X86setcc)), 0)
38561static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
38562                                           X86::CondCode &CC1, SDValue &Flags,
38563                                           bool &isAnd) {
38564  if (Cond->getOpcode() == X86ISD::CMP) {
38565    if (!isNullConstant(Cond->getOperand(1)))
38566      return false;
38567
38568    Cond = Cond->getOperand(0);
38569  }
38570
38571  isAnd = false;
38572
38573  SDValue SetCC0, SetCC1;
38574  switch (Cond->getOpcode()) {
38575  default: return false;
38576  case ISD::AND:
38577  case X86ISD::AND:
38578    isAnd = true;
38579    LLVM_FALLTHROUGH;
38580  case ISD::OR:
38581  case X86ISD::OR:
38582    SetCC0 = Cond->getOperand(0);
38583    SetCC1 = Cond->getOperand(1);
38584    break;
38585  }
38586
38587  // Make sure we have SETCC nodes, using the same flags value.
38588  if (SetCC0.getOpcode() != X86ISD::SETCC ||
38589      SetCC1.getOpcode() != X86ISD::SETCC ||
38590      SetCC0->getOperand(1) != SetCC1->getOperand(1))
38591    return false;
38592
38593  CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
38594  CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
38595  Flags = SetCC0->getOperand(1);
38596  return true;
38597}
38598
38599// When legalizing carry, we create carries via add X, -1
38600// If that comes from an actual carry, via setcc, we use the
38601// carry directly.
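// e.g. (X86ISD::ADD (setcc ...), -1) produces CF == 1 exactly when the setcc
// value was 1, so flag consumers can use the original carry source instead.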
38602static SDValue combineCarryThroughADD(SDValue EFLAGS, SelectionDAG &DAG) {
38603  if (EFLAGS.getOpcode() == X86ISD::ADD) {
38604    if (isAllOnesConstant(EFLAGS.getOperand(1))) {
38605      SDValue Carry = EFLAGS.getOperand(0);
38606      while (Carry.getOpcode() == ISD::TRUNCATE ||
38607             Carry.getOpcode() == ISD::ZERO_EXTEND ||
38608             Carry.getOpcode() == ISD::SIGN_EXTEND ||
38609             Carry.getOpcode() == ISD::ANY_EXTEND ||
38610             (Carry.getOpcode() == ISD::AND &&
38611              isOneConstant(Carry.getOperand(1))))
38612        Carry = Carry.getOperand(0);
38613      if (Carry.getOpcode() == X86ISD::SETCC ||
38614          Carry.getOpcode() == X86ISD::SETCC_CARRY) {
38615        // TODO: Merge this code with equivalent in combineAddOrSubToADCOrSBB?
38616        uint64_t CarryCC = Carry.getConstantOperandVal(0);
38617        SDValue CarryOp1 = Carry.getOperand(1);
38618        if (CarryCC == X86::COND_B)
38619          return CarryOp1;
38620        if (CarryCC == X86::COND_A) {
38621          // Try to convert COND_A into COND_B in an attempt to facilitate
38622          // materializing "setb reg".
38623          //
38624          // Do not flip "e > c", where "c" is a constant, because Cmp
38625          // instruction cannot take an immediate as its first operand.
38626          //
38627          if (CarryOp1.getOpcode() == X86ISD::SUB &&
38628              CarryOp1.getNode()->hasOneUse() &&
38629              CarryOp1.getValueType().isInteger() &&
38630              !isa<ConstantSDNode>(CarryOp1.getOperand(1))) {
38631            SDValue SubCommute =
38632                DAG.getNode(X86ISD::SUB, SDLoc(CarryOp1), CarryOp1->getVTList(),
38633                            CarryOp1.getOperand(1), CarryOp1.getOperand(0));
38634            return SDValue(SubCommute.getNode(), CarryOp1.getResNo());
38635          }
38636        }
38637        // If this is a check of the z flag of an add with 1, switch to the
38638        // C flag.
38639        if (CarryCC == X86::COND_E &&
38640            CarryOp1.getOpcode() == X86ISD::ADD &&
38641            isOneConstant(CarryOp1.getOperand(1)))
38642          return CarryOp1;
38643      }
38644    }
38645  }
38646
38647  return SDValue();
38648}
38649
38650/// Optimize an EFLAGS definition used according to the condition code \p CC
38651/// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
38652/// uses of chain values.
38653static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
38654                                  SelectionDAG &DAG,
38655                                  const X86Subtarget &Subtarget) {
38656  if (CC == X86::COND_B)
38657    if (SDValue Flags = combineCarryThroughADD(EFLAGS, DAG))
38658      return Flags;
38659
38660  if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
38661    return R;
38662  return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget);
38663}
38664
38665/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
38666static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
38667                           TargetLowering::DAGCombinerInfo &DCI,
38668                           const X86Subtarget &Subtarget) {
38669  SDLoc DL(N);
38670
38671  SDValue FalseOp = N->getOperand(0);
38672  SDValue TrueOp = N->getOperand(1);
38673  X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
38674  SDValue Cond = N->getOperand(3);
38675
38676  // cmov X, X, ?, ? --> X
38677  if (TrueOp == FalseOp)
38678    return TrueOp;
38679
38680  // Try to simplify the EFLAGS and condition code operands.
38681  // We can't always do this as FCMOV only supports a subset of X86 cond.
38682  if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) {
38683    if (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC)) {
38684      SDValue Ops[] = {FalseOp, TrueOp, DAG.getTargetConstant(CC, DL, MVT::i8),
38685                       Flags};
38686      return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
38687    }
38688  }
38689
38690  // If this is a select between two integer constants, try to do some
38691  // optimizations.  Note that the operands are ordered the opposite of SELECT
38692  // operands.
38693  if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
38694    if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
38695      // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
38696      // larger than FalseC (the false value).
38697      if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
38698        CC = X86::GetOppositeBranchCondition(CC);
38699        std::swap(TrueC, FalseC);
38700        std::swap(TrueOp, FalseOp);
38701      }
38702
38703      // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3.  Likewise for any pow2/0.
38704      // This is efficient for any integer data type (including i8/i16) and
38705      // shift amount.
38706      if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
38707        Cond = getSETCC(CC, Cond, DL, DAG);
38708
38709        // Zero extend the condition if needed.
38710        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
38711
38712        unsigned ShAmt = TrueC->getAPIntValue().logBase2();
38713        Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
38714                           DAG.getConstant(ShAmt, DL, MVT::i8));
38715        return Cond;
38716      }
38717
38718      // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst.  This is efficient
38719      // for any integer data type, including i8/i16.
38720      if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
38721        Cond = getSETCC(CC, Cond, DL, DAG);
38722
38723        // Zero extend the condition if needed.
38724        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
38725                           FalseC->getValueType(0), Cond);
38726        Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
38727                           SDValue(FalseC, 0));
38728        return Cond;
38729      }
38730
38731      // Optimize cases that will turn into an LEA instruction.  This requires
38732      // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
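      // e.g. (C ? 7 : 2) gives Diff == 5: zext(setcc(C)) scaled by 5 plus the
      // base 2 can select to a single LEA.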
38733      if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
38734        APInt Diff = TrueC->getAPIntValue() - FalseC->getAPIntValue();
38735        assert(Diff.getBitWidth() == N->getValueType(0).getSizeInBits() &&
38736               "Implicit constant truncation");
38737
38738        bool isFastMultiplier = false;
38739        if (Diff.ult(10)) {
38740          switch (Diff.getZExtValue()) {
38741          default: break;
38742          case 1:  // result = add base, cond
38743          case 2:  // result = lea base(    , cond*2)
38744          case 3:  // result = lea base(cond, cond*2)
38745          case 4:  // result = lea base(    , cond*4)
38746          case 5:  // result = lea base(cond, cond*4)
38747          case 8:  // result = lea base(    , cond*8)
38748          case 9:  // result = lea base(cond, cond*8)
38749            isFastMultiplier = true;
38750            break;
38751          }
38752        }
38753
38754        if (isFastMultiplier) {
38755          Cond = getSETCC(CC, Cond, DL ,DAG);
38756          // Zero extend the condition if needed.
38757          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
38758                             Cond);
38759          // Scale the condition by the difference.
38760          if (Diff != 1)
38761            Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
38762                               DAG.getConstant(Diff, DL, Cond.getValueType()));
38763
38764          // Add the base if non-zero.
38765          if (FalseC->getAPIntValue() != 0)
38766            Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
38767                               SDValue(FalseC, 0));
38768          return Cond;
38769        }
38770      }
38771    }
38772  }
38773
38774  // Handle these cases:
38775  //   (select (x != c), e, c) -> (select (x != c), e, x),
38776  //   (select (x == c), c, e) -> (select (x == c), x, e)
38777  // where the c is an integer constant, and the "select" is the combination
38778  // of CMOV and CMP.
38779  //
38780  // The rationale for this change is that a conditional-move from a constant
38781  // needs two instructions, whereas a conditional-move from a register needs
38782  // only one instruction.
38783  //
38784  // CAVEAT: Replacing a constant with a symbolic value may obscure some
38785  //  instruction-combining opportunities, so this optimization needs to be
38786  //  postponed as late as possible.
38787  //
38788  if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
38789    // the DCI.xxxx conditions are provided to postpone the optimization as
38790    // late as possible.
38791
38792    ConstantSDNode *CmpAgainst = nullptr;
38793    if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
38794        (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
38795        !isa<ConstantSDNode>(Cond.getOperand(0))) {
38796
38797      if (CC == X86::COND_NE &&
38798          CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
38799        CC = X86::GetOppositeBranchCondition(CC);
38800        std::swap(TrueOp, FalseOp);
38801      }
38802
38803      if (CC == X86::COND_E &&
38804          CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
38805        SDValue Ops[] = {FalseOp, Cond.getOperand(0),
38806                         DAG.getTargetConstant(CC, DL, MVT::i8), Cond};
38807        return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
38808      }
38809    }
38810  }
38811
38812  // Fold and/or of setcc's to double CMOV:
38813  //   (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
38814  //   (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
38815  //
38816  // This combine lets us generate:
38817  //   cmovcc1 (jcc1 if we don't have CMOV)
38818  //   cmovcc2 (same)
38819  // instead of:
38820  //   setcc1
38821  //   setcc2
38822  //   and/or
38823  //   cmovne (jne if we don't have CMOV)
38824  // When we can't use the CMOV instruction, it might increase branch
38825  // mispredicts.
38826  // When we can use CMOV, or when there is no mispredict, this improves
38827  // throughput and reduces register pressure.
38828  //
38829  if (CC == X86::COND_NE) {
38830    SDValue Flags;
38831    X86::CondCode CC0, CC1;
38832    bool isAndSetCC;
38833    if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
38834      if (isAndSetCC) {
38835        std::swap(FalseOp, TrueOp);
38836        CC0 = X86::GetOppositeBranchCondition(CC0);
38837        CC1 = X86::GetOppositeBranchCondition(CC1);
38838      }
38839
38840      SDValue LOps[] = {FalseOp, TrueOp,
38841                        DAG.getTargetConstant(CC0, DL, MVT::i8), Flags};
38842      SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps);
38843      SDValue Ops[] = {LCMOV, TrueOp, DAG.getTargetConstant(CC1, DL, MVT::i8),
38844                       Flags};
38845      SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
38846      return CMOV;
38847    }
38848  }
38849
38850  // Fold (CMOV C1, (ADD (CTTZ X), C2), (X != 0)) ->
38851  //      (ADD (CMOV C1-C2, (CTTZ X), (X != 0)), C2)
38852  // Or (CMOV (ADD (CTTZ X), C2), C1, (X == 0)) ->
38853  //    (ADD (CMOV (CTTZ X), C1-C2, (X == 0)), C2)
38854  if ((CC == X86::COND_NE || CC == X86::COND_E) &&
38855      Cond.getOpcode() == X86ISD::CMP && isNullConstant(Cond.getOperand(1))) {
38856    SDValue Add = TrueOp;
38857    SDValue Const = FalseOp;
38858    // Canonicalize the condition code for easier matching and output.
38859    if (CC == X86::COND_E)
38860      std::swap(Add, Const);
38861
38862    // We might have replaced the constant in the cmov with the LHS of the
38863    // compare. If so change it to the RHS of the compare.
38864    if (Const == Cond.getOperand(0))
38865      Const = Cond.getOperand(1);
38866
38867    // Ok, now make sure that Add is (add (cttz X), C2) and Const is a constant.
38868    if (isa<ConstantSDNode>(Const) && Add.getOpcode() == ISD::ADD &&
38869        Add.hasOneUse() && isa<ConstantSDNode>(Add.getOperand(1)) &&
38870        (Add.getOperand(0).getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
38871         Add.getOperand(0).getOpcode() == ISD::CTTZ) &&
38872        Add.getOperand(0).getOperand(0) == Cond.getOperand(0)) {
38873      EVT VT = N->getValueType(0);
38874      // This should constant fold.
38875      SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1));
38876      SDValue CMov =
38877          DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
38878                      DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8), Cond);
38879      return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1));
38880    }
38881  }
38882
38883  return SDValue();
38884}
38885
38886/// Different mul shrinking modes.
38887enum class ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
38888
38889static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
38890  EVT VT = N->getOperand(0).getValueType();
38891  if (VT.getScalarSizeInBits() != 32)
38892    return false;
38893
38894  assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
38895  unsigned SignBits[2] = {1, 1};
38896  bool IsPositive[2] = {false, false};
38897  for (unsigned i = 0; i < 2; i++) {
38898    SDValue Opd = N->getOperand(i);
38899
38900    SignBits[i] = DAG.ComputeNumSignBits(Opd);
38901    IsPositive[i] = DAG.SignBitIsZero(Opd);
38902  }
38903
38904  bool AllPositive = IsPositive[0] && IsPositive[1];
38905  unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
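  // Note: on 32-bit elements, having N sign bits means only (32 - N + 1) bits
  // carry information, so >= 25 sign bits implies the value fits in i8 and
  // >= 17 implies it fits in i16; with a known-zero sign bit, >= 24 / >= 16
  // give the unsigned i8 / i16 ranges checked below.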
38906  // When ranges are from -128 ~ 127, use MULS8 mode.
38907  if (MinSignBits >= 25)
38908    Mode = ShrinkMode::MULS8;
38909  // When ranges are from 0 ~ 255, use MULU8 mode.
38910  else if (AllPositive && MinSignBits >= 24)
38911    Mode = ShrinkMode::MULU8;
38912  // When ranges are from -32768 ~ 32767, use MULS16 mode.
38913  else if (MinSignBits >= 17)
38914    Mode = ShrinkMode::MULS16;
38915  // When ranges are from 0 ~ 65535, use MULU16 mode.
38916  else if (AllPositive && MinSignBits >= 16)
38917    Mode = ShrinkMode::MULU16;
38918  else
38919    return false;
38920  return true;
38921}
38922
38923/// When the operands of vector mul are extended from smaller size values,
38924 /// like i8 and i16, the type of mul may be shrunk to generate more
38925/// efficient code. Two typical patterns are handled:
38926/// Pattern1:
38927///     %2 = sext/zext <N x i8> %1 to <N x i32>
38928///     %4 = sext/zext <N x i8> %3 to <N x i32>
38929 ///  or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
38930///     %5 = mul <N x i32> %2, %4
38931///
38932/// Pattern2:
38933///     %2 = zext/sext <N x i16> %1 to <N x i32>
38934///     %4 = zext/sext <N x i16> %3 to <N x i32>
38935///  or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
38936///     %5 = mul <N x i32> %2, %4
38937///
38938/// There are four mul shrinking modes:
38939/// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
38940 /// -128 to 127, and the scalar value range of %4 is also -128 to 127,
38941/// generate pmullw+sext32 for it (MULS8 mode).
38942/// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
38943/// 0 to 255, and the scalar value range of %4 is also 0 to 255,
38944/// generate pmullw+zext32 for it (MULU8 mode).
38945/// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
38946/// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
38947/// generate pmullw+pmulhw for it (MULS16 mode).
38948/// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
38949/// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
38950/// generate pmullw+pmulhuw for it (MULU16 mode).
38951static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
38952                               const X86Subtarget &Subtarget) {
38953  // Check for legality
38954  // pmullw/pmulhw on XMM registers require SSE2.
38955  if (!Subtarget.hasSSE2())
38956    return SDValue();
38957
38958  // Check for profitability
38959  // pmulld is supported since SSE41. It is better to use pmulld
38960  // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
38961  // the expansion.
38962  bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize();
38963  if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
38964    return SDValue();
38965
38966  ShrinkMode Mode;
38967  if (!canReduceVMulWidth(N, DAG, Mode))
38968    return SDValue();
38969
38970  SDLoc DL(N);
38971  SDValue N0 = N->getOperand(0);
38972  SDValue N1 = N->getOperand(1);
38973  EVT VT = N->getOperand(0).getValueType();
38974  unsigned NumElts = VT.getVectorNumElements();
38975  if ((NumElts % 2) != 0)
38976    return SDValue();
38977
38978  EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);
38979
38980  // Shrink the operands of mul.
38981  SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
38982  SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
38983
38984  // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
38985  // lower part is needed.
38986  SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
38987  if (Mode == ShrinkMode::MULU8 || Mode == ShrinkMode::MULS8)
38988    return DAG.getNode((Mode == ShrinkMode::MULU8) ? ISD::ZERO_EXTEND
38989                                                   : ISD::SIGN_EXTEND,
38990                       DL, VT, MulLo);
38991
38992  MVT ResVT = MVT::getVectorVT(MVT::i32, NumElts / 2);
38993  // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
38994  // the higher part is also needed.
38995  SDValue MulHi =
38996      DAG.getNode(Mode == ShrinkMode::MULS16 ? ISD::MULHS : ISD::MULHU, DL,
38997                  ReducedVT, NewN0, NewN1);
38998
38999  // Repack the lower part and higher part result of mul into a wider
39000  // result.
39001  // Generate shuffle functioning as punpcklwd.
39002  SmallVector<int, 16> ShuffleMask(NumElts);
39003  for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
39004    ShuffleMask[2 * i] = i;
39005    ShuffleMask[2 * i + 1] = i + NumElts;
39006  }
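  // e.g. with NumElts == 8 this mask is {0,8,1,9,2,10,3,11}, interleaving the
  // low halves of MulLo and MulHi; the loop below builds {4,12,5,13,6,14,7,15}
  // for the high halves.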
39007  SDValue ResLo =
39008      DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
39009  ResLo = DAG.getBitcast(ResVT, ResLo);
39010  // Generate shuffle functioning as punpckhwd.
39011  for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
39012    ShuffleMask[2 * i] = i + NumElts / 2;
39013    ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
39014  }
39015  SDValue ResHi =
39016      DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
39017  ResHi = DAG.getBitcast(ResVT, ResHi);
39018  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
39019}
39020
39021static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG,
39022                                 EVT VT, const SDLoc &DL) {
39023
39024  auto combineMulShlAddOrSub = [&](int Mult, int Shift, bool isAdd) {
39025    SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
39026                                 DAG.getConstant(Mult, DL, VT));
39027    Result = DAG.getNode(ISD::SHL, DL, VT, Result,
39028                         DAG.getConstant(Shift, DL, MVT::i8));
39029    Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
39030                         N->getOperand(0));
39031    return Result;
39032  };
39033
39034  auto combineMulMulAddOrSub = [&](int Mul1, int Mul2, bool isAdd) {
39035    SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
39036                                 DAG.getConstant(Mul1, DL, VT));
39037    Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, Result,
39038                         DAG.getConstant(Mul2, DL, VT));
39039    Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
39040                         N->getOperand(0));
39041    return Result;
39042  };
39043
39044  switch (MulAmt) {
39045  default:
39046    break;
39047  case 11:
39048    // mul x, 11 => add ((shl (mul x, 5), 1), x)
39049    return combineMulShlAddOrSub(5, 1, /*isAdd*/ true);
39050  case 21:
39051    // mul x, 21 => add ((shl (mul x, 5), 2), x)
39052    return combineMulShlAddOrSub(5, 2, /*isAdd*/ true);
39053  case 41:
39054    // mul x, 41 => add ((shl (mul x, 5), 3), x)
39055    return combineMulShlAddOrSub(5, 3, /*isAdd*/ true);
39056  case 22:
39057    // mul x, 22 => add (add ((shl (mul x, 5), 2), x), x)
39058    return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
39059                       combineMulShlAddOrSub(5, 2, /*isAdd*/ true));
39060  case 19:
39061    // mul x, 19 => add ((shl (mul x, 9), 1), x)
39062    return combineMulShlAddOrSub(9, 1, /*isAdd*/ true);
39063  case 37:
39064    // mul x, 37 => add ((shl (mul x, 9), 2), x)
39065    return combineMulShlAddOrSub(9, 2, /*isAdd*/ true);
39066  case 73:
39067    // mul x, 73 => add ((shl (mul x, 9), 3), x)
39068    return combineMulShlAddOrSub(9, 3, /*isAdd*/ true);
39069  case 13:
39070    // mul x, 13 => add ((shl (mul x, 3), 2), x)
39071    return combineMulShlAddOrSub(3, 2, /*isAdd*/ true);
39072  case 23:
39073    // mul x, 23 => sub ((shl (mul x, 3), 3), x)
39074    return combineMulShlAddOrSub(3, 3, /*isAdd*/ false);
39075  case 26:
39076    // mul x, 26 => add ((mul (mul x, 5), 5), x)
39077    return combineMulMulAddOrSub(5, 5, /*isAdd*/ true);
39078  case 28:
39079    // mul x, 28 => add ((mul (mul x, 9), 3), x)
39080    return combineMulMulAddOrSub(9, 3, /*isAdd*/ true);
39081  case 29:
39082    // mul x, 29 => add (add ((mul (mul x, 9), 3), x), x)
39083    return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
39084                       combineMulMulAddOrSub(9, 3, /*isAdd*/ true));
39085  }
39086
39087  // Another trick. If this is a power of 2 plus 2/4/8, we can use a shift
39088  // followed by a single LEA.
39089  // First check if this is a sum of two powers of 2 because that's easy. Then
39090  // count the trailing zeros to get the 2/4/8 scale for the LEA.
39091  // TODO: We can do this even without LEA at a cost of two shifts and an add.
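  // e.g. MulAmt == 20 (16 + 4): ScaleShift == 2 and ShiftAmt == 4, so we emit
  // (x << 4) + (x << 2), where the add of the scaled term can become an LEA.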
39092  if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
39093    unsigned ScaleShift = countTrailingZeros(MulAmt);
39094    if (ScaleShift >= 1 && ScaleShift < 4) {
39095      unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
39096      SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39097                                   DAG.getConstant(ShiftAmt, DL, MVT::i8));
39098      SDValue Shift2 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39099                                   DAG.getConstant(ScaleShift, DL, MVT::i8));
39100      return DAG.getNode(ISD::ADD, DL, VT, Shift1, Shift2);
39101    }
39102  }
39103
39104  return SDValue();
39105}
39106
39107// If the upper 17 bits of each element are zero then we can use PMADDWD,
39108// which is always at least as quick as PMULLD, except on KNL.
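// VPMADDWD multiplies corresponding signed 16-bit lanes and adds adjacent
// pairs into 32-bit results. If the upper 17 bits of every 32-bit element are
// zero, each low 16-bit lane holds a non-negative value below 2^15 and each
// high 16-bit lane is zero, so the pairwise add is a no-op and the result
// matches the plain 32-bit multiply.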
39109static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
39110                                   const X86Subtarget &Subtarget) {
39111  if (!Subtarget.hasSSE2())
39112    return SDValue();
39113
39114  if (Subtarget.isPMADDWDSlow())
39115    return SDValue();
39116
39117  EVT VT = N->getValueType(0);
39118
39119  // Only support vXi32 vectors.
39120  if (!VT.isVector() || VT.getVectorElementType() != MVT::i32)
39121    return SDValue();
39122
39123  // Make sure the vXi16 type is legal. This covers the AVX512 without BWI case.
39124  // Also allow v2i32 if it will be widened.
39125  MVT WVT = MVT::getVectorVT(MVT::i16, 2 * VT.getVectorNumElements());
39126  if (VT != MVT::v2i32 && !DAG.getTargetLoweringInfo().isTypeLegal(WVT))
39127    return SDValue();
39128
39129  SDValue N0 = N->getOperand(0);
39130  SDValue N1 = N->getOperand(1);
39131
39132  // If we are zero extending in two steps without SSE4.1, it's better to reduce
39133  // the vmul width instead.
39134  if (!Subtarget.hasSSE41() &&
39135      (N0.getOpcode() == ISD::ZERO_EXTEND &&
39136       N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
39137      (N1.getOpcode() == ISD::ZERO_EXTEND &&
39138       N1.getOperand(0).getScalarValueSizeInBits() <= 8))
39139    return SDValue();
39140
39141  APInt Mask17 = APInt::getHighBitsSet(32, 17);
39142  if (!DAG.MaskedValueIsZero(N1, Mask17) ||
39143      !DAG.MaskedValueIsZero(N0, Mask17))
39144    return SDValue();
39145
39146  // Use SplitOpsAndApply to handle AVX splitting.
39147  auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
39148                           ArrayRef<SDValue> Ops) {
39149    MVT OpVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
39150    return DAG.getNode(X86ISD::VPMADDWD, DL, OpVT, Ops);
39151  };
39152  return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
39153                          { DAG.getBitcast(WVT, N0), DAG.getBitcast(WVT, N1) },
39154                          PMADDWDBuilder);
39155}
39156
39157static SDValue combineMulToPMULDQ(SDNode *N, SelectionDAG &DAG,
39158                                  const X86Subtarget &Subtarget) {
39159  if (!Subtarget.hasSSE2())
39160    return SDValue();
39161
39162  EVT VT = N->getValueType(0);
39163
39164  // Only support vXi64 vectors.
39165  if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 ||
39166      VT.getVectorNumElements() < 2 ||
39167      !isPowerOf2_32(VT.getVectorNumElements()))
39168    return SDValue();
39169
39170  SDValue N0 = N->getOperand(0);
39171  SDValue N1 = N->getOperand(1);
39172
39173  // PMULDQ returns the 64-bit result of the signed multiplication of the lower
39174  // 32-bits. We can lower with this if the sign bits stretch that far.
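  // When both operands have more than 32 sign bits, each is the sign extension
  // of its low 32 bits, so PMULDQ's sign-extended 32x32->64 multiply yields the
  // same low 64 bits as the full 64x64 multiply.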
39175  if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 &&
39176      DAG.ComputeNumSignBits(N1) > 32) {
39177    auto PMULDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
39178                            ArrayRef<SDValue> Ops) {
39179      return DAG.getNode(X86ISD::PMULDQ, DL, Ops[0].getValueType(), Ops);
39180    };
39181    return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
39182                            PMULDQBuilder, /*CheckBWI*/false);
39183  }
39184
39185  // If the upper bits are zero we can use a single pmuludq.
39186  APInt Mask = APInt::getHighBitsSet(64, 32);
39187  if (DAG.MaskedValueIsZero(N0, Mask) && DAG.MaskedValueIsZero(N1, Mask)) {
39188    auto PMULUDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
39189                             ArrayRef<SDValue> Ops) {
39190      return DAG.getNode(X86ISD::PMULUDQ, DL, Ops[0].getValueType(), Ops);
39191    };
39192    return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
39193                            PMULUDQBuilder, /*CheckBWI*/false);
39194  }
39195
39196  return SDValue();
39197}
39198
39199/// Optimize a single multiply with constant into two operations in order to
39200/// implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
39201static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
39202                          TargetLowering::DAGCombinerInfo &DCI,
39203                          const X86Subtarget &Subtarget) {
39204  EVT VT = N->getValueType(0);
39205
39206  if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget))
39207    return V;
39208
39209  if (SDValue V = combineMulToPMULDQ(N, DAG, Subtarget))
39210    return V;
39211
39212  if (DCI.isBeforeLegalize() && VT.isVector())
39213    return reduceVMULWidth(N, DAG, Subtarget);
39214
39215  if (!MulConstantOptimization)
39216    return SDValue();
39217  // An imul is usually smaller than the alternative sequence.
39218  if (DAG.getMachineFunction().getFunction().hasMinSize())
39219    return SDValue();
39220
39221  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
39222    return SDValue();
39223
39224  if (VT != MVT::i64 && VT != MVT::i32)
39225    return SDValue();
39226
39227  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
39228  if (!C)
39229    return SDValue();
39230  if (isPowerOf2_64(C->getZExtValue()))
39231    return SDValue();
39232
39233  int64_t SignMulAmt = C->getSExtValue();
39234  assert(SignMulAmt != INT64_MIN && "Int min should have been handled!");
39235  uint64_t AbsMulAmt = SignMulAmt < 0 ? -SignMulAmt : SignMulAmt;
39236
39237  SDLoc DL(N);
39238  if (AbsMulAmt == 3 || AbsMulAmt == 5 || AbsMulAmt == 9) {
39239    SDValue NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
39240                                 DAG.getConstant(AbsMulAmt, DL, VT));
39241    if (SignMulAmt < 0)
39242      NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
39243                           NewMul);
39244
39245    return NewMul;
39246  }
39247
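  // Try to factor the constant into 3/5/9 times a cheap second factor; e.g.
  // mul x, 45 becomes (x * 9) * 5, which selects to two LEAs:
  //   lea t, [x + x*8]; lea result, [t + t*4]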
39248  uint64_t MulAmt1 = 0;
39249  uint64_t MulAmt2 = 0;
39250  if ((AbsMulAmt % 9) == 0) {
39251    MulAmt1 = 9;
39252    MulAmt2 = AbsMulAmt / 9;
39253  } else if ((AbsMulAmt % 5) == 0) {
39254    MulAmt1 = 5;
39255    MulAmt2 = AbsMulAmt / 5;
39256  } else if ((AbsMulAmt % 3) == 0) {
39257    MulAmt1 = 3;
39258    MulAmt2 = AbsMulAmt / 3;
39259  }
39260
39261  SDValue NewMul;
39262  // For negative multiply amounts, only allow MulAmt2 to be a power of 2.
39263  if (MulAmt2 &&
39264      (isPowerOf2_64(MulAmt2) ||
39265       (SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) {
39266
39267    if (isPowerOf2_64(MulAmt2) &&
39268        !(SignMulAmt >= 0 && N->hasOneUse() &&
39269          N->use_begin()->getOpcode() == ISD::ADD))
39270      // If the second multiplier is pow2, issue it first. We want the multiply by
39271      // 3, 5, or 9 to be folded into the addressing mode unless the lone use
39272      // is an add. Only do this for positive multiply amounts since the
39273      // negate would prevent it from being used as an address mode anyway.
39274      std::swap(MulAmt1, MulAmt2);
39275
39276    if (isPowerOf2_64(MulAmt1))
39277      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39278                           DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
39279    else
39280      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
39281                           DAG.getConstant(MulAmt1, DL, VT));
39282
39283    if (isPowerOf2_64(MulAmt2))
39284      NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
39285                           DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
39286    else
39287      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
39288                           DAG.getConstant(MulAmt2, DL, VT));
39289
39290    // Negate the result.
39291    if (SignMulAmt < 0)
39292      NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
39293                           NewMul);
39294  } else if (!Subtarget.slowLEA())
39295    NewMul = combineMulSpecial(C->getZExtValue(), N, DAG, VT, DL);
39296
39297  if (!NewMul) {
39298    assert(C->getZExtValue() != 0 &&
39299           C->getZExtValue() != (VT == MVT::i64 ? UINT64_MAX : UINT32_MAX) &&
39300           "Both cases that could cause potential overflows should have "
39301           "already been handled.");
39302    if (isPowerOf2_64(AbsMulAmt - 1)) {
39303      // (mul x, 2^N + 1) => (add (shl x, N), x)
39304      NewMul = DAG.getNode(
39305          ISD::ADD, DL, VT, N->getOperand(0),
39306          DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39307                      DAG.getConstant(Log2_64(AbsMulAmt - 1), DL,
39308                                      MVT::i8)));
39309      // To negate, subtract the number from zero
39310      if (SignMulAmt < 0)
39311        NewMul = DAG.getNode(ISD::SUB, DL, VT,
39312                             DAG.getConstant(0, DL, VT), NewMul);
39313    } else if (isPowerOf2_64(AbsMulAmt + 1)) {
39314      // (mul x, 2^N - 1) => (sub (shl x, N), x)
39315      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39316                           DAG.getConstant(Log2_64(AbsMulAmt + 1),
39317                                           DL, MVT::i8));
39318      // To negate, reverse the operands of the subtract.
39319      if (SignMulAmt < 0)
39320        NewMul = DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), NewMul);
39321      else
39322        NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
39323    } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt - 2)) {
39324      // (mul x, 2^N + 2) => (add (add (shl x, N), x), x)
39325      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39326                           DAG.getConstant(Log2_64(AbsMulAmt - 2),
39327                                           DL, MVT::i8));
39328      NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
39329      NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
39330    } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt + 2)) {
39331      // (mul x, 2^N - 2) => (sub (sub (shl x, N), x), x)
39332      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
39333                           DAG.getConstant(Log2_64(AbsMulAmt + 2),
39334                                           DL, MVT::i8));
39335      NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
39336      NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
39337    }
39338  }
39339
39340  return NewMul;
39341}
39342
39343static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
39344  SDValue N0 = N->getOperand(0);
39345  SDValue N1 = N->getOperand(1);
39346  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
39347  EVT VT = N0.getValueType();
39348
39349  // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
39350  // since the result of setcc_c is all zero's or all ones.
39351  if (VT.isInteger() && !VT.isVector() &&
39352      N1C && N0.getOpcode() == ISD::AND &&
39353      N0.getOperand(1).getOpcode() == ISD::Constant) {
39354    SDValue N00 = N0.getOperand(0);
39355    APInt Mask = N0.getConstantOperandAPInt(1);
39356    Mask <<= N1C->getAPIntValue();
39357    bool MaskOK = false;
39358    // We can handle cases concerning bit-widening nodes containing setcc_c if
39359    // we carefully interrogate the mask to make sure we preserve the
39360    // semantics.
39361    // The transform is not safe if the result of C1 << C2 exceeds the bitwidth
39362    // of the underlying setcc_c operation when the setcc_c was zero extended.
39363    // Consider the following example:
39364    //   zext(setcc_c)                 -> i32 0x0000FFFF
39365    //   c1                            -> i32 0x0000FFFF
39366    //   c2                            -> i32 0x00000001
39367    //   (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
39368    //   (and setcc_c, (c1 << c2))     -> i32 0x0000FFFE
39369    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
39370      MaskOK = true;
39371    } else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
39372               N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
39373      MaskOK = true;
39374    } else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
39375                N00.getOpcode() == ISD::ANY_EXTEND) &&
39376               N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
39377      MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
39378    }
39379    if (MaskOK && Mask != 0) {
39380      SDLoc DL(N);
39381      return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
39382    }
39383  }
39384
39385  // Hardware support for vector shifts is sparse which makes us scalarize the
39386  // vector operations in many cases. Also, on sandybridge ADD is faster than
39387  // shl.
39388  // (shl V, 1) -> add V,V
39389  if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
39390    if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
39391      assert(N0.getValueType().isVector() && "Invalid vector shift type");
39392      // We shift all of the values by one. In many cases we do not have
39393      // hardware support for this operation. This is better expressed as an ADD
39394      // of two values.
39395      if (N1SplatC->isOne())
39396        return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
39397    }
39398
39399  return SDValue();
39400}
39401
39402static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG) {
39403  SDValue N0 = N->getOperand(0);
39404  SDValue N1 = N->getOperand(1);
39405  EVT VT = N0.getValueType();
39406  unsigned Size = VT.getSizeInBits();
39407
39408  // fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
39409  // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
39410  // into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
39411  // depending on sign of (SarConst - [56,48,32,24,16])
39412
39413  // sexts on X86 are MOVs. The MOVs have the same code size
39414  // as the above SHIFTs (only SHIFT by 1 has lower code size).
39415  // However the MOVs have 2 advantages to a SHIFT:
39416  // 1. MOVs can write to a register that differs from source
39417  // 2. MOVs accept memory operands
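  // e.g. for i32: (sra (shl X, 24), 26) becomes (sra (sext_inreg X, i8), 2)
  // and (sra (shl X, 24), 22) becomes (shl (sext_inreg X, i8), 2).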
39418
39419  if (VT.isVector() || N1.getOpcode() != ISD::Constant ||
39420      N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
39421      N0.getOperand(1).getOpcode() != ISD::Constant)
39422    return SDValue();
39423
39424  SDValue N00 = N0.getOperand(0);
39425  SDValue N01 = N0.getOperand(1);
39426  APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
39427  APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
39428  EVT CVT = N1.getValueType();
39429
39430  if (SarConst.isNegative())
39431    return SDValue();
39432
39433  for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
39434    unsigned ShiftSize = SVT.getSizeInBits();
39435    // Skip types without a corresponding sext/zext and ShlConst values that
39436    // are not one of [56,48,32,24,16].
39437    if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
39438      continue;
39439    SDLoc DL(N);
39440    SDValue NN =
39441        DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
39442    SarConst = SarConst - (Size - ShiftSize);
39443    if (SarConst == 0)
39444      return NN;
39445    else if (SarConst.isNegative())
39446      return DAG.getNode(ISD::SHL, DL, VT, NN,
39447                         DAG.getConstant(-SarConst, DL, CVT));
39448    else
39449      return DAG.getNode(ISD::SRA, DL, VT, NN,
39450                         DAG.getConstant(SarConst, DL, CVT));
39451  }
39452  return SDValue();
39453}
39454
39455static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG,
39456                                        TargetLowering::DAGCombinerInfo &DCI) {
39457  SDValue N0 = N->getOperand(0);
39458  SDValue N1 = N->getOperand(1);
39459  EVT VT = N0.getValueType();
39460
39461  // Only do this on the last DAG combine as it can interfere with other
39462  // combines.
39463  if (!DCI.isAfterLegalizeDAG())
39464    return SDValue();
39465
39466  // Try to improve a sequence of srl (and X, C1), C2 by inverting the order.
39467  // TODO: This is a generic DAG combine that became an x86-only combine to
39468  // avoid shortcomings in other folds such as bswap, bit-test ('bt'), and
39469  // and-not ('andn').
39470  if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
39471    return SDValue();
39472
39473  auto *ShiftC = dyn_cast<ConstantSDNode>(N1);
39474  auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
39475  if (!ShiftC || !AndC)
39476    return SDValue();
39477
39478  // If we can shrink the constant mask below 8-bits or 32-bits, then this
39479  // transform should reduce code size. It may also enable secondary transforms
39480  // from improved known-bits analysis or instruction selection.
39481  APInt MaskVal = AndC->getAPIntValue();
39482
39483  // If this can be matched by a zero extend, don't optimize.
39484  if (MaskVal.isMask()) {
39485    unsigned TO = MaskVal.countTrailingOnes();
39486    if (TO >= 8 && isPowerOf2_32(TO))
39487      return SDValue();
39488  }
39489
39490  APInt NewMaskVal = MaskVal.lshr(ShiftC->getAPIntValue());
39491  unsigned OldMaskSize = MaskVal.getMinSignedBits();
39492  unsigned NewMaskSize = NewMaskVal.getMinSignedBits();
39493  if ((OldMaskSize > 8 && NewMaskSize <= 8) ||
39494      (OldMaskSize > 32 && NewMaskSize <= 32)) {
39495    // srl (and X, AndC), ShiftC --> and (srl X, ShiftC), (AndC >> ShiftC)
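    // e.g. (srl (and X, 0xFE0), 5) --> (and (srl X, 5), 0x7F), shrinking the
    // AND immediate from 13 significant bits to 8.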
39496    SDLoc DL(N);
39497    SDValue NewMask = DAG.getConstant(NewMaskVal, DL, VT);
39498    SDValue NewShift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1);
39499    return DAG.getNode(ISD::AND, DL, VT, NewShift, NewMask);
39500  }
39501  return SDValue();
39502}
39503
39504static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
39505                                 TargetLowering::DAGCombinerInfo &DCI,
39506                                 const X86Subtarget &Subtarget) {
39507  unsigned Opcode = N->getOpcode();
39508  assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
39509         "Unexpected pack opcode");
39510
39511  EVT VT = N->getValueType(0);
39512  SDValue N0 = N->getOperand(0);
39513  SDValue N1 = N->getOperand(1);
39514  unsigned DstBitsPerElt = VT.getScalarSizeInBits();
39515  unsigned SrcBitsPerElt = 2 * DstBitsPerElt;
39516  assert(N0.getScalarValueSizeInBits() == SrcBitsPerElt &&
39517         N1.getScalarValueSizeInBits() == SrcBitsPerElt &&
39518         "Unexpected PACKSS/PACKUS input type");
39519
39520  bool IsSigned = (X86ISD::PACKSS == Opcode);
39521
39522  // Constant Folding.
39523  APInt UndefElts0, UndefElts1;
39524  SmallVector<APInt, 32> EltBits0, EltBits1;
39525  if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
39526      (N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
39527      getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
39528      getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
39529    unsigned NumLanes = VT.getSizeInBits() / 128;
39530    unsigned NumDstElts = VT.getVectorNumElements();
39531    unsigned NumSrcElts = NumDstElts / 2;
39532    unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
39533    unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
39534
39535    APInt Undefs(NumDstElts, 0);
39536    SmallVector<APInt, 32> Bits(NumDstElts, APInt::getNullValue(DstBitsPerElt));
39537    for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
39538      for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
39539        unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
39540        auto &UndefElts = (Elt >= NumSrcEltsPerLane ? UndefElts1 : UndefElts0);
39541        auto &EltBits = (Elt >= NumSrcEltsPerLane ? EltBits1 : EltBits0);
39542
39543        if (UndefElts[SrcIdx]) {
39544          Undefs.setBit(Lane * NumDstEltsPerLane + Elt);
39545          continue;
39546        }
39547
39548        APInt &Val = EltBits[SrcIdx];
39549        if (IsSigned) {
39550          // PACKSS: Truncate signed value with signed saturation.
39551          // Source values less than dst minint are saturated to minint.
39552          // Source values greater than dst maxint are saturated to maxint.
39553          if (Val.isSignedIntN(DstBitsPerElt))
39554            Val = Val.trunc(DstBitsPerElt);
39555          else if (Val.isNegative())
39556            Val = APInt::getSignedMinValue(DstBitsPerElt);
39557          else
39558            Val = APInt::getSignedMaxValue(DstBitsPerElt);
39559        } else {
39560          // PACKUS: Truncate signed value with unsigned saturation.
39561          // Source values less than zero are saturated to zero.
39562          // Source values greater than dst maxuint are saturated to maxuint.
39563          if (Val.isIntN(DstBitsPerElt))
39564            Val = Val.trunc(DstBitsPerElt);
39565          else if (Val.isNegative())
39566            Val = APInt::getNullValue(DstBitsPerElt);
39567          else
39568            Val = APInt::getAllOnesValue(DstBitsPerElt);
39569        }
39570        Bits[Lane * NumDstEltsPerLane + Elt] = Val;
39571      }
39572    }
39573
39574    return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N));
39575  }
39576
39577  // Try to combine a PACKUSWB/PACKSSWB implemented truncate with a regular
39578  // truncate to create a larger truncate.
39579  if (Subtarget.hasAVX512() &&
39580      N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 &&
39581      N0.getOperand(0).getValueType() == MVT::v8i32) {
39582    if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) ||
39583        (!IsSigned &&
39584         DAG.MaskedValueIsZero(N0, APInt::getHighBitsSet(16, 8)))) {
39585      if (Subtarget.hasVLX())
39586        return DAG.getNode(X86ISD::VTRUNC, SDLoc(N), VT, N0.getOperand(0));
39587
39588      // Widen input to v16i32 so we can truncate that.
39589      SDLoc dl(N);
39590      SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i32,
39591                                   N0.getOperand(0), DAG.getUNDEF(MVT::v8i32));
39592      return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Concat);
39593    }
39594  }
39595
39596  // Attempt to combine as shuffle.
39597  SDValue Op(N, 0);
39598  if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
39599    return Res;
39600
39601  return SDValue();
39602}
39603
39604static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG,
39605                                     TargetLowering::DAGCombinerInfo &DCI,
39606                                     const X86Subtarget &Subtarget) {
39607  assert((X86ISD::VSHL == N->getOpcode() || X86ISD::VSRA == N->getOpcode() ||
39608          X86ISD::VSRL == N->getOpcode()) &&
39609         "Unexpected shift opcode");
39610  EVT VT = N->getValueType(0);
39611  SDValue N0 = N->getOperand(0);
39612  SDValue N1 = N->getOperand(1);
39613
39614  // Shift zero -> zero.
39615  if (ISD::isBuildVectorAllZeros(N0.getNode()))
39616    return DAG.getConstant(0, SDLoc(N), VT);
39617
39618  // Detect constant shift amounts.
39619  APInt UndefElts;
39620  SmallVector<APInt, 32> EltBits;
39621  if (getTargetConstantBitsFromNode(N1, 64, UndefElts, EltBits, true, false)) {
39622    unsigned X86Opc = getTargetVShiftUniformOpcode(N->getOpcode(), false);
39623    return getTargetVShiftByConstNode(X86Opc, SDLoc(N), VT.getSimpleVT(), N0,
39624                                      EltBits[0].getZExtValue(), DAG);
39625  }
39626
39627  APInt KnownUndef, KnownZero;
39628  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39629  APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
39630  if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
39631                                     KnownZero, DCI))
39632    return SDValue(N, 0);
39633
39634  return SDValue();
39635}
39636
39637static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
39638                                     TargetLowering::DAGCombinerInfo &DCI,
39639                                     const X86Subtarget &Subtarget) {
39640  unsigned Opcode = N->getOpcode();
39641  assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode ||
39642          X86ISD::VSRLI == Opcode) &&
39643         "Unexpected shift opcode");
39644  bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode;
39645  EVT VT = N->getValueType(0);
39646  SDValue N0 = N->getOperand(0);
39647  unsigned NumBitsPerElt = VT.getScalarSizeInBits();
39648  assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 &&
39649         "Unexpected value type");
39650  assert(N->getOperand(1).getValueType() == MVT::i8 &&
39651         "Unexpected shift amount type");
39652
39653  // Out of range logical bit shifts are guaranteed to be zero.
39654  // Out of range arithmetic bit shifts splat the sign bit.
39655  unsigned ShiftVal = N->getConstantOperandVal(1);
39656  if (ShiftVal >= NumBitsPerElt) {
39657    if (LogicalShift)
39658      return DAG.getConstant(0, SDLoc(N), VT);
39659    else
39660      ShiftVal = NumBitsPerElt - 1;
39661  }
39662
39663  // Shift N0 by zero -> N0.
39664  if (!ShiftVal)
39665    return N0;
39666
39667  // Shift zero -> zero.
39668  if (ISD::isBuildVectorAllZeros(N0.getNode()))
39669    return DAG.getConstant(0, SDLoc(N), VT);
39670
39671  // Fold (VSRAI (VSRAI X, C1), C2) --> (VSRAI X, (C1 + C2)) with (C1 + C2)
39672  // clamped to (NumBitsPerElt - 1).
39673  if (Opcode == X86ISD::VSRAI && N0.getOpcode() == X86ISD::VSRAI) {
39674    unsigned ShiftVal2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
39675    unsigned NewShiftVal = ShiftVal + ShiftVal2;
39676    if (NewShiftVal >= NumBitsPerElt)
39677      NewShiftVal = NumBitsPerElt - 1;
39678    return DAG.getNode(X86ISD::VSRAI, SDLoc(N), VT, N0.getOperand(0),
39679                       DAG.getTargetConstant(NewShiftVal, SDLoc(N), MVT::i8));
39680  }
39681
39682  // We can decode 'whole byte' logical bit shifts as shuffles.
39683  if (LogicalShift && (ShiftVal % 8) == 0) {
39684    SDValue Op(N, 0);
39685    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
39686      return Res;
39687  }
39688
39689  // Constant Folding.
39690  APInt UndefElts;
39691  SmallVector<APInt, 32> EltBits;
39692  if (N->isOnlyUserOf(N0.getNode()) &&
39693      getTargetConstantBitsFromNode(N0, NumBitsPerElt, UndefElts, EltBits)) {
39694    assert(EltBits.size() == VT.getVectorNumElements() &&
39695           "Unexpected shift value type");
39696    for (APInt &Elt : EltBits) {
39697      if (X86ISD::VSHLI == Opcode)
39698        Elt <<= ShiftVal;
39699      else if (X86ISD::VSRAI == Opcode)
39700        Elt.ashrInPlace(ShiftVal);
39701      else
39702        Elt.lshrInPlace(ShiftVal);
39703    }
39704    return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
39705  }
39706
39707  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39708  if (TLI.SimplifyDemandedBits(SDValue(N, 0),
39709                               APInt::getAllOnesValue(NumBitsPerElt), DCI))
39710    return SDValue(N, 0);
39711
39712  return SDValue();
39713}
39714
39715static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
39716                                   TargetLowering::DAGCombinerInfo &DCI,
39717                                   const X86Subtarget &Subtarget) {
39718  EVT VT = N->getValueType(0);
39719  assert(((N->getOpcode() == X86ISD::PINSRB && VT == MVT::v16i8) ||
39720          (N->getOpcode() == X86ISD::PINSRW && VT == MVT::v8i16)) &&
39721         "Unexpected vector insertion");
39722
39723  unsigned NumBitsPerElt = VT.getScalarSizeInBits();
39724  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39725  if (TLI.SimplifyDemandedBits(SDValue(N, 0),
39726                               APInt::getAllOnesValue(NumBitsPerElt), DCI))
39727    return SDValue(N, 0);
39728
39729  // Attempt to combine PINSRB/PINSRW patterns to a shuffle.
39730  SDValue Op(N, 0);
39731  if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
39732    return Res;
39733
39734  return SDValue();
39735}
39736
39737/// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
39738/// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
39739/// OR -> CMPNEQSS.
39740static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
39741                                   TargetLowering::DAGCombinerInfo &DCI,
39742                                   const X86Subtarget &Subtarget) {
39743  unsigned opcode;
39744
39745  // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
39746  // we're requiring SSE2 for both.
39747  if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
39748    SDValue N0 = N->getOperand(0);
39749    SDValue N1 = N->getOperand(1);
39750    SDValue CMP0 = N0.getOperand(1);
39751    SDValue CMP1 = N1.getOperand(1);
39752    SDLoc DL(N);
39753
39754    // The SETCCs should both refer to the same CMP.
39755    if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
39756      return SDValue();
39757
39758    SDValue CMP00 = CMP0->getOperand(0);
39759    SDValue CMP01 = CMP0->getOperand(1);
39760    EVT     VT    = CMP00.getValueType();
39761
39762    if (VT == MVT::f32 || VT == MVT::f64) {
39763      bool ExpectingFlags = false;
39764      // Check for any users that want flags:
39765      for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
39766           !ExpectingFlags && UI != UE; ++UI)
39767        switch (UI->getOpcode()) {
39768        default:
39769        case ISD::BR_CC:
39770        case ISD::BRCOND:
39771        case ISD::SELECT:
39772          ExpectingFlags = true;
39773          break;
39774        case ISD::CopyToReg:
39775        case ISD::SIGN_EXTEND:
39776        case ISD::ZERO_EXTEND:
39777        case ISD::ANY_EXTEND:
39778          break;
39779        }
39780
39781      if (!ExpectingFlags) {
39782        enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
39783        enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
39784
39785        if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
39786          X86::CondCode tmp = cc0;
39787          cc0 = cc1;
39788          cc1 = tmp;
39789        }
39790
39791        if ((cc0 == X86::COND_E  && cc1 == X86::COND_NP) ||
39792            (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
39793          // FIXME: need symbolic constants for these magic numbers.
39794          // See X86ATTInstPrinter.cpp:printSSECC().
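          // (In the SSE compare predicate encoding, 0 is CMPEQ and 4 is
          // CMPNEQ, matching COND_E and COND_NE above.)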
39795          unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
39796          if (Subtarget.hasAVX512()) {
39797            SDValue FSetCC =
39798                DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
39799                            DAG.getTargetConstant(x86cc, DL, MVT::i8));
39800            // Need to fill with zeros to ensure the bitcast will produce zeroes
39801            // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
39802            SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1,
39803                                      DAG.getConstant(0, DL, MVT::v16i1),
39804                                      FSetCC, DAG.getIntPtrConstant(0, DL));
39805            return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL,
39806                                      N->getSimpleValueType(0));
39807          }
39808          SDValue OnesOrZeroesF =
39809              DAG.getNode(X86ISD::FSETCC, DL, CMP00.getValueType(), CMP00,
39810                          CMP01, DAG.getTargetConstant(x86cc, DL, MVT::i8));
39811
39812          bool is64BitFP = (CMP00.getValueType() == MVT::f64);
39813          MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
39814
39815          if (is64BitFP && !Subtarget.is64Bit()) {
39816            // On a 32-bit target, we cannot bitcast the 64-bit float to a
39817            // 64-bit integer, since that's not a legal type. Since
39818            // OnesOrZeroesF is all ones or all zeroes, we don't need all the
39819            // bits, but can do this little dance to extract the lowest 32 bits
39820            // and work with those going forward.
39821            SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
39822                                           OnesOrZeroesF);
39823            SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
39824            OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
39825                                        Vector32, DAG.getIntPtrConstant(0, DL));
39826            IntVT = MVT::i32;
39827          }
39828
39829          SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
39830          SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
39831                                      DAG.getConstant(1, DL, IntVT));
39832          SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
39833                                              ANDed);
39834          return OneBitOfTruth;
39835        }
39836      }
39837    }
39838  }
39839  return SDValue();
39840}
39841
39842/// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
39843static SDValue combineANDXORWithAllOnesIntoANDNP(SDNode *N, SelectionDAG &DAG) {
39844  assert(N->getOpcode() == ISD::AND);
39845
39846  MVT VT = N->getSimpleValueType(0);
39847  if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
39848    return SDValue();
39849
39850  SDValue X, Y;
39851  SDValue N0 = N->getOperand(0);
39852  SDValue N1 = N->getOperand(1);
39853
39854  if (SDValue Not = IsNOT(N0, DAG)) {
39855    X = Not;
39856    Y = N1;
39857  } else if (SDValue Not = IsNOT(N1, DAG)) {
39858    X = Not;
39859    Y = N0;
39860  } else
39861    return SDValue();
39862
39863  X = DAG.getBitcast(VT, X);
39864  Y = DAG.getBitcast(VT, Y);
39865  return DAG.getNode(X86ISD::ANDNP, SDLoc(N), VT, X, Y);
39866}
39867
39868// On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
39869// register. In most cases we actually compare or select YMM-sized registers
39870// and mixing the two types creates horrible code. This method optimizes
39871// some of the transition sequences.
39872// Even with AVX-512 this is still useful for removing casts around logical
39873// operations on vXi1 mask types.
39874static SDValue PromoteMaskArithmetic(SDNode *N, SelectionDAG &DAG,
39875                                     const X86Subtarget &Subtarget) {
39876  EVT VT = N->getValueType(0);
39877  assert(VT.isVector() && "Expected vector type");
39878
39879  assert((N->getOpcode() == ISD::ANY_EXTEND ||
39880          N->getOpcode() == ISD::ZERO_EXTEND ||
39881          N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
39882
39883  SDValue Narrow = N->getOperand(0);
39884  EVT NarrowVT = Narrow.getValueType();
39885
39886  if (Narrow->getOpcode() != ISD::XOR &&
39887      Narrow->getOpcode() != ISD::AND &&
39888      Narrow->getOpcode() != ISD::OR)
39889    return SDValue();
39890
39891  SDValue N0  = Narrow->getOperand(0);
39892  SDValue N1  = Narrow->getOperand(1);
39893  SDLoc DL(Narrow);
39894
39895  // The left side has to be a trunc.
39896  if (N0.getOpcode() != ISD::TRUNCATE)
39897    return SDValue();
39898
39899  // The type of the truncated inputs.
39900  if (N0.getOperand(0).getValueType() != VT)
39901    return SDValue();
39902
39903  // The right side has to be a 'trunc' or a constant vector.
39904  bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE &&
39905                  N1.getOperand(0).getValueType() == VT;
39906  if (!RHSTrunc &&
39907      !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()))
39908    return SDValue();
39909
39910  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39911
39912  if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), VT))
39913    return SDValue();
39914
39915  // Set N0 and N1 to hold the inputs to the new wide operation.
39916  N0 = N0.getOperand(0);
39917  if (RHSTrunc)
39918    N1 = N1.getOperand(0);
39919  else
39920    N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1);
39921
39922  // Generate the wide operation.
39923  SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, VT, N0, N1);
39924  unsigned Opcode = N->getOpcode();
39925  switch (Opcode) {
39926  default: llvm_unreachable("Unexpected opcode");
39927  case ISD::ANY_EXTEND:
39928    return Op;
39929  case ISD::ZERO_EXTEND:
39930    return DAG.getZeroExtendInReg(Op, DL, NarrowVT.getScalarType());
39931  case ISD::SIGN_EXTEND:
39932    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
39933                       Op, DAG.getValueType(NarrowVT));
39934  }
39935}
39936
39937/// If both input operands of a logic op are being cast from floating point
39938/// types, try to convert this into a floating point logic node to avoid
39939/// unnecessary moves from SSE to integer registers.
39940static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
39941                                        const X86Subtarget &Subtarget) {
39942  EVT VT = N->getValueType(0);
39943  SDValue N0 = N->getOperand(0);
39944  SDValue N1 = N->getOperand(1);
39945  SDLoc DL(N);
39946
39947  if (N0.getOpcode() != ISD::BITCAST || N1.getOpcode() != ISD::BITCAST)
39948    return SDValue();
39949
39950  SDValue N00 = N0.getOperand(0);
39951  SDValue N10 = N1.getOperand(0);
39952  EVT N00Type = N00.getValueType();
39953  EVT N10Type = N10.getValueType();
39954
39955  // Ensure that both types are the same and are legal scalar fp types.
39956  if (N00Type != N10Type ||
39957      !((Subtarget.hasSSE1() && N00Type == MVT::f32) ||
39958        (Subtarget.hasSSE2() && N00Type == MVT::f64)))
39959    return SDValue();
39960
39961  unsigned FPOpcode;
39962  switch (N->getOpcode()) {
39963  default: llvm_unreachable("Unexpected input node for FP logic conversion");
39964  case ISD::AND: FPOpcode = X86ISD::FAND; break;
39965  case ISD::OR:  FPOpcode = X86ISD::FOR;  break;
39966  case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
39967  }
39968
39969  SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
39970  return DAG.getBitcast(VT, FPLogic);
39971}
39972
39973/// If this is a zero/all-bits result that is bitwise-anded with a low bits
39974 /// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
39975/// with a shift-right to eliminate loading the vector constant mask value.
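/// For example, (and (pcmpgtd X, Y), splat(1)) becomes
/// (psrld (pcmpgtd X, Y), 31), avoiding a constant-pool load for the mask.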
39976static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
39977                                     const X86Subtarget &Subtarget) {
39978  SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
39979  SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
39980  EVT VT0 = Op0.getValueType();
39981  EVT VT1 = Op1.getValueType();
39982
39983  if (VT0 != VT1 || !VT0.isSimple() || !VT0.isInteger())
39984    return SDValue();
39985
39986  APInt SplatVal;
39987  if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
39988      !SplatVal.isMask())
39989    return SDValue();
39990
39991  // Don't prevent creation of ANDN.
39992  if (isBitwiseNot(Op0))
39993    return SDValue();
39994
39995  if (!SupportedVectorShiftWithImm(VT0.getSimpleVT(), Subtarget, ISD::SRL))
39996    return SDValue();
39997
39998  unsigned EltBitWidth = VT0.getScalarSizeInBits();
39999  if (EltBitWidth != DAG.ComputeNumSignBits(Op0))
40000    return SDValue();
40001
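  // At this point each element of Op0 is known to be all-zeros or all-ones,
  // so e.g. for v4i32 the pattern (and Op0, splat(1)) becomes (vsrli Op0, 31).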
40002  SDLoc DL(N);
40003  unsigned ShiftVal = SplatVal.countTrailingOnes();
40004  SDValue ShAmt = DAG.getTargetConstant(EltBitWidth - ShiftVal, DL, MVT::i8);
40005  SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT0, Op0, ShAmt);
40006  return DAG.getBitcast(N->getValueType(0), Shift);
40007}
40008
40009// Get the index node from the lowered DAG of a GEP IR instruction with one
40010// indexing dimension.
40011static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) {
40012  if (Ld->isIndexed())
40013    return SDValue();
40014
40015  SDValue Base = Ld->getBasePtr();
40016
40017  if (Base.getOpcode() != ISD::ADD)
40018    return SDValue();
40019
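  // A single-index GEP is typically lowered to add(shl(Index, log2(EltSize)),
  // BasePtr); this helper assumes the shifted index is operand 0 of the add.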
40020  SDValue ShiftedIndex = Base.getOperand(0);
40021
40022  if (ShiftedIndex.getOpcode() != ISD::SHL)
40023    return SDValue();
40024
40025  return ShiftedIndex.getOperand(0);
40026
40027}
40028
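// Return true if the subtarget has a BZHI instruction usable for this scalar
// type: BMI2 provides the 32-bit form, and the 64-bit form additionally
// requires a 64-bit target.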
40029static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) {
40030  if (Subtarget.hasBMI2() && VT.isScalarInteger()) {
40031    switch (VT.getSizeInBits()) {
40032    default: return false;
40033    case 64: return Subtarget.is64Bit();
40034    case 32: return true;
40035    }
40036  }
40037  return false;
40038}
40039
40040// This function recognizes cases where the X86 BZHI instruction can replace an
40041// 'and-load' sequence.
40042// When an integer value is loaded from an array of constants defined as
40043// follows:
40044//
40045//   int array[SIZE] = {0x0, 0x1, 0x3, 0x7, 0xF ..., 2^(SIZE-1) - 1}
40046//
40047// and a bitwise AND is then applied to the loaded value and another input,
40048// the sequence is equivalent to performing BZHI (zero high bits) on the input,
40049// using the same index as the load.
40050static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG,
40051                                    const X86Subtarget &Subtarget) {
40052  MVT VT = Node->getSimpleValueType(0);
40053  SDLoc dl(Node);
40054
40055  // Check if subtarget has BZHI instruction for the node's type
40056  if (!hasBZHI(Subtarget, VT))
40057    return SDValue();
40058
40059  // Try matching the pattern for both operands.
40060  for (unsigned i = 0; i < 2; i++) {
40061    SDValue N = Node->getOperand(i);
40062    LoadSDNode *Ld = dyn_cast<LoadSDNode>(N.getNode());
40063
40064    // Bail out if the operand is not a load instruction.
40065    if (!Ld)
40066      return SDValue();
40067
40068    const Value *MemOp = Ld->getMemOperand()->getValue();
40069
40070    if (!MemOp)
40071      return SDValue();
40072
40073    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(MemOp)) {
40074      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) {
40075        if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
40076
40077          Constant *Init = GV->getInitializer();
40078          Type *Ty = Init->getType();
40079          if (!isa<ConstantDataArray>(Init) ||
40080              !Ty->getArrayElementType()->isIntegerTy() ||
40081              Ty->getArrayElementType()->getScalarSizeInBits() !=
40082                  VT.getSizeInBits() ||
40083              Ty->getArrayNumElements() >
40084                  Ty->getArrayElementType()->getScalarSizeInBits())
40085            continue;
40086
40087          // Check if the array's constant elements are suitable to our case.
40088          uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
40089          bool ConstantsMatch = true;
40090          for (uint64_t j = 0; j < ArrayElementCount; j++) {
40091            ConstantInt *Elem =
40092                dyn_cast<ConstantInt>(Init->getAggregateElement(j));
40093            if (Elem->getZExtValue() != (((uint64_t)1 << j) - 1)) {
40094              ConstantsMatch = false;
40095              break;
40096            }
40097          }
40098          if (!ConstantsMatch)
40099            continue;
40100
40101          // Do the transformation (For 32-bit type):
40102          // -> (and (load arr[idx]), inp)
40103          // <- (and (srl 0xFFFFFFFF, (sub 32, idx)))
40104          //    that will be replaced with one bzhi instruction.
40105          SDValue Inp = (i == 0) ? Node->getOperand(1) : Node->getOperand(0);
40106          SDValue SizeC = DAG.getConstant(VT.getSizeInBits(), dl, MVT::i32);
40107
40108          // Get the Node which indexes into the array.
40109          SDValue Index = getIndexFromUnindexedLoad(Ld);
40110          if (!Index)
40111            return SDValue();
40112          Index = DAG.getZExtOrTrunc(Index, dl, MVT::i32);
40113
40114          SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, SizeC, Index);
40115          Sub = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Sub);
40116
40117          SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);
40118          SDValue LShr = DAG.getNode(ISD::SRL, dl, VT, AllOnes, Sub);
40119
40120          return DAG.getNode(ISD::AND, dl, VT, Inp, LShr);
40121        }
40122      }
40123    }
40124  }
40125  return SDValue();
40126}
40127
40128// Look for (and (ctpop X), 1) which is the IR form of __builtin_parity.
40129// Turn it into a series of XORs and a setnp.
40130static SDValue combineParity(SDNode *N, SelectionDAG &DAG,
40131                             const X86Subtarget &Subtarget) {
40132  EVT VT = N->getValueType(0);
40133
40134  // We only support 64-bit and 32-bit. 64-bit requires special handling
40135  // unless the 64-bit popcnt instruction is legal.
40136  if (VT != MVT::i32 && VT != MVT::i64)
40137    return SDValue();
40138
40139  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40140  if (TLI.isTypeLegal(VT) && TLI.isOperationLegal(ISD::CTPOP, VT))
40141    return SDValue();
40142
40143  SDValue N0 = N->getOperand(0);
40144  SDValue N1 = N->getOperand(1);
40145
40146  // LHS needs to be a single use CTPOP.
40147  if (N0.getOpcode() != ISD::CTPOP || !N0.hasOneUse())
40148    return SDValue();
40149
40150  // RHS needs to be 1.
40151  if (!isOneConstant(N1))
40152    return SDValue();
40153
40154  SDLoc DL(N);
40155  SDValue X = N0.getOperand(0);
40156
40157  // If this is 64-bit, it's always best to xor the two 32-bit pieces together
40158  // even if we have popcnt.
40159  if (VT == MVT::i64) {
40160    SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
40161                             DAG.getNode(ISD::SRL, DL, VT, X,
40162                                         DAG.getConstant(32, DL, MVT::i8)));
40163    SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
40164    X = DAG.getNode(ISD::XOR, DL, MVT::i32, Lo, Hi);
40165    // Generate a 32-bit parity idiom. This will bring us back here if we need
40166    // to expand it too.
40167    SDValue Parity = DAG.getNode(ISD::AND, DL, MVT::i32,
40168                                 DAG.getNode(ISD::CTPOP, DL, MVT::i32, X),
40169                                 DAG.getConstant(1, DL, MVT::i32));
40170    return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Parity);
40171  }
40172  assert(VT == MVT::i32 && "Unexpected VT!");
40173
40174  // Xor the high and low 16-bits together using a 32-bit operation.
40175  SDValue Hi16 = DAG.getNode(ISD::SRL, DL, VT, X,
40176                             DAG.getConstant(16, DL, MVT::i8));
40177  X = DAG.getNode(ISD::XOR, DL, VT, X, Hi16);
40178
40179  // Finally, xor the low 2 bytes together and use an 8-bit flag-setting xor.
40180  // This should allow an h-reg to be used to save a shift.
40181  // FIXME: We only get an h-reg in 32-bit mode.
40182  SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
40183                           DAG.getNode(ISD::SRL, DL, VT, X,
40184                                       DAG.getConstant(8, DL, MVT::i8)));
40185  SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
40186  SDVTList VTs = DAG.getVTList(MVT::i8, MVT::i32);
40187  SDValue Flags = DAG.getNode(X86ISD::XOR, DL, VTs, Lo, Hi).getValue(1);
40188
40189  // Copy the inverse of the parity flag into a register with setcc.
40190  SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
40191  // Zero extend to original type.
40192  return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0), Setnp);
40193}
40194
40195
40196// Look for (and (bitcast (vXi1 (concat_vectors (vYi1 setcc), undef, ...))), C)
40197// where C is a mask containing the same number of bits as the setcc and
40198// where the setcc will freely zero the upper bits of the k-register. We can
40199// replace the undef in the concat with 0s and remove the AND. This mainly
40200// helps with v2i1/v4i1 setcc being cast to scalar.
40201static SDValue combineScalarAndWithMaskSetcc(SDNode *N, SelectionDAG &DAG,
40202                                             const X86Subtarget &Subtarget) {
40203  assert(N->getOpcode() == ISD::AND && "Unexpected opcode!");
40204
40205  EVT VT = N->getValueType(0);
40206
40207  // Make sure this is an AND with constant. We will check the value of the
40208  // constant later.
40209  if (!isa<ConstantSDNode>(N->getOperand(1)))
40210    return SDValue();
40211
40212  // This is implied by the ConstantSDNode.
40213  assert(!VT.isVector() && "Expected scalar VT!");
40214
40215  if (N->getOperand(0).getOpcode() != ISD::BITCAST ||
40216      !N->getOperand(0).hasOneUse() ||
40217      !N->getOperand(0).getOperand(0).hasOneUse())
40218    return SDValue();
40219
40220  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40221  SDValue Src = N->getOperand(0).getOperand(0);
40222  EVT SrcVT = Src.getValueType();
40223  if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::i1 ||
40224      !TLI.isTypeLegal(SrcVT))
40225    return SDValue();
40226
40227  if (Src.getOpcode() != ISD::CONCAT_VECTORS)
40228    return SDValue();
40229
40230  // We only care about the first subvector of the concat; we expect the
40231  // other subvectors to be ignored due to the AND if we make the change.
40232  SDValue SubVec = Src.getOperand(0);
40233  EVT SubVecVT = SubVec.getValueType();
40234
40235  // First subvector should be a setcc with a legal result type. The RHS of the
40236  // AND should be a mask with this many bits.
40237  if (SubVec.getOpcode() != ISD::SETCC || !TLI.isTypeLegal(SubVecVT) ||
40238      !N->getConstantOperandAPInt(1).isMask(SubVecVT.getVectorNumElements()))
40239    return SDValue();
40240
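  // The setcc's input type must also be legal, and without VLX only 512-bit
  // compares produce a mask register directly.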
40241  EVT SetccVT = SubVec.getOperand(0).getValueType();
40242  if (!TLI.isTypeLegal(SetccVT) ||
40243      !(Subtarget.hasVLX() || SetccVT.is512BitVector()))
40244    return SDValue();
40245
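  // Compares on i8/i16 elements only produce mask results with AVX512BW;
  // i32/i64 element compares need just AVX512F (plus the VLX check above).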
40246  if (!(Subtarget.hasBWI() || SetccVT.getScalarSizeInBits() >= 32))
40247    return SDValue();
40248
40249  // We passed all the checks. Rebuild the concat_vectors with zeroes
40250  // and cast it back to VT.
40251  SDLoc dl(N);
40252  SmallVector<SDValue, 4> Ops(Src.getNumOperands(),
40253                              DAG.getConstant(0, dl, SubVecVT));
40254  Ops[0] = SubVec;
40255  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT,
40256                               Ops);
40257  return DAG.getBitcast(VT, Concat);
40258}
40259
40260static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
40261                          TargetLowering::DAGCombinerInfo &DCI,
40262                          const X86Subtarget &Subtarget) {
40263  EVT VT = N->getValueType(0);
40264
40265  // If this is SSE1-only, convert to FAND to avoid scalarization.
40266  if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
40267    return DAG.getBitcast(
40268        MVT::v4i32, DAG.getNode(X86ISD::FAND, SDLoc(N), MVT::v4f32,
40269                                DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
40270                                DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
40271  }
40272
40273  // Use a 32-bit and+zext if upper bits known zero.
40274  if (VT == MVT::i64 && Subtarget.is64Bit() &&
40275      !isa<ConstantSDNode>(N->getOperand(1))) {
40276    APInt HiMask = APInt::getHighBitsSet(64, 32);
40277    if (DAG.MaskedValueIsZero(N->getOperand(1), HiMask) ||
40278        DAG.MaskedValueIsZero(N->getOperand(0), HiMask)) {
40279      SDLoc dl(N);
40280      SDValue LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(0));
40281      SDValue RHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(1));
40282      return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64,
40283                         DAG.getNode(ISD::AND, dl, MVT::i32, LHS, RHS));
40284    }
40285  }
40286
40287  // This must be done before legalization has expanded the ctpop.
40288  if (SDValue V = combineParity(N, DAG, Subtarget))
40289    return V;
40290
40291  // Match all-of bool scalar reductions into a bitcast/movmsk + cmp.
40292  // TODO: Support multiple SrcOps.
40293  if (VT == MVT::i1) {
40294    SmallVector<SDValue, 2> SrcOps;
40295    if (matchScalarReduction(SDValue(N, 0), ISD::AND, SrcOps) &&
40296        SrcOps.size() == 1) {
40297      SDLoc dl(N);
40298      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40299      unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
40300      EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
40301      SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
40302      if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
40303        Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
40304      if (Mask) {
40305        APInt AllBits = APInt::getAllOnesValue(NumElts);
40306        return DAG.getSetCC(dl, MVT::i1, Mask,
40307                            DAG.getConstant(AllBits, dl, MaskVT), ISD::SETEQ);
40308      }
40309    }
40310  }
40311
40312  if (SDValue V = combineScalarAndWithMaskSetcc(N, DAG, Subtarget))
40313    return V;
40314
40315  if (DCI.isBeforeLegalizeOps())
40316    return SDValue();
40317
40318  if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
40319    return R;
40320
40321  if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
40322    return FPLogic;
40323
40324  if (SDValue R = combineANDXORWithAllOnesIntoANDNP(N, DAG))
40325    return R;
40326
40327  if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget))
40328    return ShiftRight;
40329
40330  if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
40331    return R;
40332
40333  // Attempt to recursively combine a bitmask AND with shuffles.
40334  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
40335    SDValue Op(N, 0);
40336    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
40337      return Res;
40338  }
40339
40340  // Attempt to combine a scalar bitmask AND with an extracted shuffle.
40341  if ((VT.getScalarSizeInBits() % 8) == 0 &&
40342      N->getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
40343      isa<ConstantSDNode>(N->getOperand(0).getOperand(1))) {
40344    SDValue BitMask = N->getOperand(1);
40345    SDValue SrcVec = N->getOperand(0).getOperand(0);
40346    EVT SrcVecVT = SrcVec.getValueType();
40347
40348    // Check that the constant bitmask masks whole bytes.
40349    APInt UndefElts;
40350    SmallVector<APInt, 64> EltBits;
40351    if (VT == SrcVecVT.getScalarType() &&
40352        N->getOperand(0)->isOnlyUserOf(SrcVec.getNode()) &&
40353        getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) &&
40354        llvm::all_of(EltBits, [](APInt M) {
40355          return M.isNullValue() || M.isAllOnesValue();
40356        })) {
40357      unsigned NumElts = SrcVecVT.getVectorNumElements();
40358      unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8;
40359      unsigned Idx = N->getOperand(0).getConstantOperandVal(1);
40360
40361      // Create a root shuffle mask from the byte mask and the extracted index.
40362      SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef);
40363      for (unsigned i = 0; i != Scale; ++i) {
40364        if (UndefElts[i])
40365          continue;
40366        int VecIdx = Scale * Idx + i;
40367        ShuffleMask[VecIdx] =
40368            EltBits[i].isNullValue() ? SM_SentinelZero : VecIdx;
40369      }
40370
40371      if (SDValue Shuffle = combineX86ShufflesRecursively(
40372              {SrcVec}, 0, SrcVec, ShuffleMask, {}, /*Depth*/ 1,
40373              /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
40374        return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), VT, Shuffle,
40375                           N->getOperand(0).getOperand(1));
40376    }
40377  }
40378
40379  return SDValue();
40380}
40381
40382// Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
40383static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG,
40384                                     const X86Subtarget &Subtarget) {
40385  assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
40386
40387  MVT VT = N->getSimpleValueType(0);
40388  if (!VT.isVector() || (VT.getScalarSizeInBits() % 8) != 0)
40389    return SDValue();
40390
40391  SDValue N0 = peekThroughBitcasts(N->getOperand(0));
40392  SDValue N1 = peekThroughBitcasts(N->getOperand(1));
40393  if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
40394    return SDValue();
40395
40396  // On XOP we'll lower to PCMOV so accept one use. With AVX512, we can use
40397  // VPTERNLOG. Otherwise only do this if either mask has multiple uses already.
40398  bool UseVPTERNLOG = (Subtarget.hasAVX512() && VT.is512BitVector()) ||
40399                      Subtarget.hasVLX();
40400  if (!(Subtarget.hasXOP() || UseVPTERNLOG ||
40401        !N0.getOperand(1).hasOneUse() || !N1.getOperand(1).hasOneUse()))
40402    return SDValue();
40403
40404  // Attempt to extract constant byte masks.
40405  APInt UndefElts0, UndefElts1;
40406  SmallVector<APInt, 32> EltBits0, EltBits1;
40407  if (!getTargetConstantBitsFromNode(N0.getOperand(1), 8, UndefElts0, EltBits0,
40408                                     false, false))
40409    return SDValue();
40410  if (!getTargetConstantBitsFromNode(N1.getOperand(1), 8, UndefElts1, EltBits1,
40411                                     false, false))
40412    return SDValue();
40413
40414  for (unsigned i = 0, e = EltBits0.size(); i != e; ++i) {
40415    // TODO - add UNDEF elts support.
40416    if (UndefElts0[i] || UndefElts1[i])
40417      return SDValue();
40418    if (EltBits0[i] != ~EltBits1[i])
40419      return SDValue();
40420  }
40421
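  // Reuse the original AND(X,C) untouched and rebuild the other operand as
  // ANDNP(C,Y); since the masks are bitwise inverses, ANDNP(C,Y) == AND(~C,Y),
  // so only a single mask constant is needed.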
40422  SDLoc DL(N);
40423  SDValue X = N->getOperand(0);
40424  SDValue Y =
40425      DAG.getNode(X86ISD::ANDNP, DL, VT, DAG.getBitcast(VT, N0.getOperand(1)),
40426                  DAG.getBitcast(VT, N1.getOperand(0)));
40427  return DAG.getNode(ISD::OR, DL, VT, X, Y);
40428}
40429
40430// Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
40431static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
40432  if (N->getOpcode() != ISD::OR)
40433    return false;
40434
40435  SDValue N0 = N->getOperand(0);
40436  SDValue N1 = N->getOperand(1);
40437
40438  // Canonicalize AND to LHS.
40439  if (N1.getOpcode() == ISD::AND)
40440    std::swap(N0, N1);
40441
40442  // Attempt to match OR(AND(M,Y),ANDNP(M,X)).
40443  if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
40444    return false;
40445
40446  Mask = N1.getOperand(0);
40447  X = N1.getOperand(1);
40448
40449  // Check to see if the mask appeared in both the AND and ANDNP.
40450  if (N0.getOperand(0) == Mask)
40451    Y = N0.getOperand(1);
40452  else if (N0.getOperand(1) == Mask)
40453    Y = N0.getOperand(0);
40454  else
40455    return false;
40456
40457  // TODO: Attempt to match against AND(XOR(-1,M),Y) as well; waiting for the
40458  // ANDNP combine allows other combines to happen that prevent matching.
40459  return true;
40460}
40461
40462// Try to fold:
40463//   (or (and (m, y), (pandn m, x)))
40464// into:
40465//   (vselect m, x, y)
40466// As a special case, try to fold:
40467//   (or (and (m, (sub 0, x)), (pandn m, x)))
40468// into:
40469//   (sub (xor X, M), M)
40470static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
40471                                            const X86Subtarget &Subtarget) {
40472  assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
40473
40474  EVT VT = N->getValueType(0);
40475  if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
40476        (VT.is256BitVector() && Subtarget.hasInt256())))
40477    return SDValue();
40478
40479  SDValue X, Y, Mask;
40480  if (!matchLogicBlend(N, X, Y, Mask))
40481    return SDValue();
40482
40483  // Validate that X, Y, and Mask are bitcasts, and see through them.
40484  Mask = peekThroughBitcasts(Mask);
40485  X = peekThroughBitcasts(X);
40486  Y = peekThroughBitcasts(Y);
40487
40488  EVT MaskVT = Mask.getValueType();
40489  unsigned EltBits = MaskVT.getScalarSizeInBits();
40490
40491  // TODO: Attempt to handle floating point cases as well?
40492  if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
40493    return SDValue();
40494
40495  SDLoc DL(N);
40496
40497  // Attempt to combine to conditional negate: (sub (xor X, M), M)
40498  if (SDValue Res = combineLogicBlendIntoConditionalNegate(VT, Mask, X, Y, DL,
40499                                                           DAG, Subtarget))
40500    return Res;
40501
40502  // PBLENDVB is only available on SSE 4.1.
40503  if (!Subtarget.hasSSE41())
40504    return SDValue();
40505
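  // Lower to a byte blend: PBLENDVB selects on the sign bit of each mask byte,
  // and the all-zeros/all-ones mask lanes verified above make that equivalent
  // to the original element-wise select.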
40506  MVT BlendVT = VT.is256BitVector() ? MVT::v32i8 : MVT::v16i8;
40507
40508  X = DAG.getBitcast(BlendVT, X);
40509  Y = DAG.getBitcast(BlendVT, Y);
40510  Mask = DAG.getBitcast(BlendVT, Mask);
40511  Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X);
40512  return DAG.getBitcast(VT, Mask);
40513}
40514
40515// Helper function for combineOrCmpEqZeroToCtlzSrl
40516// Transforms:
40517//   seteq(cmp x, 0)
40518//   into:
40519//   srl(ctlz x), log2(bitsize(x))
40520// Input pattern is checked by caller.
40521static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, EVT ExtTy,
40522                                          SelectionDAG &DAG) {
40523  SDValue Cmp = Op.getOperand(1);
40524  EVT VT = Cmp.getOperand(0).getValueType();
40525  unsigned Log2b = Log2_32(VT.getSizeInBits());
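  // ctlz(x) equals the bit width only when x == 0, and the bit width is the
  // only possible ctlz result with bit Log2b set, so (ctlz(x) >> Log2b) is
  // exactly the (x == 0) result.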
40526  SDLoc dl(Op);
40527  SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0));
40528  // The result of the shift is true or false, and on X86, the 32-bit
40529  // encoding of shr and lzcnt is more desirable.
40530  SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32);
40531  SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc,
40532                            DAG.getConstant(Log2b, dl, MVT::i8));
40533  return DAG.getZExtOrTrunc(Scc, dl, ExtTy);
40534}
40535
40536// Try to transform:
40537//   zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0))))
40538//   into:
40539//   srl(or(ctlz(x), ctlz(y)), log2(bitsize(x)))
40540// Will also attempt to match more generic cases, eg:
40541//   zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0)))
40542// Only applies if the target supports the FastLZCNT feature.
40543static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
40544                                           TargetLowering::DAGCombinerInfo &DCI,
40545                                           const X86Subtarget &Subtarget) {
40546  if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast())
40547    return SDValue();
40548
40549  auto isORCandidate = [](SDValue N) {
40550    return (N->getOpcode() == ISD::OR && N->hasOneUse());
40551  };
40552
40553  // Check that the zero extend is extending to 32 bits or more. The code
40554  // generated by srl(ctlz) for the 16-bit or smaller variants of the pattern
40555  // would require extra instructions to clear the upper bits.
40556  if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) ||
40557      !isORCandidate(N->getOperand(0)))
40558    return SDValue();
40559
40560  // Check the node matches: setcc(eq, cmp 0)
40561  auto isSetCCCandidate = [](SDValue N) {
40562    return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() &&
40563           X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
40564           N->getOperand(1).getOpcode() == X86ISD::CMP &&
40565           isNullConstant(N->getOperand(1).getOperand(1)) &&
40566           N->getOperand(1).getValueType().bitsGE(MVT::i32);
40567  };
40568
40569  SDNode *OR = N->getOperand(0).getNode();
40570  SDValue LHS = OR->getOperand(0);
40571  SDValue RHS = OR->getOperand(1);
40572
40573  // Save nodes matching or(or, setcc(eq, cmp 0)).
40574  SmallVector<SDNode *, 2> ORNodes;
40575  while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) ||
40576          (isORCandidate(RHS) && isSetCCCandidate(LHS)))) {
40577    ORNodes.push_back(OR);
40578    OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode();
40579    LHS = OR->getOperand(0);
40580    RHS = OR->getOperand(1);
40581  }
40582
40583  // The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)).
40584  if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) ||
40585      !isORCandidate(SDValue(OR, 0)))
40586    return SDValue();
40587
40588  // We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern; try to lower it
40589  // to:
40590  //   or(srl(ctlz), srl(ctlz)).
40591  // The DAG combiner can then fold it into:
40592  //   srl(or(ctlz, ctlz)).
40593  EVT VT = OR->getValueType(0);
40594  SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, VT, DAG);
40595  SDValue Ret, NewRHS;
40596  if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG)))
40597    Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, NewLHS, NewRHS);
40598
40599  if (!Ret)
40600    return SDValue();
40601
40602  // Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern.
40603  while (!ORNodes.empty()) {
40604    OR = ORNodes.pop_back_val();
40605    LHS = OR->getOperand(0);
40606    RHS = OR->getOperand(1);
40607    // Swap rhs with lhs to match or(setcc(eq, cmp, 0), or).
40608    if (RHS->getOpcode() == ISD::OR)
40609      std::swap(LHS, RHS);
40610    NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG);
40611    if (!NewRHS)
40612      return SDValue();
40613    Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, Ret, NewRHS);
40614  }
40615
40616  if (Ret)
40617    Ret = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);
40618
40619  return Ret;
40620}
40621
40622static SDValue combineOrShiftToFunnelShift(SDNode *N, SelectionDAG &DAG,
40623                                           const X86Subtarget &Subtarget) {
40624  assert(N->getOpcode() == ISD::OR && "Expected ISD::OR node");
40625  SDValue N0 = N->getOperand(0);
40626  SDValue N1 = N->getOperand(1);
40627  EVT VT = N->getValueType(0);
40628  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40629
40630  if (!TLI.isOperationLegalOrCustom(ISD::FSHL, VT) ||
40631      !TLI.isOperationLegalOrCustom(ISD::FSHR, VT))
40632    return SDValue();
40633
40634  // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
40635  bool OptForSize = DAG.shouldOptForSize();
40636  unsigned Bits = VT.getScalarSizeInBits();
40637
40638  // SHLD/SHRD instructions have lower register pressure, but on some
40639  // platforms they have higher latency than the equivalent series of
40640  // shifts/ors that would otherwise be generated.
40641  // Don't fold (or (x << c) | (y >> (64 - c))) on such targets unless we
40642  // are optimizing for size.
40643  if (!OptForSize && Subtarget.isSHLDSlow())
40644    return SDValue();
40645
40646  if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
40647    std::swap(N0, N1);
40648  if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
40649    return SDValue();
40650  if (!N0.hasOneUse() || !N1.hasOneUse())
40651    return SDValue();
40652
40653  EVT ShiftVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
40654
40655  SDValue ShAmt0 = N0.getOperand(1);
40656  if (ShAmt0.getValueType() != ShiftVT)
40657    return SDValue();
40658  SDValue ShAmt1 = N1.getOperand(1);
40659  if (ShAmt1.getValueType() != ShiftVT)
40660    return SDValue();
40661
40662  // Peek through any modulo shift masks.
40663  SDValue ShMsk0;
40664  if (ShAmt0.getOpcode() == ISD::AND &&
40665      isa<ConstantSDNode>(ShAmt0.getOperand(1)) &&
40666      ShAmt0.getConstantOperandAPInt(1) == (Bits - 1)) {
40667    ShMsk0 = ShAmt0;
40668    ShAmt0 = ShAmt0.getOperand(0);
40669  }
40670  SDValue ShMsk1;
40671  if (ShAmt1.getOpcode() == ISD::AND &&
40672      isa<ConstantSDNode>(ShAmt1.getOperand(1)) &&
40673      ShAmt1.getConstantOperandAPInt(1) == (Bits - 1)) {
40674    ShMsk1 = ShAmt1;
40675    ShAmt1 = ShAmt1.getOperand(0);
40676  }
40677
40678  if (ShAmt0.getOpcode() == ISD::TRUNCATE)
40679    ShAmt0 = ShAmt0.getOperand(0);
40680  if (ShAmt1.getOpcode() == ISD::TRUNCATE)
40681    ShAmt1 = ShAmt1.getOperand(0);
40682
40683  SDLoc DL(N);
40684  unsigned Opc = ISD::FSHL;
40685  SDValue Op0 = N0.getOperand(0);
40686  SDValue Op1 = N1.getOperand(0);
40687  if (ShAmt0.getOpcode() == ISD::SUB || ShAmt0.getOpcode() == ISD::XOR) {
40688    Opc = ISD::FSHR;
40689    std::swap(Op0, Op1);
40690    std::swap(ShAmt0, ShAmt1);
40691    std::swap(ShMsk0, ShMsk1);
40692  }
40693
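  // After the optional swap above the operands are arranged as for FSHL;
  // GetFunnelShift swaps them back again when emitting FSHR.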
40694  auto GetFunnelShift = [&DAG, &DL, VT, Opc, &ShiftVT](SDValue Op0, SDValue Op1,
40695                                                       SDValue Amt) {
40696    if (Opc == ISD::FSHR)
40697      std::swap(Op0, Op1);
40698    return DAG.getNode(Opc, DL, VT, Op0, Op1,
40699                       DAG.getNode(ISD::TRUNCATE, DL, ShiftVT, Amt));
40700  };
40701
40702  // OR( SHL( X, C ), SRL( Y, 32 - C ) ) -> FSHL( X, Y, C )
40703  // OR( SRL( X, C ), SHL( Y, 32 - C ) ) -> FSHR( Y, X, C )
40704  // OR( SHL( X, C ), SRL( SRL( Y, 1 ), XOR( C, 31 ) ) ) -> FSHL( X, Y, C )
40705  // OR( SRL( X, C ), SHL( SHL( Y, 1 ), XOR( C, 31 ) ) ) -> FSHR( Y, X, C )
40706  // OR( SHL( X, AND( C, 31 ) ), SRL( Y, AND( 0 - C, 31 ) ) ) -> FSHL( X, Y, C )
40707  // OR( SRL( X, AND( C, 31 ) ), SHL( Y, AND( 0 - C, 31 ) ) ) -> FSHR( Y, X, C )
40708  if (ShAmt1.getOpcode() == ISD::SUB) {
40709    SDValue Sum = ShAmt1.getOperand(0);
40710    if (auto *SumC = dyn_cast<ConstantSDNode>(Sum)) {
40711      SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
40712      if (ShAmt1Op1.getOpcode() == ISD::AND &&
40713          isa<ConstantSDNode>(ShAmt1Op1.getOperand(1)) &&
40714          ShAmt1Op1.getConstantOperandAPInt(1) == (Bits - 1)) {
40715        ShMsk1 = ShAmt1Op1;
40716        ShAmt1Op1 = ShAmt1Op1.getOperand(0);
40717      }
40718      if (ShAmt1Op1.getOpcode() == ISD::TRUNCATE)
40719        ShAmt1Op1 = ShAmt1Op1.getOperand(0);
40720      if ((SumC->getAPIntValue() == Bits ||
40721           (SumC->getAPIntValue() == 0 && ShMsk1)) &&
40722          ShAmt1Op1 == ShAmt0)
40723        return GetFunnelShift(Op0, Op1, ShAmt0);
40724    }
40725  } else if (auto *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
40726    auto *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
40727    if (ShAmt0C && (ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue()) == Bits)
40728      return GetFunnelShift(Op0, Op1, ShAmt0);
40729  } else if (ShAmt1.getOpcode() == ISD::XOR) {
40730    SDValue Mask = ShAmt1.getOperand(1);
40731    if (auto *MaskC = dyn_cast<ConstantSDNode>(Mask)) {
40732      unsigned InnerShift = (ISD::FSHL == Opc ? ISD::SRL : ISD::SHL);
40733      SDValue ShAmt1Op0 = ShAmt1.getOperand(0);
40734      if (ShAmt1Op0.getOpcode() == ISD::TRUNCATE)
40735        ShAmt1Op0 = ShAmt1Op0.getOperand(0);
40736      if (MaskC->getSExtValue() == (Bits - 1) &&
40737          (ShAmt1Op0 == ShAmt0 || ShAmt1Op0 == ShMsk0)) {
40738        if (Op1.getOpcode() == InnerShift &&
40739            isa<ConstantSDNode>(Op1.getOperand(1)) &&
40740            Op1.getConstantOperandAPInt(1).isOneValue()) {
40741          return GetFunnelShift(Op0, Op1.getOperand(0), ShAmt0);
40742        }
40743        // Test for ADD( Y, Y ) as an equivalent to SHL( Y, 1 ).
40744        if (InnerShift == ISD::SHL && Op1.getOpcode() == ISD::ADD &&
40745            Op1.getOperand(0) == Op1.getOperand(1)) {
40746          return GetFunnelShift(Op0, Op1.getOperand(0), ShAmt0);
40747        }
40748      }
40749    }
40750  }
40751
40752  return SDValue();
40753}
40754
40755static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
40756                         TargetLowering::DAGCombinerInfo &DCI,
40757                         const X86Subtarget &Subtarget) {
40758  SDValue N0 = N->getOperand(0);
40759  SDValue N1 = N->getOperand(1);
40760  EVT VT = N->getValueType(0);
40761
40762  // If this is SSE1-only, convert to FOR to avoid scalarization.
40763  if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
40764    return DAG.getBitcast(MVT::v4i32,
40765                          DAG.getNode(X86ISD::FOR, SDLoc(N), MVT::v4f32,
40766                                      DAG.getBitcast(MVT::v4f32, N0),
40767                                      DAG.getBitcast(MVT::v4f32, N1)));
40768  }
40769
40770  // Match any-of bool scalar reductions into a bitcast/movmsk + cmp.
40771  // TODO: Support multiple SrcOps.
40772  if (VT == MVT::i1) {
40773    SmallVector<SDValue, 2> SrcOps;
40774    if (matchScalarReduction(SDValue(N, 0), ISD::OR, SrcOps) &&
40775        SrcOps.size() == 1) {
40776      SDLoc dl(N);
40777      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40778      unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
40779      EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
40780      SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
40781      if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
40782        Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
40783      if (Mask) {
40784        APInt AllBits = APInt::getNullValue(NumElts);
40785        return DAG.getSetCC(dl, MVT::i1, Mask,
40786                            DAG.getConstant(AllBits, dl, MaskVT), ISD::SETNE);
40787      }
40788    }
40789  }
40790
40791  if (DCI.isBeforeLegalizeOps())
40792    return SDValue();
40793
40794  if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
40795    return R;
40796
40797  if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
40798    return FPLogic;
40799
40800  if (SDValue R = canonicalizeBitSelect(N, DAG, Subtarget))
40801    return R;
40802
40803  if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
40804    return R;
40805
40806  if (SDValue R = combineOrShiftToFunnelShift(N, DAG, Subtarget))
40807    return R;
40808
40809  // Attempt to recursively combine an OR of shuffles.
40810  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
40811    SDValue Op(N, 0);
40812    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
40813      return Res;
40814  }
40815
40816  return SDValue();
40817}
40818
40819/// Try to turn tests against the signbit in the form of:
40820///   XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
40821/// into:
40822///   SETGT(X, -1)
40823static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
40824  // This is only worth doing if the output type is i8 or i1.
40825  EVT ResultType = N->getValueType(0);
40826  if (ResultType != MVT::i8 && ResultType != MVT::i1)
40827    return SDValue();
40828
40829  SDValue N0 = N->getOperand(0);
40830  SDValue N1 = N->getOperand(1);
40831
40832  // We should be performing an xor against a truncated shift.
40833  if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
40834    return SDValue();
40835
40836  // Make sure we are performing an xor against one.
40837  if (!isOneConstant(N1))
40838    return SDValue();
40839
40840  // SetCC on x86 zero extends so only act on this if it's a logical shift.
40841  SDValue Shift = N0.getOperand(0);
40842  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
40843    return SDValue();
40844
40845  // Make sure we are truncating from one of i16, i32 or i64.
40846  EVT ShiftTy = Shift.getValueType();
40847  if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
40848    return SDValue();
40849
40850  // Make sure the shift amount extracts the sign bit.
40851  if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
40852      Shift.getConstantOperandAPInt(1) != (ShiftTy.getSizeInBits() - 1))
40853    return SDValue();
40854
40855  // Create a greater-than comparison against -1.
40856  // N.B. Using SETGE against 0 works but we want a canonical-looking
40857  // comparison; using SETGT matches up with what TranslateX86CC does.
40858  SDLoc DL(N);
40859  SDValue ShiftOp = Shift.getOperand(0);
40860  EVT ShiftOpTy = ShiftOp.getValueType();
40861  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40862  EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
40863                                               *DAG.getContext(), ResultType);
40864  SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
40865                              DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
40866  if (SetCCResultType != ResultType)
40867    Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
40868  return Cond;
40869}
40870
40871/// Turn vector tests of the signbit in the form of:
40872///   xor (sra X, elt_size(X)-1), -1
40873/// into:
40874///   pcmpgt X, -1
40875///
40876/// This should be called before type legalization because the pattern may not
40877/// persist after that.
40878static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
40879                                         const X86Subtarget &Subtarget) {
40880  EVT VT = N->getValueType(0);
40881  if (!VT.isSimple())
40882    return SDValue();
40883
40884  switch (VT.getSimpleVT().SimpleTy) {
40885  default: return SDValue();
40886  case MVT::v16i8:
40887  case MVT::v8i16:
40888  case MVT::v4i32:
40889  case MVT::v2i64: if (!Subtarget.hasSSE2()) return SDValue(); break;
40890  case MVT::v32i8:
40891  case MVT::v16i16:
40892  case MVT::v8i32:
40893  case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
40894  }
40895
40896  // There must be a shift right algebraic before the xor, and the xor must be a
40897  // 'not' operation.
40898  SDValue Shift = N->getOperand(0);
40899  SDValue Ones = N->getOperand(1);
40900  if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
40901      !ISD::isBuildVectorAllOnes(Ones.getNode()))
40902    return SDValue();
40903
40904  // The shift should be smearing the sign bit across each vector element.
40905  auto *ShiftAmt =
40906      isConstOrConstSplat(Shift.getOperand(1), /*AllowUndefs*/ true);
40907  if (!ShiftAmt ||
40908      ShiftAmt->getAPIntValue() != (Shift.getScalarValueSizeInBits() - 1))
40909    return SDValue();
40910
40911  // Create a greater-than comparison against -1. We don't use the more obvious
40912  // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
40913  return DAG.getSetCC(SDLoc(N), VT, Shift.getOperand(0), Ones, ISD::SETGT);
40914}
40915
40916/// Detect patterns of truncation with unsigned saturation:
40917///
40918/// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
40919///   Return the source value x to be truncated or SDValue() if the pattern was
40920///   not matched.
40921///
40922/// 2. (truncate (smin (smax (x, C1), C2)) to dest_type),
40923///   where C1 >= 0 and C2 is unsigned max of destination type.
40924///
40925///    (truncate (smax (smin (x, C2), C1)) to dest_type)
40926///   where C1 >= 0, C2 is unsigned max of destination type and C1 <= C2.
40927///
40928///   These two patterns are equivalent to:
40929///   (truncate (umin (smax(x, C1), unsigned_max_of_dest_type)) to dest_type)
40930///   So return the smax(x, C1) value to be truncated or SDValue() if the
40931///   pattern was not matched.
40932static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
40933                                 const SDLoc &DL) {
40934  EVT InVT = In.getValueType();
40935
40936  // Saturation with truncation. We truncate from InVT to VT.
40937  assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() &&
40938         "Unexpected types for truncate operation");
40939
40940  // Match min/max and return limit value as a parameter.
40941  auto MatchMinMax = [](SDValue V, unsigned Opcode, APInt &Limit) -> SDValue {
40942    if (V.getOpcode() == Opcode &&
40943        ISD::isConstantSplatVector(V.getOperand(1).getNode(), Limit))
40944      return V.getOperand(0);
40945    return SDValue();
40946  };
40947
40948  APInt C1, C2;
40949  if (SDValue UMin = MatchMinMax(In, ISD::UMIN, C2))
40950    // C2 should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according to
40951    // the element size of the destination type.
40952    if (C2.isMask(VT.getScalarSizeInBits()))
40953      return UMin;
40954
40955  if (SDValue SMin = MatchMinMax(In, ISD::SMIN, C2))
40956    if (MatchMinMax(SMin, ISD::SMAX, C1))
40957      if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()))
40958        return SMin;
40959
40960  if (SDValue SMax = MatchMinMax(In, ISD::SMAX, C1))
40961    if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, C2))
40962      if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()) &&
40963          C2.uge(C1)) {
40964        return DAG.getNode(ISD::SMAX, DL, InVT, SMin, In.getOperand(1));
40965      }
40966
40967  return SDValue();
40968}
40969
40970/// Detect patterns of truncation with signed saturation:
40971/// (truncate (smin ((smax (x, signed_min_of_dest_type)),
40972///                  signed_max_of_dest_type)) to dest_type)
40973/// or:
40974/// (truncate (smax ((smin (x, signed_max_of_dest_type)),
40975///                  signed_min_of_dest_type)) to dest_type).
40976/// With MatchPackUS, the smax/smin range is [0, unsigned_max_of_dest_type].
40977/// Return the source value to be truncated or SDValue() if the pattern was not
40978/// matched.
40979static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {
40980  unsigned NumDstBits = VT.getScalarSizeInBits();
40981  unsigned NumSrcBits = In.getScalarValueSizeInBits();
40982  assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");
40983
40984  auto MatchMinMax = [](SDValue V, unsigned Opcode,
40985                        const APInt &Limit) -> SDValue {
40986    APInt C;
40987    if (V.getOpcode() == Opcode &&
40988        ISD::isConstantSplatVector(V.getOperand(1).getNode(), C) && C == Limit)
40989      return V.getOperand(0);
40990    return SDValue();
40991  };
40992
40993  APInt SignedMax, SignedMin;
40994  if (MatchPackUS) {
40995    SignedMax = APInt::getAllOnesValue(NumDstBits).zext(NumSrcBits);
40996    SignedMin = APInt(NumSrcBits, 0);
40997  } else {
40998    SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits);
40999    SignedMin = APInt::getSignedMinValue(NumDstBits).sext(NumSrcBits);
41000  }
41001
41002  if (SDValue SMin = MatchMinMax(In, ISD::SMIN, SignedMax))
41003    if (SDValue SMax = MatchMinMax(SMin, ISD::SMAX, SignedMin))
41004      return SMax;
41005
41006  if (SDValue SMax = MatchMinMax(In, ISD::SMAX, SignedMin))
41007    if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, SignedMax))
41008      return SMin;
41009
41010  return SDValue();
41011}
41012
41013static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
41014                                      SelectionDAG &DAG,
41015                                      const X86Subtarget &Subtarget) {
41016  if (!Subtarget.hasSSE2() || !VT.isVector())
41017    return SDValue();
41018
41019  EVT SVT = VT.getVectorElementType();
41020  EVT InVT = In.getValueType();
41021  EVT InSVT = InVT.getVectorElementType();
41022
41023  // If we're clamping a signed 32-bit vector to 0-255 and the 32-bit vector is
41024  // split across two registers, we can use a packusdw+perm to clamp to 0-65535
41025  // and concatenate at the same time. Then we can use a final vpmovuswb to
41026  // clip to 0-255.
41027  if (Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
41028      InVT == MVT::v16i32 && VT == MVT::v16i8) {
41029    if (auto USatVal = detectSSatPattern(In, VT, true)) {
41030      // Emit a VPACKUSDW+VPERMQ followed by a VPMOVUSWB.
41031      SDValue Mid = truncateVectorWithPACK(X86ISD::PACKUS, MVT::v16i16, USatVal,
41032                                           DL, DAG, Subtarget);
41033      assert(Mid && "Failed to pack!");
41034      return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, Mid);
41035    }
41036  }
41037
41038  // vXi32 truncate instructions are available with AVX512F.
41039  // vXi16 truncate instructions are only available with AVX512BW.
41040  // For 256-bit or smaller vectors, we require VLX.
41041  // FIXME: We could widen truncates to 512 to remove the VLX restriction.
41042  // If the result type is 256 bits or larger and we have disabled 512-bit
41043  // registers, we should go ahead and use the pack instructions if possible.
41044  bool PreferAVX512 = ((Subtarget.hasAVX512() && InSVT == MVT::i32) ||
41045                       (Subtarget.hasBWI() && InSVT == MVT::i16)) &&
41046                      (InVT.getSizeInBits() > 128) &&
41047                      (Subtarget.hasVLX() || InVT.getSizeInBits() > 256) &&
41048                      !(!Subtarget.useAVX512Regs() && VT.getSizeInBits() >= 256);
41049
41050  if (isPowerOf2_32(VT.getVectorNumElements()) && !PreferAVX512 &&
41051      VT.getSizeInBits() >= 64 &&
41052      (SVT == MVT::i8 || SVT == MVT::i16) &&
41053      (InSVT == MVT::i16 || InSVT == MVT::i32)) {
41054    if (auto USatVal = detectSSatPattern(In, VT, true)) {
41055      // vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW).
41056      // Only do this when the result is at least 64 bits or we'll be leaving
41057      // dangling PACKSSDW nodes.
41058      if (SVT == MVT::i8 && InSVT == MVT::i32) {
41059        EVT MidVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
41060                                     VT.getVectorNumElements());
41061        SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
41062                                             DAG, Subtarget);
41063        assert(Mid && "Failed to pack!");
41064        SDValue V = truncateVectorWithPACK(X86ISD::PACKUS, VT, Mid, DL, DAG,
41065                                           Subtarget);
41066        assert(V && "Failed to pack!");
41067        return V;
41068      } else if (SVT == MVT::i8 || Subtarget.hasSSE41())
41069        return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG,
41070                                      Subtarget);
41071    }
41072    if (auto SSatVal = detectSSatPattern(In, VT))
41073      return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
41074                                    Subtarget);
41075  }
41076
41077  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41078  if (TLI.isTypeLegal(InVT) && InVT.isVector() && SVT != MVT::i1 &&
41079      Subtarget.hasAVX512() && (InSVT != MVT::i16 || Subtarget.hasBWI())) {
41080    unsigned TruncOpc = 0;
41081    SDValue SatVal;
41082    if (auto SSatVal = detectSSatPattern(In, VT)) {
41083      SatVal = SSatVal;
41084      TruncOpc = X86ISD::VTRUNCS;
41085    } else if (auto USatVal = detectUSatPattern(In, VT, DAG, DL)) {
41086      SatVal = USatVal;
41087      TruncOpc = X86ISD::VTRUNCUS;
41088    }
41089    if (SatVal) {
41090      unsigned ResElts = VT.getVectorNumElements();
41091      // If the input type is less than 512 bits and we don't have VLX, we need
41092      // to widen to 512 bits.
41093      if (!Subtarget.hasVLX() && !InVT.is512BitVector()) {
41094        unsigned NumConcats = 512 / InVT.getSizeInBits();
41095        ResElts *= NumConcats;
41096        SmallVector<SDValue, 4> ConcatOps(NumConcats, DAG.getUNDEF(InVT));
41097        ConcatOps[0] = SatVal;
41098        InVT = EVT::getVectorVT(*DAG.getContext(), InSVT,
41099                                NumConcats * InVT.getVectorNumElements());
41100        SatVal = DAG.getNode(ISD::CONCAT_VECTORS, DL, InVT, ConcatOps);
41101      }
41102      // Widen the result if it's narrower than 128 bits.
41103      if (ResElts * SVT.getSizeInBits() < 128)
41104        ResElts = 128 / SVT.getSizeInBits();
41105      EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), SVT, ResElts);
41106      SDValue Res = DAG.getNode(TruncOpc, DL, TruncVT, SatVal);
41107      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
41108                         DAG.getIntPtrConstant(0, DL));
41109    }
41110  }
41111
41112  return SDValue();
41113}
41114
41115/// This function detects the AVG pattern between vectors of unsigned i8/i16,
41116/// which is c = (a + b + 1) / 2, and replaces this operation with the
41117/// efficient X86ISD::AVG instruction.
41118static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
41119                                const X86Subtarget &Subtarget,
41120                                const SDLoc &DL) {
41121  if (!VT.isVector())
41122    return SDValue();
41123  EVT InVT = In.getValueType();
41124  unsigned NumElems = VT.getVectorNumElements();
41125
41126  EVT ScalarVT = VT.getVectorElementType();
41127  if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) &&
41128        NumElems >= 2 && isPowerOf2_32(NumElems)))
41129    return SDValue();
41130
41131  // InScalarVT is the intermediate type in the AVG pattern and it should be
41132  // wider than the original input type (i8/i16).
41133  EVT InScalarVT = InVT.getVectorElementType();
41134  if (InScalarVT.getSizeInBits() <= ScalarVT.getSizeInBits())
41135    return SDValue();
41136
41137  if (!Subtarget.hasSSE2())
41138    return SDValue();
41139
41140  // Detect the following pattern:
41141  //
41142  //   %1 = zext <N x i8> %a to <N x i32>
41143  //   %2 = zext <N x i8> %b to <N x i32>
41144  //   %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
41145  //   %4 = add nuw nsw <N x i32> %3, %2
41146  //   %5 = lshr <N x i32> %4, <i32 1 x N>
41147  //   %6 = trunc <N x i32> %5 to <N x i8>
41148  //
41149  // In AVX512, the last instruction can also be a trunc store.
41150  if (In.getOpcode() != ISD::SRL)
41151    return SDValue();
41152
41153  // A lambda checking that the given SDValue is a constant vector and that
41154  // each element is in the range [Min, Max].
41155  auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
41156    BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(V);
41157    if (!BV || !BV->isConstant())
41158      return false;
41159    for (SDValue Op : V->ops()) {
41160      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
41161      if (!C)
41162        return false;
41163      const APInt &Val = C->getAPIntValue();
41164      if (Val.ult(Min) || Val.ugt(Max))
41165        return false;
41166    }
41167    return true;
41168  };
41169
41170  // Check if each element of the vector is right-shifted by one.
41171  auto LHS = In.getOperand(0);
41172  auto RHS = In.getOperand(1);
41173  if (!IsConstVectorInRange(RHS, 1, 1))
41174    return SDValue();
41175  if (LHS.getOpcode() != ISD::ADD)
41176    return SDValue();
41177
41178  // Detect a pattern of a + b + 1 where the order doesn't matter.
41179  SDValue Operands[3];
41180  Operands[0] = LHS.getOperand(0);
41181  Operands[1] = LHS.getOperand(1);
41182
41183  auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
41184                       ArrayRef<SDValue> Ops) {
41185    return DAG.getNode(X86ISD::AVG, DL, Ops[0].getValueType(), Ops);
41186  };
41187
41188  // Take care of the case when one of the operands is a constant vector whose
41189  // element is in the range [1, 256].
41190  if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
41191      Operands[0].getOpcode() == ISD::ZERO_EXTEND &&
41192      Operands[0].getOperand(0).getValueType() == VT) {
41193    // The pattern is detected. Subtract one from the constant vector, then
41194    // demote it and emit X86ISD::AVG instruction.
41195    SDValue VecOnes = DAG.getConstant(1, DL, InVT);
41196    Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
41197    Operands[1] = DAG.getNode(ISD::TRUNCATE, DL, VT, Operands[1]);
41198    return SplitOpsAndApply(DAG, Subtarget, DL, VT,
41199                            { Operands[0].getOperand(0), Operands[1] },
41200                            AVGBuilder);
41201  }
41202
41203  // Matches 'add-like' patterns: add(Op0,Op1) or zext(or(Op0,Op1)).
41204  // Match the or case only if it's 'add-like', i.e. replaceable by an add.
41205  auto FindAddLike = [&](SDValue V, SDValue &Op0, SDValue &Op1) {
41206    if (ISD::ADD == V.getOpcode()) {
41207      Op0 = V.getOperand(0);
41208      Op1 = V.getOperand(1);
41209      return true;
41210    }
41211    if (ISD::ZERO_EXTEND != V.getOpcode())
41212      return false;
41213    V = V.getOperand(0);
41214    if (V.getValueType() != VT || ISD::OR != V.getOpcode() ||
41215        !DAG.haveNoCommonBitsSet(V.getOperand(0), V.getOperand(1)))
41216      return false;
41217    Op0 = V.getOperand(0);
41218    Op1 = V.getOperand(1);
41219    return true;
41220  };
41221
41222  SDValue Op0, Op1;
41223  if (FindAddLike(Operands[0], Op0, Op1))
41224    std::swap(Operands[0], Operands[1]);
41225  else if (!FindAddLike(Operands[1], Op0, Op1))
41226    return SDValue();
41227  Operands[2] = Op0;
41228  Operands[1] = Op1;
41229
41230  // Now we have three operands of two additions. Check that one of them is a
41231  // constant vector with ones, and the other two can be promoted from i8/i16.
41232  for (int i = 0; i < 3; ++i) {
41233    if (!IsConstVectorInRange(Operands[i], 1, 1))
41234      continue;
41235    std::swap(Operands[i], Operands[2]);
41236
41237    // Check if Operands[0] and Operands[1] are results of type promotion.
41238    for (int j = 0; j < 2; ++j)
41239      if (Operands[j].getValueType() != VT) {
41240        if (Operands[j].getOpcode() != ISD::ZERO_EXTEND ||
41241            Operands[j].getOperand(0).getValueType() != VT)
41242          return SDValue();
41243        Operands[j] = Operands[j].getOperand(0);
41244      }
41245
41246    // The pattern is detected, emit X86ISD::AVG instruction(s).
41247    return SplitOpsAndApply(DAG, Subtarget, DL, VT, {Operands[0], Operands[1]},
41248                            AVGBuilder);
41249  }
41250
41251  return SDValue();
41252}
41253
41254static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
41255                           TargetLowering::DAGCombinerInfo &DCI,
41256                           const X86Subtarget &Subtarget) {
41257  LoadSDNode *Ld = cast<LoadSDNode>(N);
41258  EVT RegVT = Ld->getValueType(0);
41259  EVT MemVT = Ld->getMemoryVT();
41260  SDLoc dl(Ld);
41261  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41262
41263  // For chips with slow 32-byte unaligned loads, break the 32-byte operation
41264  // into two 16-byte operations. Also split non-temporal aligned loads on
41265  // pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
41266  ISD::LoadExtType Ext = Ld->getExtensionType();
41267  bool Fast;
41268  unsigned Alignment = Ld->getAlignment();
41269  if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
41270      Ext == ISD::NON_EXTLOAD &&
41271      ((Ld->isNonTemporal() && !Subtarget.hasInt256() && Alignment >= 16) ||
41272       (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
41273                               *Ld->getMemOperand(), &Fast) &&
41274        !Fast))) {
41275    unsigned NumElems = RegVT.getVectorNumElements();
41276    if (NumElems < 2)
41277      return SDValue();
41278
41279    unsigned HalfAlign = 16;
41280    SDValue Ptr1 = Ld->getBasePtr();
41281    SDValue Ptr2 = DAG.getMemBasePlusOffset(Ptr1, HalfAlign, dl);
41282    EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
41283                                  NumElems / 2);
41284    SDValue Load1 =
41285        DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr1, Ld->getPointerInfo(),
41286                    Alignment, Ld->getMemOperand()->getFlags());
41287    SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr2,
41288                                Ld->getPointerInfo().getWithOffset(HalfAlign),
41289                                MinAlign(Alignment, HalfAlign),
41290                                Ld->getMemOperand()->getFlags());
41291    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
41292                             Load1.getValue(1), Load2.getValue(1));
41293
41294    SDValue NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Load1, Load2);
41295    return DCI.CombineTo(N, NewVec, TF, true);
41296  }
41297
41298  // Bool vector load - attempt to cast to an integer, as we have good
41299  // (vXiY *ext(vXi1 bitcast(iX))) handling.
41300  if (Ext == ISD::NON_EXTLOAD && !Subtarget.hasAVX512() && RegVT.isVector() &&
41301      RegVT.getScalarType() == MVT::i1 && DCI.isBeforeLegalize()) {
41302    unsigned NumElts = RegVT.getVectorNumElements();
41303    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
41304    if (TLI.isTypeLegal(IntVT)) {
41305      SDValue IntLoad = DAG.getLoad(IntVT, dl, Ld->getChain(), Ld->getBasePtr(),
41306                                    Ld->getPointerInfo(), Alignment,
41307                                    Ld->getMemOperand()->getFlags());
41308      SDValue BoolVec = DAG.getBitcast(RegVT, IntLoad);
41309      return DCI.CombineTo(N, BoolVec, IntLoad.getValue(1), true);
41310    }
41311  }
41312
41313  return SDValue();
41314}
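// Concrete effect of the first transform above (v8f32 is just an example
// type): on a target where unaligned 32-byte accesses are slow, a single
// 256-bit load is rebuilt as two 128-bit loads at offsets 0 and 16, with a
// TokenFactor merging the two load chains and CONCAT_VECTORS recombining the
// halves.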
41315
41316/// If V is a build vector of boolean constants and exactly one of those
41317/// constants is true, return the operand index of that true element.
41318/// Otherwise, return -1.
41319static int getOneTrueElt(SDValue V) {
41320  // This needs to be a build vector of booleans.
41321  // TODO: Checking for the i1 type matches the IR definition for the mask,
41322  // but the mask check could be loosened to i8 or other types. That might
41323  // also require checking more than 'allOnesValue'; eg, the x86 HW
41324  // also require checking more than 'allOnesValue'; e.g., the x86 HW
41325  // The ISD::MSTORE comments/definition do not specify how the mask operand
41326  // is formatted.
41327  auto *BV = dyn_cast<BuildVectorSDNode>(V);
41328  if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
41329    return -1;
41330
41331  int TrueIndex = -1;
41332  unsigned NumElts = BV->getValueType(0).getVectorNumElements();
41333  for (unsigned i = 0; i < NumElts; ++i) {
41334    const SDValue &Op = BV->getOperand(i);
41335    if (Op.isUndef())
41336      continue;
41337    auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
41338    if (!ConstNode)
41339      return -1;
41340    if (ConstNode->getAPIntValue().isAllOnesValue()) {
41341      // If we already found a one, this is too many.
41342      if (TrueIndex >= 0)
41343        return -1;
41344      TrueIndex = i;
41345    }
41346  }
41347  return TrueIndex;
41348}
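// For example, the constant mask <i1 0, i1 1, i1 0, i1 0> yields 1, while
// <i1 1, i1 1, i1 0, i1 0>, a non-constant mask, or a non-i1 mask yields -1.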
41349
41350/// Given a masked memory load/store operation, return true if it has one mask
41351/// bit set. If it has one mask bit set, then also return the memory address of
41352/// the scalar element to load/store, the vector index to insert/extract that
41353/// scalar element, and the alignment for the scalar memory access.
41354static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
41355                                         SelectionDAG &DAG, SDValue &Addr,
41356                                         SDValue &Index, unsigned &Alignment) {
41357  int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
41358  if (TrueMaskElt < 0)
41359    return false;
41360
41361  // Get the address of the one scalar element that is specified by the mask
41362  // using the appropriate offset from the base pointer.
41363  EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
41364  Addr = MaskedOp->getBasePtr();
41365  if (TrueMaskElt != 0) {
41366    unsigned Offset = TrueMaskElt * EltVT.getStoreSize();
41367    Addr = DAG.getMemBasePlusOffset(Addr, Offset, SDLoc(MaskedOp));
41368  }
41369
41370  Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
41371  Alignment = MinAlign(MaskedOp->getAlignment(), EltVT.getStoreSize());
41372  return true;
41373}
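// Worked example (v4f32 chosen for illustration): for a masked load whose
// mask is <0, 0, 1, 0>, this returns Addr = BasePtr + 8, Index = 2 and
// Alignment = MinAlign(original alignment, 4).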
41374
41375/// If exactly one element of the mask is set for a non-extending masked load,
41376/// it is a scalar load and vector insert.
41377/// reduce it to a scalar load and a vector insert.
41378/// mask have already been optimized in IR, so we don't bother with those here.
41379static SDValue
41380reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
41381                             TargetLowering::DAGCombinerInfo &DCI) {
41382  assert(ML->isUnindexed() && "Unexpected indexed masked load!");
41383  // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
41384  // However, some target hooks may need to be added to know when the transform
41385  // is profitable. Endianness would also have to be considered.
41386
41387  SDValue Addr, VecIndex;
41388  unsigned Alignment;
41389  if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment))
41390    return SDValue();
41391
41392  // Load the one scalar element that is specified by the mask using the
41393  // appropriate offset from the base pointer.
41394  SDLoc DL(ML);
41395  EVT VT = ML->getValueType(0);
41396  EVT EltVT = VT.getVectorElementType();
41397  SDValue Load =
41398      DAG.getLoad(EltVT, DL, ML->getChain(), Addr, ML->getPointerInfo(),
41399                  Alignment, ML->getMemOperand()->getFlags());
41400
41401  // Insert the loaded element into the appropriate place in the vector.
41402  SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT,
41403                               ML->getPassThru(), Load, VecIndex);
41404  return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
41405}
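// Roughly the IR-level equivalent of the DAG produced above, continuing the
// single-lane v4f32 example (%addr and %passthru are placeholders):
//   %s = load float, float* %addr            ; scalar load of the one lane
//   %r = insertelement <4 x float> %passthru, float %s, i32 2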
41406
41407static SDValue
41408combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
41409                              TargetLowering::DAGCombinerInfo &DCI) {
41410  assert(ML->isUnindexed() && "Unexpected indexed masked load!");
41411  if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
41412    return SDValue();
41413
41414  SDLoc DL(ML);
41415  EVT VT = ML->getValueType(0);
41416
41417  // If we are loading the first and last elements of a vector, it is safe and
41418  // always faster to load the whole vector. Replace the masked load with a
41419  // vector load and select.
41420  unsigned NumElts = VT.getVectorNumElements();
41421  BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
41422  bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
41423  bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
41424  if (LoadFirstElt && LoadLastElt) {
41425    SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
41426                                ML->getMemOperand());
41427    SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd,
41428                                  ML->getPassThru());
41429    return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
41430  }
41431
41432  // Convert a masked load with a constant mask into a masked load and a select.
41433  // This allows the select operation to use a faster kind of select instruction
41434  // (for example, vblendvps -> vblendps).
41435
41436  // Don't try this if the pass-through operand is already undefined. That would
41437  // cause an infinite loop because that's what we're about to create.
41438  if (ML->getPassThru().isUndef())
41439    return SDValue();
41440
41441  if (ISD::isBuildVectorAllZeros(ML->getPassThru().getNode()))
41442    return SDValue();
41443
41444  // The new masked load has an undef pass-through operand. The select uses the
41445  // original pass-through operand.
41446  SDValue NewML = DAG.getMaskedLoad(
41447      VT, DL, ML->getChain(), ML->getBasePtr(), ML->getOffset(), ML->getMask(),
41448      DAG.getUNDEF(VT), ML->getMemoryVT(), ML->getMemOperand(),
41449      ML->getAddressingMode(), ML->getExtensionType());
41450  SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML,
41451                                ML->getPassThru());
41452
41453  return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
41454}
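// E.g. a constant mask with the first and last lanes set turns the masked
// load into a plain full-width load plus a select; other non-trivial constant
// masks keep the masked load but move the pass-through into a separate
// select, so the blend can use an immediate-controlled vblendps instead of
// vblendvps.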
41455
41456static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
41457                                 TargetLowering::DAGCombinerInfo &DCI,
41458                                 const X86Subtarget &Subtarget) {
41459  MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
41460
41461  // TODO: Expanding load with constant mask may be optimized as well.
41462  if (Mld->isExpandingLoad())
41463    return SDValue();
41464
41465  if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
41466    if (SDValue ScalarLoad = reduceMaskedLoadToScalarLoad(Mld, DAG, DCI))
41467      return ScalarLoad;
41468    // TODO: Do some AVX512 subsets benefit from this transform?
41469    if (!Subtarget.hasAVX512())
41470      if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
41471        return Blend;
41472  }
41473
41474  return SDValue();
41475}
41476
41477/// If exactly one element of the mask is set for a non-truncating masked store,
41478/// it is a vector extract and scalar store.
41479/// reduce it to a vector extract and a scalar store.
41480/// mask have already been optimized in IR, so we don't bother with those here.
41481static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
41482                                              SelectionDAG &DAG) {
41483  // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
41484  // However, some target hooks may need to be added to know when the transform
41485  // is profitable. Endianness would also have to be considered.
41486
41487  SDValue Addr, VecIndex;
41488  unsigned Alignment;
41489  if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment))
41490    return SDValue();
41491
41492  // Extract the one scalar element that is actually being stored.
41493  SDLoc DL(MS);
41494  EVT VT = MS->getValue().getValueType();
41495  EVT EltVT = VT.getVectorElementType();
41496  SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
41497                                MS->getValue(), VecIndex);
41498
41499  // Store that element at the appropriate offset from the base pointer.
41500  return DAG.getStore(MS->getChain(), DL, Extract, Addr, MS->getPointerInfo(),
41501                      Alignment, MS->getMemOperand()->getFlags());
41502}
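// Mirroring the masked-load case, a masked store of <4 x i32> %v with mask
// <0, 1, 0, 0> becomes, roughly:
//   %e = extractelement <4 x i32> %v, i32 1
//   store i32 %e, i32* %addr   ; %addr = base + 4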
41503
41504static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
41505                                  TargetLowering::DAGCombinerInfo &DCI,
41506                                  const X86Subtarget &Subtarget) {
41507  MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
41508  if (Mst->isCompressingStore())
41509    return SDValue();
41510
41511  EVT VT = Mst->getValue().getValueType();
41512  SDLoc dl(Mst);
41513  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41514
41515  if (Mst->isTruncatingStore())
41516    return SDValue();
41517
41518  if (SDValue ScalarStore = reduceMaskedStoreToScalarStore(Mst, DAG))
41519    return ScalarStore;
41520
41521  // If the mask value has been legalized to a non-boolean vector, try to
41522  // simplify ops leading up to it. We only demand the MSB of each lane.
41523  SDValue Mask = Mst->getMask();
41524  if (Mask.getScalarValueSizeInBits() != 1) {
41525    APInt DemandedMask(APInt::getSignMask(VT.getScalarSizeInBits()));
41526    if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
41527      return SDValue(N, 0);
41528  }
41529
41530  SDValue Value = Mst->getValue();
41531  if (Value.getOpcode() == ISD::TRUNCATE && Value.getNode()->hasOneUse() &&
41532      TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
41533                            Mst->getMemoryVT())) {
41534    return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Value.getOperand(0),
41535                              Mst->getBasePtr(), Mst->getOffset(), Mask,
41536                              Mst->getMemoryVT(), Mst->getMemOperand(),
41537                              Mst->getAddressingMode(), true);
41538  }
41539
41540  return SDValue();
41541}
41542
41543static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
41544                            TargetLowering::DAGCombinerInfo &DCI,
41545                            const X86Subtarget &Subtarget) {
41546  StoreSDNode *St = cast<StoreSDNode>(N);
41547  EVT StVT = St->getMemoryVT();
41548  SDLoc dl(St);
41549  unsigned Alignment = St->getAlignment();
41550  SDValue StoredVal = St->getValue();
41551  EVT VT = StoredVal.getValueType();
41552  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41553
41554  // Convert a store of vXi1 into a store of iX and a bitcast.
41555  if (!Subtarget.hasAVX512() && VT == StVT && VT.isVector() &&
41556      VT.getVectorElementType() == MVT::i1) {
41557
41558    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
41559    StoredVal = DAG.getBitcast(NewVT, StoredVal);
41560
41561    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
41562                        St->getPointerInfo(), St->getAlignment(),
41563                        St->getMemOperand()->getFlags());
41564  }
41565
41566  // If this is a store of a scalar_to_vector to v1i1, just use a scalar store.
41567  // This will avoid a copy to k-register.
41568  if (VT == MVT::v1i1 && VT == StVT && Subtarget.hasAVX512() &&
41569      StoredVal.getOpcode() == ISD::SCALAR_TO_VECTOR &&
41570      StoredVal.getOperand(0).getValueType() == MVT::i8) {
41571    return DAG.getStore(St->getChain(), dl, StoredVal.getOperand(0),
41572                        St->getBasePtr(), St->getPointerInfo(),
41573                        St->getAlignment(), St->getMemOperand()->getFlags());
41574  }
41575
41576  // Widen v2i1/v4i1 stores to v8i1.
41577  if ((VT == MVT::v2i1 || VT == MVT::v4i1) && VT == StVT &&
41578      Subtarget.hasAVX512()) {
41579    unsigned NumConcats = 8 / VT.getVectorNumElements();
41580    SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(VT));
41581    Ops[0] = StoredVal;
41582    StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
41583    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
41584                        St->getPointerInfo(), St->getAlignment(),
41585                        St->getMemOperand()->getFlags());
41586  }
41587
41588  // Turn vXi1 stores of constants into a scalar store.
41589  if ((VT == MVT::v8i1 || VT == MVT::v16i1 || VT == MVT::v32i1 ||
41590       VT == MVT::v64i1) && VT == StVT && TLI.isTypeLegal(VT) &&
41591      ISD::isBuildVectorOfConstantSDNodes(StoredVal.getNode())) {
41592    // If it's a v64i1 store without 64-bit support, we need two stores.
41593    if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
41594      SDValue Lo = DAG.getBuildVector(MVT::v32i1, dl,
41595                                      StoredVal->ops().slice(0, 32));
41596      Lo = combinevXi1ConstantToInteger(Lo, DAG);
41597      SDValue Hi = DAG.getBuildVector(MVT::v32i1, dl,
41598                                      StoredVal->ops().slice(32, 32));
41599      Hi = combinevXi1ConstantToInteger(Hi, DAG);
41600
41601      SDValue Ptr0 = St->getBasePtr();
41602      SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, 4, dl);
41603
41604      SDValue Ch0 =
41605          DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
41606                       Alignment, St->getMemOperand()->getFlags());
41607      SDValue Ch1 =
41608          DAG.getStore(St->getChain(), dl, Hi, Ptr1,
41609                       St->getPointerInfo().getWithOffset(4),
41610                       MinAlign(Alignment, 4U),
41611                       St->getMemOperand()->getFlags());
41612      return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
41613    }
41614
41615    StoredVal = combinevXi1ConstantToInteger(StoredVal, DAG);
41616    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
41617                        St->getPointerInfo(), St->getAlignment(),
41618                        St->getMemOperand()->getFlags());
41619  }
41620
41621  // If we are saving a 32-byte vector and 32-byte stores are slow, such as on
41622  // Sandy Bridge, perform two 16-byte stores.
41623  bool Fast;
41624  if (VT.is256BitVector() && StVT == VT &&
41625      TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
41626                             *St->getMemOperand(), &Fast) &&
41627      !Fast) {
41628    unsigned NumElems = VT.getVectorNumElements();
41629    if (NumElems < 2)
41630      return SDValue();
41631
41632    return splitVectorStore(St, DAG);
41633  }
41634
41635  // Split under-aligned vector non-temporal stores.
41636  if (St->isNonTemporal() && StVT == VT && Alignment < VT.getStoreSize()) {
41637    // ZMM/YMM nt-stores - either it can be stored as a series of shorter
41638    // vectors or the legalizer can scalarize it to use MOVNTI.
41639    if (VT.is256BitVector() || VT.is512BitVector()) {
41640      unsigned NumElems = VT.getVectorNumElements();
41641      if (NumElems < 2)
41642        return SDValue();
41643      return splitVectorStore(St, DAG);
41644    }
41645
41646    // XMM nt-stores - scalarize this to f64 nt-stores on SSE4A, else i32/i64
41647    // to use MOVNTI.
41648    if (VT.is128BitVector() && Subtarget.hasSSE2()) {
41649      MVT NTVT = Subtarget.hasSSE4A()
41650                     ? MVT::v2f64
41651                     : (TLI.isTypeLegal(MVT::i64) ? MVT::v2i64 : MVT::v4i32);
41652      return scalarizeVectorStore(St, NTVT, DAG);
41653    }
41654  }
41655
41656  // Try to optimize v16i16->v16i8 truncating stores when BWI is not
41657  // supported, but avx512f is by extending to v16i32 and truncating.
41658  // supported but AVX512F is, by extending to v16i32 and then truncating.
41659      St->getValue().getOpcode() == ISD::TRUNCATE &&
41660      St->getValue().getOperand(0).getValueType() == MVT::v16i16 &&
41661      TLI.isTruncStoreLegal(MVT::v16i32, MVT::v16i8) &&
41662      St->getValue().hasOneUse() && !DCI.isBeforeLegalizeOps()) {
41663    SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v16i32, St->getValue());
41664    return DAG.getTruncStore(St->getChain(), dl, Ext, St->getBasePtr(),
41665                             MVT::v16i8, St->getMemOperand());
41666  }
41667
41668  // Try to fold a VTRUNCUS or VTRUNCS into a truncating store.
41669  if (!St->isTruncatingStore() && StoredVal.hasOneUse() &&
41670      (StoredVal.getOpcode() == X86ISD::VTRUNCUS ||
41671       StoredVal.getOpcode() == X86ISD::VTRUNCS) &&
41672      TLI.isTruncStoreLegal(StoredVal.getOperand(0).getValueType(), VT)) {
41673    bool IsSigned = StoredVal.getOpcode() == X86ISD::VTRUNCS;
41674    return EmitTruncSStore(IsSigned, St->getChain(),
41675                           dl, StoredVal.getOperand(0), St->getBasePtr(),
41676                           VT, St->getMemOperand(), DAG);
41677  }
41678
41679  // Optimize trunc store (of multiple scalars) to shuffle and store.
41680  // First, pack all of the elements in one place. Next, store to memory
41681  // in fewer chunks.
41682  if (St->isTruncatingStore() && VT.isVector()) {
41683    // Check if we can detect an AVG pattern from the truncation. If yes,
41684    // replace the trunc store by a normal store with the result of X86ISD::AVG
41685    // instruction.
41686    if (DCI.isBeforeLegalize() || TLI.isTypeLegal(St->getMemoryVT()))
41687      if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
41688                                         Subtarget, dl))
41689        return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
41690                            St->getPointerInfo(), St->getAlignment(),
41691                            St->getMemOperand()->getFlags());
41692
41693    if (TLI.isTruncStoreLegal(VT, StVT)) {
41694      if (SDValue Val = detectSSatPattern(St->getValue(), St->getMemoryVT()))
41695        return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
41696                               dl, Val, St->getBasePtr(),
41697                               St->getMemoryVT(), St->getMemOperand(), DAG);
41698      if (SDValue Val = detectUSatPattern(St->getValue(), St->getMemoryVT(),
41699                                          DAG, dl))
41700        return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
41701                               dl, Val, St->getBasePtr(),
41702                               St->getMemoryVT(), St->getMemOperand(), DAG);
41703    }
41704
41705    return SDValue();
41706  }
41707
41708  // Turn load->store of MMX types into GPR load/stores.  This avoids clobbering
41709  // the FP state in cases where an emms may be missing.
41710  // A preferable solution to the general problem is to figure out the right
41711  // places to insert EMMS.  This qualifies as a quick hack.
41712
41713  // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
41714  if (VT.getSizeInBits() != 64)
41715    return SDValue();
41716
41717  const Function &F = DAG.getMachineFunction().getFunction();
41718  bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
41719  bool F64IsLegal =
41720      !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
41721  if ((VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit()) &&
41722      isa<LoadSDNode>(St->getValue()) &&
41723      cast<LoadSDNode>(St->getValue())->isSimple() &&
41724      St->getChain().hasOneUse() && St->isSimple()) {
41725    LoadSDNode *Ld = cast<LoadSDNode>(St->getValue().getNode());
41726
41727    if (!ISD::isNormalLoad(Ld))
41728      return SDValue();
41729
41730    // Avoid the transformation if there are multiple uses of the loaded value.
41731    if (!Ld->hasNUsesOfValue(1, 0))
41732      return SDValue();
41733
41734    SDLoc LdDL(Ld);
41735    SDLoc StDL(N);
41736    // Lower to a single movq load/store pair.
41737    SDValue NewLd = DAG.getLoad(MVT::f64, LdDL, Ld->getChain(),
41738                                Ld->getBasePtr(), Ld->getMemOperand());
41739
41740    // Make sure new load is placed in same chain order.
41741    DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
41742    return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
41743                        St->getMemOperand());
41744  }
41745
41746  // This is similar to the above case, but here we handle a scalar 64-bit
41747  // integer store that is extracted from a vector on a 32-bit target.
41748  // If we have SSE2, then we can treat it like a floating-point double
41749  // to get past legalization. The execution dependencies fixup pass will
41750  // choose the optimal machine instruction for the store if this really is
41751  // an integer or v2f32 rather than an f64.
41752  if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
41753      St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
41754    SDValue OldExtract = St->getOperand(1);
41755    SDValue ExtOp0 = OldExtract.getOperand(0);
41756    unsigned VecSize = ExtOp0.getValueSizeInBits();
41757    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
41758    SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
41759    SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
41760                                     BitCast, OldExtract.getOperand(1));
41761    return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
41762                        St->getPointerInfo(), St->getAlignment(),
41763                        St->getMemOperand()->getFlags());
41764  }
41765
41766  return SDValue();
41767}
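// A few concrete cases handled above (types are illustrative): a v8i1 store
// on a pre-AVX512 target becomes a store of the i8 bitcast; a 256-bit store
// is split into two 128-bit stores when 32-byte accesses are slow or when the
// store is an under-aligned non-temporal one; and in 32-bit mode with SSE2 an
// i64 load->store pair becomes an f64 load/store so no GPR pair is needed.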
41768
41769/// Return 'true' if this vector operation is "horizontal"
41770/// and return the operands for the horizontal operation in LHS and RHS.  A
41771/// horizontal operation performs the binary operation on successive elements
41772/// of its first operand, then on successive elements of its second operand,
41773/// returning the resulting values in a vector.  For example, if
41774///   A = < float a0, float a1, float a2, float a3 >
41775/// and
41776///   B = < float b0, float b1, float b2, float b3 >
41777/// then the result of doing a horizontal operation on A and B is
41778///   A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
41779/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
41780/// A horizontal-op B, for some already available A and B, and if so then LHS is
41781/// set to A, RHS to B, and the routine returns 'true'.
41782static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, SelectionDAG &DAG,
41783                              const X86Subtarget &Subtarget,
41784                              bool IsCommutative) {
41785  // If either operand is undef, bail out. The binop should be simplified.
41786  if (LHS.isUndef() || RHS.isUndef())
41787    return false;
41788
41789  // Look for the following pattern:
41790  //   A = < float a0, float a1, float a2, float a3 >
41791  //   B = < float b0, float b1, float b2, float b3 >
41792  // and
41793  //   LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
41794  //   RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
41795  // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
41796  // which is A horizontal-op B.
41797
41798  MVT VT = LHS.getSimpleValueType();
41799  assert((VT.is128BitVector() || VT.is256BitVector()) &&
41800         "Unsupported vector type for horizontal add/sub");
41801  unsigned NumElts = VT.getVectorNumElements();
41802
41803  // TODO - can we make a general helper method that does all of this for us?
41804  auto GetShuffle = [&](SDValue Op, SDValue &N0, SDValue &N1,
41805                        SmallVectorImpl<int> &ShuffleMask) {
41806    if (Op.getOpcode() == ISD::VECTOR_SHUFFLE) {
41807      if (!Op.getOperand(0).isUndef())
41808        N0 = Op.getOperand(0);
41809      if (!Op.getOperand(1).isUndef())
41810        N1 = Op.getOperand(1);
41811      ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
41812      ShuffleMask.append(Mask.begin(), Mask.end());
41813      return;
41814    }
41815    bool UseSubVector = false;
41816    if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
41817        Op.getOperand(0).getValueType().is256BitVector() &&
41818        llvm::isNullConstant(Op.getOperand(1))) {
41819      Op = Op.getOperand(0);
41820      UseSubVector = true;
41821    }
41822    bool IsUnary;
41823    SmallVector<SDValue, 2> SrcOps;
41824    SmallVector<int, 16> SrcShuffleMask;
41825    SDValue BC = peekThroughBitcasts(Op);
41826    if (isTargetShuffle(BC.getOpcode()) &&
41827        getTargetShuffleMask(BC.getNode(), BC.getSimpleValueType(), false,
41828                             SrcOps, SrcShuffleMask, IsUnary)) {
41829      if (!UseSubVector && SrcShuffleMask.size() == NumElts &&
41830          SrcOps.size() <= 2) {
41831        N0 = SrcOps.size() > 0 ? SrcOps[0] : SDValue();
41832        N1 = SrcOps.size() > 1 ? SrcOps[1] : SDValue();
41833        ShuffleMask.append(SrcShuffleMask.begin(), SrcShuffleMask.end());
41834      }
41835      if (UseSubVector && (SrcShuffleMask.size() == (NumElts * 2)) &&
41836          SrcOps.size() == 1) {
41837        N0 = extract128BitVector(SrcOps[0], 0, DAG, SDLoc(Op));
41838        N1 = extract128BitVector(SrcOps[0], NumElts, DAG, SDLoc(Op));
41839        ArrayRef<int> Mask = ArrayRef<int>(SrcShuffleMask).slice(0, NumElts);
41840        ShuffleMask.append(Mask.begin(), Mask.end());
41841      }
41842    }
41843  };
41844
41845  // View LHS in the form
41846  //   LHS = VECTOR_SHUFFLE A, B, LMask
41847  // If LHS is not a shuffle, then pretend it is the identity shuffle:
41848  //   LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
41849  // NOTE: A default initialized SDValue represents an UNDEF of type VT.
41850  SDValue A, B;
41851  SmallVector<int, 16> LMask;
41852  GetShuffle(LHS, A, B, LMask);
41853
41854  // Likewise, view RHS in the form
41855  //   RHS = VECTOR_SHUFFLE C, D, RMask
41856  SDValue C, D;
41857  SmallVector<int, 16> RMask;
41858  GetShuffle(RHS, C, D, RMask);
41859
41860  // At least one of the operands should be a vector shuffle.
41861  unsigned NumShuffles = (LMask.empty() ? 0 : 1) + (RMask.empty() ? 0 : 1);
41862  if (NumShuffles == 0)
41863    return false;
41864
41865  if (LMask.empty()) {
41866    A = LHS;
41867    for (unsigned i = 0; i != NumElts; ++i)
41868      LMask.push_back(i);
41869  }
41870
41871  if (RMask.empty()) {
41872    C = RHS;
41873    for (unsigned i = 0; i != NumElts; ++i)
41874      RMask.push_back(i);
41875  }
41876
41877  // If A and B occur in reverse order in RHS, then canonicalize by commuting
41878  // RHS operands and shuffle mask.
41879  if (A != C) {
41880    std::swap(C, D);
41881    ShuffleVectorSDNode::commuteMask(RMask);
41882  }
41883  // Check that the shuffles are both shuffling the same vectors.
41884  if (!(A == C && B == D))
41885    return false;
41886
41887  // LHS and RHS are now:
41888  //   LHS = shuffle A, B, LMask
41889  //   RHS = shuffle A, B, RMask
41890  // Check that the masks correspond to performing a horizontal operation.
41891  // AVX defines horizontal add/sub to operate independently on 128-bit lanes,
41892  // so we just repeat the inner loop if this is a 256-bit op.
41893  unsigned Num128BitChunks = VT.getSizeInBits() / 128;
41894  unsigned NumEltsPer128BitChunk = NumElts / Num128BitChunks;
41895  assert((NumEltsPer128BitChunk % 2 == 0) &&
41896         "Vector type should have an even number of elements in each lane");
41897  for (unsigned j = 0; j != NumElts; j += NumEltsPer128BitChunk) {
41898    for (unsigned i = 0; i != NumEltsPer128BitChunk; ++i) {
41899      // Ignore undefined components.
41900      int LIdx = LMask[i + j], RIdx = RMask[i + j];
41901      if (LIdx < 0 || RIdx < 0 ||
41902          (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
41903          (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
41904        continue;
41905
41906      // The  low half of the 128-bit result must choose from A.
41907      // The high half of the 128-bit result must choose from B,
41908      // unless B is undef. In that case, we are always choosing from A.
41909      unsigned NumEltsPer64BitChunk = NumEltsPer128BitChunk / 2;
41910      unsigned Src = B.getNode() ? i >= NumEltsPer64BitChunk : 0;
41911
41912      // Check that successive elements are being operated on. If not, this is
41913      // not a horizontal operation.
41914      int Index = 2 * (i % NumEltsPer64BitChunk) + NumElts * Src + j;
41915      if (!(LIdx == Index && RIdx == Index + 1) &&
41916          !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
41917        return false;
41918    }
41919  }
41920
41921  LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
41922  RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
41923
41924  if (!shouldUseHorizontalOp(LHS == RHS && NumShuffles < 2, DAG, Subtarget))
41925    return false;
41926
41927  LHS = DAG.getBitcast(VT, LHS);
41928  RHS = DAG.getBitcast(VT, RHS);
41929  return true;
41930}
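// Putting the doc comment above in x86 terms for v4f32: if
//   LHS = shuffle A, B, <0, 2, 4, 6> and RHS = shuffle A, B, <1, 3, 5, 7>,
// then LHS + RHS is <a0+a1, a2+a3, b0+b1, b2+b3>, i.e. HADDPS A, B, and this
// routine rewrites LHS/RHS to A/B so the caller can emit the horizontal op.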
41931
41932/// Do target-specific dag combines on floating-point adds/subs.
41933static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
41934                               const X86Subtarget &Subtarget) {
41935  EVT VT = N->getValueType(0);
41936  SDValue LHS = N->getOperand(0);
41937  SDValue RHS = N->getOperand(1);
41938  bool IsFadd = N->getOpcode() == ISD::FADD;
41939  auto HorizOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
41940  assert((IsFadd || N->getOpcode() == ISD::FSUB) && "Wrong opcode");
41941
41942  // Try to synthesize horizontal add/sub from adds/subs of shuffles.
41943  if (((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
41944       (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
41945      isHorizontalBinOp(LHS, RHS, DAG, Subtarget, IsFadd))
41946    return DAG.getNode(HorizOpcode, SDLoc(N), VT, LHS, RHS);
41947
41948  return SDValue();
41949}
41950
41951/// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
41952/// the codegen.
41953/// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
41954/// TODO: This overlaps with the generic combiner's visitTRUNCATE. Remove
41955///       anything that is guaranteed to be transformed by DAGCombiner.
41956static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
41957                                          const X86Subtarget &Subtarget,
41958                                          const SDLoc &DL) {
41959  assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
41960  SDValue Src = N->getOperand(0);
41961  unsigned SrcOpcode = Src.getOpcode();
41962  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41963
41964  EVT VT = N->getValueType(0);
41965  EVT SrcVT = Src.getValueType();
41966
41967  auto IsFreeTruncation = [VT](SDValue Op) {
41968    unsigned TruncSizeInBits = VT.getScalarSizeInBits();
41969
41970    // See if this has been extended from a smaller/equal size to
41971    // the truncation size, allowing a truncation to combine with the extend.
41972    unsigned Opcode = Op.getOpcode();
41973    if ((Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND ||
41974         Opcode == ISD::ZERO_EXTEND) &&
41975        Op.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
41976      return true;
41977
41978    // See if this is a single-use constant which can be constant folded.
41979    // NOTE: We don't peek through bitcasts here because there is currently
41980    // no support for constant folding truncate+bitcast+vector_of_constants. So
41981    // we'll just end up with a truncate on both operands which will
41982    // get turned back into (truncate (binop)) causing an infinite loop.
41983    return ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
41984  };
41985
41986  auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
41987    SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
41988    SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
41989    return DAG.getNode(SrcOpcode, DL, VT, Trunc0, Trunc1);
41990  };
41991
41992  // Don't combine if the operation has other uses.
41993  if (!Src.hasOneUse())
41994    return SDValue();
41995
41996  // Only support vector truncation for now.
41997  // TODO: i64 scalar math would benefit as well.
41998  if (!VT.isVector())
41999    return SDValue();
42000
42001  // In most cases it's only worth pre-truncating if we're only facing the cost
42002  // of one truncation.
42003  // i.e. if one of the inputs will constant fold or the input is repeated.
42004  switch (SrcOpcode) {
42005  case ISD::AND:
42006  case ISD::XOR:
42007  case ISD::OR: {
42008    SDValue Op0 = Src.getOperand(0);
42009    SDValue Op1 = Src.getOperand(1);
42010    if (TLI.isOperationLegalOrPromote(SrcOpcode, VT) &&
42011        (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
42012      return TruncateArithmetic(Op0, Op1);
42013    break;
42014  }
42015
42016  case ISD::MUL:
42017    // X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) - it's
42018    // better to truncate if we have the chance.
42019    if (SrcVT.getScalarType() == MVT::i64 &&
42020        TLI.isOperationLegal(SrcOpcode, VT) &&
42021        !TLI.isOperationLegal(SrcOpcode, SrcVT))
42022      return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
42023    LLVM_FALLTHROUGH;
42024  case ISD::ADD: {
42025    SDValue Op0 = Src.getOperand(0);
42026    SDValue Op1 = Src.getOperand(1);
42027    if (TLI.isOperationLegal(SrcOpcode, VT) &&
42028        (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
42029      return TruncateArithmetic(Op0, Op1);
42030    break;
42031  }
42032  case ISD::SUB: {
42033    // TODO: ISD::SUB - we are conservative and require both sides to be freely
42034    // truncatable to avoid interfering with combineSubToSubus.
42035    SDValue Op0 = Src.getOperand(0);
42036    SDValue Op1 = Src.getOperand(1);
42037    if (TLI.isOperationLegal(SrcOpcode, VT) &&
42038        (Op0 == Op1 || (IsFreeTruncation(Op0) && IsFreeTruncation(Op1))))
42039      return TruncateArithmetic(Op0, Op1);
42040    break;
42041  }
42042  }
42043
42044  return SDValue();
42045}
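// Example of the AND/XOR/OR case (v4i32/v4i64 chosen for illustration):
//   trunc (and (zext v4i32 %x to v4i64), splat C) to v4i32
// becomes and(trunc(zext %x), trunc(splat C)); both inserted truncates then
// fold away (trunc-of-zext and constant folding), leaving an AND of %x with
// the truncated constant.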
42046
42047/// Truncate using ISD::AND mask and X86ISD::PACKUS.
42048/// e.g. trunc <8 x i32> X to <8 x i16> -->
42049/// MaskX = X & 0xffff (clear high bits to prevent saturation)
42050/// packus (extract_subv MaskX, 0), (extract_subv MaskX, 1)
42051static SDValue combineVectorTruncationWithPACKUS(SDNode *N, const SDLoc &DL,
42052                                                 const X86Subtarget &Subtarget,
42053                                                 SelectionDAG &DAG) {
42054  SDValue In = N->getOperand(0);
42055  EVT InVT = In.getValueType();
42056  EVT OutVT = N->getValueType(0);
42057
42058  APInt Mask = APInt::getLowBitsSet(InVT.getScalarSizeInBits(),
42059                                    OutVT.getScalarSizeInBits());
42060  In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
42061  return truncateVectorWithPACK(X86ISD::PACKUS, OutVT, In, DL, DAG, Subtarget);
42062}
42063
42064/// Truncate a group of v4i32 into v8i16 using X86ISD::PACKSS.
42065static SDValue combineVectorTruncationWithPACKSS(SDNode *N, const SDLoc &DL,
42066                                                 const X86Subtarget &Subtarget,
42067                                                 SelectionDAG &DAG) {
42068  SDValue In = N->getOperand(0);
42069  EVT InVT = In.getValueType();
42070  EVT OutVT = N->getValueType(0);
42071  In = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, InVT, In,
42072                   DAG.getValueType(OutVT));
42073  return truncateVectorWithPACK(X86ISD::PACKSS, OutVT, In, DL, DAG, Subtarget);
42074}
42075
42076/// This function transforms truncation from vXi32/vXi64 to vXi8/vXi16 into
42077/// X86ISD::PACKUS/X86ISD::PACKSS operations. We do it here because after type
42078/// legalization the truncation will be translated into a BUILD_VECTOR with each
42079/// element that is extracted from a vector and then truncated, and it is
42080/// difficult to do this optimization based on them.
42081static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
42082                                       const X86Subtarget &Subtarget) {
42083  EVT OutVT = N->getValueType(0);
42084  if (!OutVT.isVector())
42085    return SDValue();
42086
42087  SDValue In = N->getOperand(0);
42088  if (!In.getValueType().isSimple())
42089    return SDValue();
42090
42091  EVT InVT = In.getValueType();
42092  unsigned NumElems = OutVT.getVectorNumElements();
42093
42094  // TODO: On AVX2, the behavior of X86ISD::PACKUS is different from that on
42095  // SSE2, and we need to take care of it specially.
42096  // AVX512 provides vpmovdb.
42097  if (!Subtarget.hasSSE2() || Subtarget.hasAVX2())
42098    return SDValue();
42099
42100  EVT OutSVT = OutVT.getVectorElementType();
42101  EVT InSVT = InVT.getVectorElementType();
42102  if (!((InSVT == MVT::i32 || InSVT == MVT::i64) &&
42103        (OutSVT == MVT::i8 || OutSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
42104        NumElems >= 8))
42105    return SDValue();
42106
42107  // SSSE3's pshufb results in fewer instructions in the cases below.
42108  if (Subtarget.hasSSSE3() && NumElems == 8 &&
42109      ((OutSVT == MVT::i8 && InSVT != MVT::i64) ||
42110       (InSVT == MVT::i32 && OutSVT == MVT::i16)))
42111    return SDValue();
42112
42113  SDLoc DL(N);
42114  // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
42115  // for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
42116  // truncate 2 x v4i32 to v8i16.
42117  if (Subtarget.hasSSE41() || OutSVT == MVT::i8)
42118    return combineVectorTruncationWithPACKUS(N, DL, Subtarget, DAG);
42119  if (InSVT == MVT::i32)
42120    return combineVectorTruncationWithPACKSS(N, DL, Subtarget, DAG);
42121
42122  return SDValue();
42123}
42124
42125/// This function transforms vector truncation of 'extended sign-bits' or
42126/// 'extended zero-bits' values, i.e. vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32,
42127/// into X86ISD::PACKSS/PACKUS operations.
42128static SDValue combineVectorSignBitsTruncation(SDNode *N, const SDLoc &DL,
42129                                               SelectionDAG &DAG,
42130                                               const X86Subtarget &Subtarget) {
42131  // Requires SSE2.
42132  if (!Subtarget.hasSSE2())
42133    return SDValue();
42134
42135  if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
42136    return SDValue();
42137
42138  SDValue In = N->getOperand(0);
42139  if (!In.getValueType().isSimple())
42140    return SDValue();
42141
42142  MVT VT = N->getValueType(0).getSimpleVT();
42143  MVT SVT = VT.getScalarType();
42144
42145  MVT InVT = In.getValueType().getSimpleVT();
42146  MVT InSVT = InVT.getScalarType();
42147
42148  // Check we have a truncation suited for PACKSS/PACKUS.
42149  if (!VT.is128BitVector() && !VT.is256BitVector())
42150    return SDValue();
42151  if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32)
42152    return SDValue();
42153  if (InSVT != MVT::i16 && InSVT != MVT::i32 && InSVT != MVT::i64)
42154    return SDValue();
42155
42156  // AVX512 has fast truncate, but if the input is already going to be split,
42157  // there's no harm in trying pack.
42158  if (Subtarget.hasAVX512() &&
42159      !(!Subtarget.useAVX512Regs() && VT.is256BitVector() &&
42160        InVT.is512BitVector()))
42161    return SDValue();
42162
42163  unsigned NumPackedSignBits = std::min<unsigned>(SVT.getSizeInBits(), 16);
42164  unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
42165
42166  // Use PACKUS if the input has zero-bits that extend all the way to the
42167  // packed/truncated value. e.g. masks, zext_in_reg, etc.
42168  KnownBits Known = DAG.computeKnownBits(In);
42169  unsigned NumLeadingZeroBits = Known.countMinLeadingZeros();
42170  if (NumLeadingZeroBits >= (InSVT.getSizeInBits() - NumPackedZeroBits))
42171    return truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget);
42172
42173  // Use PACKSS if the input has sign-bits that extend all the way to the
42174  // packed/truncated value. e.g. Comparison result, sext_in_reg, etc.
42175  unsigned NumSignBits = DAG.ComputeNumSignBits(In);
42176  if (NumSignBits > (InSVT.getSizeInBits() - NumPackedSignBits))
42177    return truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget);
42178
42179  return SDValue();
42180}
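// A typical hit: truncating a vXi16 comparison result (every lane is 0 or -1,
// so all 16 bits are sign bits) to vXi8, e.g. v16i16 -> v16i8, takes the
// PACKSS path and becomes a PACKSSWB of the two halves rather than a generic
// shuffle-based truncation.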
42181
42182// Try to form a MULHU or MULHS node by looking for
42183// (trunc (srl (mul ext, ext), 16))
42184// TODO: This is X86 specific because we want to be able to handle wide types
42185// before type legalization. But we can only do it if the vector will be
42186// legalized via widening/splitting. Type legalization can't handle promotion
42187// of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
42188// combiner.
42189static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
42190                            SelectionDAG &DAG, const X86Subtarget &Subtarget) {
42191  // First instruction should be a right shift of a multiply.
42192  if (Src.getOpcode() != ISD::SRL ||
42193      Src.getOperand(0).getOpcode() != ISD::MUL)
42194    return SDValue();
42195
42196  if (!Subtarget.hasSSE2())
42197    return SDValue();
42198
42199  // Only handle vXi16 types that are at least 128 bits unless they will be
42200  // widened.
42201  if (!VT.isVector() || VT.getVectorElementType() != MVT::i16)
42202    return SDValue();
42203
42204  // Input type should be vXi32.
42205  EVT InVT = Src.getValueType();
42206  if (InVT.getVectorElementType() != MVT::i32)
42207    return SDValue();
42208
42209  // Need a shift by 16.
42210  APInt ShiftAmt;
42211  if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
42212      ShiftAmt != 16)
42213    return SDValue();
42214
42215  SDValue LHS = Src.getOperand(0).getOperand(0);
42216  SDValue RHS = Src.getOperand(0).getOperand(1);
42217
42218  unsigned ExtOpc = LHS.getOpcode();
42219  if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
42220      RHS.getOpcode() != ExtOpc)
42221    return SDValue();
42222
42223  // Peek through the extends.
42224  LHS = LHS.getOperand(0);
42225  RHS = RHS.getOperand(0);
42226
42227  // Ensure the input types match.
42228  if (LHS.getValueType() != VT || RHS.getValueType() != VT)
42229    return SDValue();
42230
42231  unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
42232  return DAG.getNode(Opc, DL, VT, LHS, RHS);
42233}
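// Shape of the match, with v8i16 %a and %b as the running example:
//   trunc (srl (mul (zext %a to v8i32), (zext %b to v8i32)), splat 16) to v8i16
// becomes ISD::MULHU of %a and %b (PMULHUW); the sign_extend form becomes
// ISD::MULHS (PMULHW).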
42234
42235// Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
42236// from one vector with signed bytes from another vector, adds together
42237// adjacent pairs of 16-bit products, and saturates the result before
42238// truncating to 16-bits.
42239//
42240// Which looks something like this:
42241// (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
42242//                 (mul (zext (odd elts (i8 A))), (sext (odd elts (i8 B)))))))
42243static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG,
42244                               const X86Subtarget &Subtarget,
42245                               const SDLoc &DL) {
42246  if (!VT.isVector() || !Subtarget.hasSSSE3())
42247    return SDValue();
42248
42249  unsigned NumElems = VT.getVectorNumElements();
42250  EVT ScalarVT = VT.getVectorElementType();
42251  if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems))
42252    return SDValue();
42253
42254  SDValue SSatVal = detectSSatPattern(In, VT);
42255  if (!SSatVal || SSatVal.getOpcode() != ISD::ADD)
42256    return SDValue();
42257
42258  // Ok this is a signed saturation of an ADD. See if this ADD is adding pairs
42259  // of multiplies from even/odd elements.
42260  SDValue N0 = SSatVal.getOperand(0);
42261  SDValue N1 = SSatVal.getOperand(1);
42262
42263  if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
42264    return SDValue();
42265
42266  SDValue N00 = N0.getOperand(0);
42267  SDValue N01 = N0.getOperand(1);
42268  SDValue N10 = N1.getOperand(0);
42269  SDValue N11 = N1.getOperand(1);
42270
42271  // TODO: Handle constant vectors and use knownbits/computenumsignbits?
42272  // Canonicalize zero_extend to LHS.
42273  if (N01.getOpcode() == ISD::ZERO_EXTEND)
42274    std::swap(N00, N01);
42275  if (N11.getOpcode() == ISD::ZERO_EXTEND)
42276    std::swap(N10, N11);
42277
42278  // Ensure we have a zero_extend and a sign_extend.
42279  if (N00.getOpcode() != ISD::ZERO_EXTEND ||
42280      N01.getOpcode() != ISD::SIGN_EXTEND ||
42281      N10.getOpcode() != ISD::ZERO_EXTEND ||
42282      N11.getOpcode() != ISD::SIGN_EXTEND)
42283    return SDValue();
42284
42285  // Peek through the extends.
42286  N00 = N00.getOperand(0);
42287  N01 = N01.getOperand(0);
42288  N10 = N10.getOperand(0);
42289  N11 = N11.getOperand(0);
42290
42291  // Ensure the extend is from vXi8.
42292  if (N00.getValueType().getVectorElementType() != MVT::i8 ||
42293      N01.getValueType().getVectorElementType() != MVT::i8 ||
42294      N10.getValueType().getVectorElementType() != MVT::i8 ||
42295      N11.getValueType().getVectorElementType() != MVT::i8)
42296    return SDValue();
42297
42298  // All inputs should be build_vectors.
42299  if (N00.getOpcode() != ISD::BUILD_VECTOR ||
42300      N01.getOpcode() != ISD::BUILD_VECTOR ||
42301      N10.getOpcode() != ISD::BUILD_VECTOR ||
42302      N11.getOpcode() != ISD::BUILD_VECTOR)
42303    return SDValue();
42304
42305  // N00/N10 are zero extended. N01/N11 are sign extended.
42306
42307  // For each element, we need to ensure we have an odd element from one vector
42308  // multiplied by the odd element of another vector and the even element from
42309  // one of the same vectors being multiplied by the even element from the
42310  // other vector. So we need to make sure for each element i, this operator
42311  // is being performed:
42312  //  A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
42313  SDValue ZExtIn, SExtIn;
42314  for (unsigned i = 0; i != NumElems; ++i) {
42315    SDValue N00Elt = N00.getOperand(i);
42316    SDValue N01Elt = N01.getOperand(i);
42317    SDValue N10Elt = N10.getOperand(i);
42318    SDValue N11Elt = N11.getOperand(i);
42319    // TODO: Be more tolerant to undefs.
42320    if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
42321        N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
42322        N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
42323        N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
42324      return SDValue();
42325    auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
42326    auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
42327    auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
42328    auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
42329    if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
42330      return SDValue();
42331    unsigned IdxN00 = ConstN00Elt->getZExtValue();
42332    unsigned IdxN01 = ConstN01Elt->getZExtValue();
42333    unsigned IdxN10 = ConstN10Elt->getZExtValue();
42334    unsigned IdxN11 = ConstN11Elt->getZExtValue();
42335    // Add is commutative so indices can be reordered.
42336    if (IdxN00 > IdxN10) {
42337      std::swap(IdxN00, IdxN10);
42338      std::swap(IdxN01, IdxN11);
42339    }
42340    // N0 indices be the even element. N1 indices must be the next odd element.
42341    // N0 indices must be the even element. N1 indices must be the next odd element.
42342        IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
42343      return SDValue();
42344    SDValue N00In = N00Elt.getOperand(0);
42345    SDValue N01In = N01Elt.getOperand(0);
42346    SDValue N10In = N10Elt.getOperand(0);
42347    SDValue N11In = N11Elt.getOperand(0);
42348    // The first time we find an input, capture it.
42349    if (!ZExtIn) {
42350      ZExtIn = N00In;
42351      SExtIn = N01In;
42352    }
42353    if (ZExtIn != N00In || SExtIn != N01In ||
42354        ZExtIn != N10In || SExtIn != N11In)
42355      return SDValue();
42356  }
42357
42358  auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
42359                         ArrayRef<SDValue> Ops) {
42360    // Shrink by adding truncate nodes and let DAGCombine fold with the
42361    // sources.
42362    EVT InVT = Ops[0].getValueType();
42363    assert(InVT.getScalarType() == MVT::i8 &&
42364           "Unexpected scalar element type");
42365    assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
42366    EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
42367                                 InVT.getVectorNumElements() / 2);
42368    return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]);
42369  };
42370  return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn },
42371                          PMADDBuilder);
42372}
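// For reference, VPMADDUBSW computes per i16 result lane
//   sat_i16(zext(A[2i]) * sext(B[2i]) + zext(A[2i+1]) * sext(B[2i+1]))
// which is exactly the even/odd zext*sext structure with signed saturation
// that is verified element by element above.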
42373
42374static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
42375                               const X86Subtarget &Subtarget) {
42376  EVT VT = N->getValueType(0);
42377  SDValue Src = N->getOperand(0);
42378  SDLoc DL(N);
42379
42380  // Attempt to pre-truncate inputs to arithmetic ops instead.
42381  if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
42382    return V;
42383
42384  // Try to detect AVG pattern first.
42385  if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
42386    return Avg;
42387
42388  // Try to detect PMADD
42389  if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
42390    return PMAdd;
42391
42392  // Try to combine truncation with signed/unsigned saturation.
42393  if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
42394    return Val;
42395
42396  // Try to combine PMULHUW/PMULHW for vXi16.
42397  if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget))
42398    return V;
42399
42400  // The bitcast source is a direct mmx result.
42401  // Detect bitcasts between i32 to x86mmx
42402  if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
42403    SDValue BCSrc = Src.getOperand(0);
42404    if (BCSrc.getValueType() == MVT::x86mmx)
42405      return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
42406  }
42407
42408  // Try to truncate extended sign/zero bits with PACKSS/PACKUS.
42409  if (SDValue V = combineVectorSignBitsTruncation(N, DL, DAG, Subtarget))
42410    return V;
42411
42412  return combineVectorTruncation(N, DAG, Subtarget);
42413}
42414
42415static SDValue combineVTRUNC(SDNode *N, SelectionDAG &DAG) {
42416  EVT VT = N->getValueType(0);
42417  SDValue In = N->getOperand(0);
42418  SDLoc DL(N);
42419
42420  if (auto SSatVal = detectSSatPattern(In, VT))
42421    return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
42422  if (auto USatVal = detectUSatPattern(In, VT, DAG, DL))
42423    return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
42424
42425  return SDValue();
42426}
42427
42428/// Returns the negated value if the node \p N flips sign of FP value.
42429/// Returns the negated value if the node \p N flips the sign of an FP value.
42430///
42431/// An FP-negation node may have different forms: FNEG(x), FXOR(x, 0x80000000),
42432/// or FSUB(0, x).
42433/// AVX512F does not have FXOR, so FNEG is lowered as
42434/// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))).
42435/// In this case we go through all bitcasts.
42436/// This also recognizes a splat of a negated value and returns the splat of
42437/// that value.
42438  if (N->getOpcode() == ISD::FNEG)
42439    return N->getOperand(0);
42440
42441  // Don't recurse exponentially.
42442  if (Depth > SelectionDAG::MaxRecursionDepth)
42443    return SDValue();
42444
42445  unsigned ScalarSize = N->getValueType(0).getScalarSizeInBits();
42446
42447  SDValue Op = peekThroughBitcasts(SDValue(N, 0));
42448  EVT VT = Op->getValueType(0);
42449
42450  // Make sure the element size doesn't change.
42451  if (VT.getScalarSizeInBits() != ScalarSize)
42452    return SDValue();
42453
42454  unsigned Opc = Op.getOpcode();
42455  switch (Opc) {
42456  case ISD::VECTOR_SHUFFLE: {
42457    // For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate
42458    // of this is VECTOR_SHUFFLE(-VEC1, UNDEF).  The mask can be anything here.
42459    if (!Op.getOperand(1).isUndef())
42460      return SDValue();
42461    if (SDValue NegOp0 = isFNEG(DAG, Op.getOperand(0).getNode(), Depth + 1))
42462      if (NegOp0.getValueType() == VT) // FIXME: Can we do better?
42463        return DAG.getVectorShuffle(VT, SDLoc(Op), NegOp0, DAG.getUNDEF(VT),
42464                                    cast<ShuffleVectorSDNode>(Op)->getMask());
42465    break;
42466  }
42467  case ISD::INSERT_VECTOR_ELT: {
42468    // Negate of INSERT_VECTOR_ELT(UNDEF, V, INDEX) is INSERT_VECTOR_ELT(UNDEF,
42469    // -V, INDEX).
42470    SDValue InsVector = Op.getOperand(0);
42471    SDValue InsVal = Op.getOperand(1);
42472    if (!InsVector.isUndef())
42473      return SDValue();
42474    if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode(), Depth + 1))
42475      if (NegInsVal.getValueType() == VT.getVectorElementType()) // FIXME
42476        return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), VT, InsVector,
42477                           NegInsVal, Op.getOperand(2));
42478    break;
42479  }
42480  case ISD::FSUB:
42481  case ISD::XOR:
42482  case X86ISD::FXOR: {
42483    SDValue Op1 = Op.getOperand(1);
42484    SDValue Op0 = Op.getOperand(0);
42485
42486    // For XOR and FXOR, we want to check if constant
42487    // bits of Op1 are sign bit masks. For FSUB, we
42488    // have to check if constant bits of Op0 are sign
42489    // bit masks and hence we swap the operands.
42490    if (Opc == ISD::FSUB)
42491      std::swap(Op0, Op1);
42492
42493    APInt UndefElts;
42494    SmallVector<APInt, 16> EltBits;
42495    // Extract constant bits and see if they are all
42496    // sign bit masks. Ignore the undef elements.
42497    if (getTargetConstantBitsFromNode(Op1, ScalarSize, UndefElts, EltBits,
42498                                      /* AllowWholeUndefs */ true,
42499                                      /* AllowPartialUndefs */ false)) {
42500      for (unsigned I = 0, E = EltBits.size(); I < E; I++)
42501        if (!UndefElts[I] && !EltBits[I].isSignMask())
42502          return SDValue();
42503
42504      return peekThroughBitcasts(Op0);
42505    }
42506  }
42507  }
42508
42509  return SDValue();
42510}
42511
42512static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc,
42513                                bool NegRes) {
42514  if (NegMul) {
42515    switch (Opcode) {
42516    default: llvm_unreachable("Unexpected opcode");
42517    case ISD::FMA:             Opcode = X86ISD::FNMADD;       break;
42518    case X86ISD::FMADD_RND:    Opcode = X86ISD::FNMADD_RND;   break;
42519    case X86ISD::FMSUB:        Opcode = X86ISD::FNMSUB;       break;
42520    case X86ISD::FMSUB_RND:    Opcode = X86ISD::FNMSUB_RND;   break;
42521    case X86ISD::FNMADD:       Opcode = ISD::FMA;             break;
42522    case X86ISD::FNMADD_RND:   Opcode = X86ISD::FMADD_RND;    break;
42523    case X86ISD::FNMSUB:       Opcode = X86ISD::FMSUB;        break;
42524    case X86ISD::FNMSUB_RND:   Opcode = X86ISD::FMSUB_RND;    break;
42525    }
42526  }
42527
42528  if (NegAcc) {
42529    switch (Opcode) {
42530    default: llvm_unreachable("Unexpected opcode");
42531    case ISD::FMA:             Opcode = X86ISD::FMSUB;        break;
42532    case X86ISD::FMADD_RND:    Opcode = X86ISD::FMSUB_RND;    break;
42533    case X86ISD::FMSUB:        Opcode = ISD::FMA;             break;
42534    case X86ISD::FMSUB_RND:    Opcode = X86ISD::FMADD_RND;    break;
42535    case X86ISD::FNMADD:       Opcode = X86ISD::FNMSUB;       break;
42536    case X86ISD::FNMADD_RND:   Opcode = X86ISD::FNMSUB_RND;   break;
42537    case X86ISD::FNMSUB:       Opcode = X86ISD::FNMADD;       break;
42538    case X86ISD::FNMSUB_RND:   Opcode = X86ISD::FNMADD_RND;   break;
42539    case X86ISD::FMADDSUB:     Opcode = X86ISD::FMSUBADD;     break;
42540    case X86ISD::FMADDSUB_RND: Opcode = X86ISD::FMSUBADD_RND; break;
42541    case X86ISD::FMSUBADD:     Opcode = X86ISD::FMADDSUB;     break;
42542    case X86ISD::FMSUBADD_RND: Opcode = X86ISD::FMADDSUB_RND; break;
42543    }
42544  }
42545
42546  if (NegRes) {
42547    switch (Opcode) {
42548    default: llvm_unreachable("Unexpected opcode");
42549    case ISD::FMA:             Opcode = X86ISD::FNMSUB;       break;
42550    case X86ISD::FMADD_RND:    Opcode = X86ISD::FNMSUB_RND;   break;
42551    case X86ISD::FMSUB:        Opcode = X86ISD::FNMADD;       break;
42552    case X86ISD::FMSUB_RND:    Opcode = X86ISD::FNMADD_RND;   break;
42553    case X86ISD::FNMADD:       Opcode = X86ISD::FMSUB;        break;
42554    case X86ISD::FNMADD_RND:   Opcode = X86ISD::FMSUB_RND;    break;
42555    case X86ISD::FNMSUB:       Opcode = ISD::FMA;             break;
42556    case X86ISD::FNMSUB_RND:   Opcode = X86ISD::FMADD_RND;    break;
42557    }
42558  }
42559
42560  return Opcode;
42561}
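
// A few illustrative spot checks of the mapping above, using the semantics
// FMADD = a*b+c, FMSUB = a*b-c, FNMADD = -(a*b)+c and FNMSUB = -(a*b)-c:
//   NegMul only: ISD::FMA --> X86ISD::FNMADD  since (-a)*b + c == -(a*b) + c
//   NegAcc only: ISD::FMA --> X86ISD::FMSUB   since a*b + (-c) == a*b - c
//   NegRes only: ISD::FMA --> X86ISD::FNMSUB  since -(a*b + c) == -(a*b) - c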
42562
42563/// Do target-specific dag combines on floating point negations.
42564static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
42565                           const X86Subtarget &Subtarget) {
42566  EVT OrigVT = N->getValueType(0);
42567  SDValue Arg = isFNEG(DAG, N);
42568  if (!Arg)
42569    return SDValue();
42570
42571  EVT VT = Arg.getValueType();
42572  EVT SVT = VT.getScalarType();
42573  SDLoc DL(N);
42574
42575  // Let legalize expand this if it isn't a legal type yet.
42576  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
42577    return SDValue();
42578
42579  // If we're negating a FMUL node on a target with FMA, then we can avoid the
42580  // use of a constant by performing (-0 - A*B) instead.
42581  // FIXME: Check rounding control flags as well once it becomes available.
42582  if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
42583      Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
42584    SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
42585    SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
42586                                  Arg.getOperand(1), Zero);
42587    return DAG.getBitcast(OrigVT, NewNode);
42588  }
42589
42590  // If we're negating an FMA node, then we can adjust the
42591  // instruction to include the extra negation.
42592  if (Arg.hasOneUse() && Subtarget.hasAnyFMA()) {
42593    switch (Arg.getOpcode()) {
42594    case ISD::FMA:
42595    case X86ISD::FMSUB:
42596    case X86ISD::FNMADD:
42597    case X86ISD::FNMSUB:
42598    case X86ISD::FMADD_RND:
42599    case X86ISD::FMSUB_RND:
42600    case X86ISD::FNMADD_RND:
42601    case X86ISD::FNMSUB_RND: {
42602      // We can't handle a scalar intrinsic node here because it would only
42603      // invert one element and not the whole vector. But we could try to handle
42604      // a negation of the lower element only.
42605      unsigned NewOpcode = negateFMAOpcode(Arg.getOpcode(), false, false, true);
42606      return DAG.getBitcast(OrigVT, DAG.getNode(NewOpcode, DL, VT, Arg->ops()));
42607    }
42608    }
42609  }
42610
42611  return SDValue();
42612}
42613
42614char X86TargetLowering::isNegatibleForFree(SDValue Op, SelectionDAG &DAG,
42615                                           bool LegalOperations,
42616                                           bool ForCodeSize,
42617                                           unsigned Depth) const {
42618  // fneg patterns are removable even if they have multiple uses.
42619  if (isFNEG(DAG, Op.getNode(), Depth))
42620    return 2;
42621
42622  // Don't recurse exponentially.
42623  if (Depth > SelectionDAG::MaxRecursionDepth)
42624    return 0;
42625
42626  EVT VT = Op.getValueType();
42627  EVT SVT = VT.getScalarType();
42628  switch (Op.getOpcode()) {
42629  case ISD::FMA:
42630  case X86ISD::FMSUB:
42631  case X86ISD::FNMADD:
42632  case X86ISD::FNMSUB:
42633  case X86ISD::FMADD_RND:
42634  case X86ISD::FMSUB_RND:
42635  case X86ISD::FNMADD_RND:
42636  case X86ISD::FNMSUB_RND: {
42637    if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
42638        !(SVT == MVT::f32 || SVT == MVT::f64) || !LegalOperations)
42639      break;
42640
42641    // This is always negatible for free but we might be able to remove some
42642    // extra operand negations as well.
42643    for (int i = 0; i != 3; ++i) {
42644      char V = isNegatibleForFree(Op.getOperand(i), DAG, LegalOperations,
42645                                  ForCodeSize, Depth + 1);
42646      if (V == 2)
42647        return V;
42648    }
42649    return 1;
42650  }
42651  }
42652
42653  return TargetLowering::isNegatibleForFree(Op, DAG, LegalOperations,
42654                                            ForCodeSize, Depth);
42655}
42656
42657SDValue X86TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
42658                                                bool LegalOperations,
42659                                                bool ForCodeSize,
42660                                                unsigned Depth) const {
42661  // fneg patterns are removable even if they have multiple uses.
42662  if (SDValue Arg = isFNEG(DAG, Op.getNode(), Depth))
42663    return DAG.getBitcast(Op.getValueType(), Arg);
42664
42665  EVT VT = Op.getValueType();
42666  EVT SVT = VT.getScalarType();
42667  unsigned Opc = Op.getOpcode();
42668  switch (Opc) {
42669  case ISD::FMA:
42670  case X86ISD::FMSUB:
42671  case X86ISD::FNMADD:
42672  case X86ISD::FNMSUB:
42673  case X86ISD::FMADD_RND:
42674  case X86ISD::FMSUB_RND:
42675  case X86ISD::FNMADD_RND:
42676  case X86ISD::FNMSUB_RND: {
42677    if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
42678        !(SVT == MVT::f32 || SVT == MVT::f64) || !LegalOperations)
42679      break;
42680
42681    // This is always negatible for free but we might be able to remove some
42682    // extra operand negations as well.
42683    SmallVector<SDValue, 4> NewOps(Op.getNumOperands(), SDValue());
42684    for (int i = 0; i != 3; ++i) {
42685      char V = isNegatibleForFree(Op.getOperand(i), DAG, LegalOperations,
42686                                  ForCodeSize, Depth + 1);
42687      if (V == 2)
42688        NewOps[i] = getNegatedExpression(Op.getOperand(i), DAG, LegalOperations,
42689                                         ForCodeSize, Depth + 1);
42690    }
42691
42692    bool NegA = !!NewOps[0];
42693    bool NegB = !!NewOps[1];
42694    bool NegC = !!NewOps[2];
42695    unsigned NewOpc = negateFMAOpcode(Opc, NegA != NegB, NegC, true);
42696
42697    // Fill in the non-negated ops with the original values.
42698    for (int i = 0, e = Op.getNumOperands(); i != e; ++i)
42699      if (!NewOps[i])
42700        NewOps[i] = Op.getOperand(i);
42701    return DAG.getNode(NewOpc, SDLoc(Op), VT, NewOps);
42702  }
42703  }
42704
42705  return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
42706                                              ForCodeSize, Depth);
42707}
42708
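/// Replace an X86-specific FP logic node with the equivalent logic node on the
/// corresponding integer vector type (when SSE2 integer vectors are available),
/// e.g. (illustrative):
///   (v4f32 FAND X, Y) --> (bitcast (v4i32 AND (bitcast X), (bitcast Y)))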
42709static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
42710                                 const X86Subtarget &Subtarget) {
42711  MVT VT = N->getSimpleValueType(0);
42712  // If we have integer vector types available, use the integer opcodes.
42713  if (!VT.isVector() || !Subtarget.hasSSE2())
42714    return SDValue();
42715
42716  SDLoc dl(N);
42717
42718  unsigned IntBits = VT.getScalarSizeInBits();
42719  MVT IntSVT = MVT::getIntegerVT(IntBits);
42720  MVT IntVT = MVT::getVectorVT(IntSVT, VT.getSizeInBits() / IntBits);
42721
42722  SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
42723  SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
42724  unsigned IntOpcode;
42725  switch (N->getOpcode()) {
42726  default: llvm_unreachable("Unexpected FP logic op");
42727  case X86ISD::FOR:   IntOpcode = ISD::OR; break;
42728  case X86ISD::FXOR:  IntOpcode = ISD::XOR; break;
42729  case X86ISD::FAND:  IntOpcode = ISD::AND; break;
42730  case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
42731  }
42732  SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
42733  return DAG.getBitcast(VT, IntOp);
42734}
42735
42736
42737/// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val)
42738static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
42739  if (N->getOpcode() != ISD::XOR)
42740    return SDValue();
42741
42742  SDValue LHS = N->getOperand(0);
42743  if (!isOneConstant(N->getOperand(1)) || LHS->getOpcode() != X86ISD::SETCC)
42744    return SDValue();
42745
42746  X86::CondCode NewCC = X86::GetOppositeBranchCondition(
42747      X86::CondCode(LHS->getConstantOperandVal(0)));
42748  SDLoc DL(N);
42749  return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
42750}
42751
42752static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
42753                          TargetLowering::DAGCombinerInfo &DCI,
42754                          const X86Subtarget &Subtarget) {
42755  // If this is SSE1-only, convert to FXOR to avoid scalarization.
42756  if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() &&
42757      N->getValueType(0) == MVT::v4i32) {
42758    return DAG.getBitcast(
42759        MVT::v4i32, DAG.getNode(X86ISD::FXOR, SDLoc(N), MVT::v4f32,
42760                                DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
42761                                DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
42762  }
42763
42764  if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
42765    return Cmp;
42766
42767  if (DCI.isBeforeLegalizeOps())
42768    return SDValue();
42769
42770  if (SDValue SetCC = foldXor1SetCC(N, DAG))
42771    return SetCC;
42772
42773  if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
42774    return RV;
42775
42776  if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
42777    return FPLogic;
42778
42779  return combineFneg(N, DAG, Subtarget);
42780}
42781
42782static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
42783                            TargetLowering::DAGCombinerInfo &DCI,
42784                            const X86Subtarget &Subtarget) {
42785  SDValue Op0 = N->getOperand(0);
42786  SDValue Op1 = N->getOperand(1);
42787  EVT VT = N->getValueType(0);
42788  unsigned NumBits = VT.getSizeInBits();
42789
42790  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42791
42792  // TODO - Constant Folding.
42793  if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
42794    // Reduce Cst1 to the bottom 16 bits.
42795    // NOTE: SimplifyDemandedBits won't do this for constants.
42796    const APInt &Val1 = Cst1->getAPIntValue();
42797    APInt MaskedVal1 = Val1 & 0xFFFF;
42798    if (MaskedVal1 != Val1)
42799      return DAG.getNode(X86ISD::BEXTR, SDLoc(N), VT, Op0,
42800                         DAG.getConstant(MaskedVal1, SDLoc(N), VT));
42801  }
42802
42803  // Only the bottom 16 bits of the control are required.
42804  APInt DemandedMask(APInt::getLowBitsSet(NumBits, 16));
42805  if (TLI.SimplifyDemandedBits(Op1, DemandedMask, DCI))
42806    return SDValue(N, 0);
42807
42808  return SDValue();
42809}
42810
42811static bool isNullFPScalarOrVectorConst(SDValue V) {
42812  return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode());
42813}
42814
42815/// If a value is a scalar FP zero or a vector FP zero (potentially including
42816/// undefined elements), return a zero constant that may be used to fold away
42817/// that value. In the case of a vector, the returned constant will not contain
42818/// undefined elements even if the input parameter does. This makes it suitable
42819/// to be used as a replacement operand with operations (e.g., bitwise-and) where
42820/// an undef should not propagate.
42821static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG,
42822                                        const X86Subtarget &Subtarget) {
42823  if (!isNullFPScalarOrVectorConst(V))
42824    return SDValue();
42825
42826  if (V.getValueType().isVector())
42827    return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V));
42828
42829  return V;
42830}
42831
42832static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG,
42833                                      const X86Subtarget &Subtarget) {
42834  SDValue N0 = N->getOperand(0);
42835  SDValue N1 = N->getOperand(1);
42836  EVT VT = N->getValueType(0);
42837  SDLoc DL(N);
42838
42839  // Vector types are handled in combineANDXORWithAllOnesIntoANDNP().
42840  if (!((VT == MVT::f32 && Subtarget.hasSSE1()) ||
42841        (VT == MVT::f64 && Subtarget.hasSSE2()) ||
42842        (VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2())))
42843    return SDValue();
42844
42845  auto isAllOnesConstantFP = [](SDValue V) {
42846    if (V.getSimpleValueType().isVector())
42847      return ISD::isBuildVectorAllOnes(V.getNode());
42848    auto *C = dyn_cast<ConstantFPSDNode>(V);
42849    return C && C->getConstantFPValue()->isAllOnesValue();
42850  };
42851
42852  // fand (fxor X, -1), Y --> fandn X, Y
42853  if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1)))
42854    return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1);
42855
42856  // fand X, (fxor Y, -1) --> fandn Y, X
42857  if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1)))
42858    return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0);
42859
42860  return SDValue();
42861}
42862
42863/// Do target-specific dag combines on X86ISD::FAND nodes.
42864static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
42865                           const X86Subtarget &Subtarget) {
42866  // FAND(0.0, x) -> 0.0
42867  if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget))
42868    return V;
42869
42870  // FAND(x, 0.0) -> 0.0
42871  if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
42872    return V;
42873
42874  if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget))
42875    return V;
42876
42877  return lowerX86FPLogicOp(N, DAG, Subtarget);
42878}
42879
42880/// Do target-specific dag combines on X86ISD::FANDN nodes.
42881static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
42882                            const X86Subtarget &Subtarget) {
42883  // FANDN(0.0, x) -> x
42884  if (isNullFPScalarOrVectorConst(N->getOperand(0)))
42885    return N->getOperand(1);
42886
42887  // FANDN(x, 0.0) -> 0.0
42888  if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
42889    return V;
42890
42891  return lowerX86FPLogicOp(N, DAG, Subtarget);
42892}
42893
42894/// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
42895static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
42896                          const X86Subtarget &Subtarget) {
42897  assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
42898
42899  // F[X]OR(0.0, x) -> x
42900  if (isNullFPScalarOrVectorConst(N->getOperand(0)))
42901    return N->getOperand(1);
42902
42903  // F[X]OR(x, 0.0) -> x
42904  if (isNullFPScalarOrVectorConst(N->getOperand(1)))
42905    return N->getOperand(0);
42906
42907  if (SDValue NewVal = combineFneg(N, DAG, Subtarget))
42908    return NewVal;
42909
42910  return lowerX86FPLogicOp(N, DAG, Subtarget);
42911}
42912
42913/// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
42914static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
42915  assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
42916
42917  // FMIN/FMAX are commutative if no NaNs and no negative zeros are allowed.
42918  if (!DAG.getTarget().Options.NoNaNsFPMath ||
42919      !DAG.getTarget().Options.NoSignedZerosFPMath)
42920    return SDValue();
42921
42922  // Since NaNs and signed zeros can be ignored, convert the FMAX and FMIN
42923  // nodes into FMAXC and FMINC, which are commutative operations.
42924  unsigned NewOp = 0;
42925  switch (N->getOpcode()) {
42926    default: llvm_unreachable("unknown opcode");
42927    case X86ISD::FMIN:  NewOp = X86ISD::FMINC; break;
42928    case X86ISD::FMAX:  NewOp = X86ISD::FMAXC; break;
42929  }
42930
42931  return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
42932                     N->getOperand(0), N->getOperand(1));
42933}
42934
42935static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
42936                                     const X86Subtarget &Subtarget) {
42937  if (Subtarget.useSoftFloat())
42938    return SDValue();
42939
42940  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42941
42942  EVT VT = N->getValueType(0);
42943  if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
42944        (Subtarget.hasSSE2() && VT == MVT::f64) ||
42945        (VT.isVector() && TLI.isTypeLegal(VT))))
42946    return SDValue();
42947
42948  SDValue Op0 = N->getOperand(0);
42949  SDValue Op1 = N->getOperand(1);
42950  SDLoc DL(N);
42951  auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;
42952
42953  // If we don't have to respect NaN inputs, this is a direct translation to x86
42954  // min/max instructions.
42955  if (DAG.getTarget().Options.NoNaNsFPMath || N->getFlags().hasNoNaNs())
42956    return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
42957
42958  // If one of the operands is known non-NaN, use the native min/max
42959  // instructions with the non-NaN input as the second operand.
42960  if (DAG.isKnownNeverNaN(Op1))
42961    return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
42962  if (DAG.isKnownNeverNaN(Op0))
42963    return DAG.getNode(MinMaxOp, DL, VT, Op1, Op0, N->getFlags());
42964
42965  // If we have to respect NaN inputs, this takes at least 3 instructions.
42966  // Favor a library call when operating on a scalar and minimizing code size.
42967  if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize())
42968    return SDValue();
42969
42970  EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
42971                                         VT);
42972
42973  // There are 4 possibilities involving NaN inputs, and these are the required
42974  // outputs:
42975  //                   Op1
42976  //               Num     NaN
42977  //            ----------------
42978  //       Num  |  Max  |  Op0 |
42979  // Op0        ----------------
42980  //       NaN  |  Op1  |  NaN |
42981  //            ----------------
42982  //
42983  // The SSE FP max/min instructions were not designed for this case, but rather
42984  // to implement:
42985  //   Min = Op1 < Op0 ? Op1 : Op0
42986  //   Max = Op1 > Op0 ? Op1 : Op0
42987  //
42988  // So they always return Op0 if either input is a NaN. However, we can still
42989  // use those instructions for fmaxnum by selecting away a NaN input.
42990
42991  // If either operand is NaN, the 2nd source operand (Op0) is passed through.
42992  SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
42993  SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);
42994
42995  // If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
42996  // are NaN, the NaN value of Op1 is the result.
42997  return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax);
42998}
42999
43000static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG,
43001                                   TargetLowering::DAGCombinerInfo &DCI) {
43002  EVT VT = N->getValueType(0);
43003  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43004
43005  APInt KnownUndef, KnownZero;
43006  APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
43007  if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
43008                                     KnownZero, DCI))
43009    return SDValue(N, 0);
43010
43011  // Convert a full vector load into vzload when not all bits are needed.
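  // For example (illustrative): if only the low 2 elements of a v4i32 load are
  // converted, just 64 bits are needed, so the full 128-bit load can shrink to
  // a 64-bit X86ISD::VZEXT_LOAD that is bitcast back to the original type.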
43012  SDValue In = N->getOperand(0);
43013  MVT InVT = In.getSimpleValueType();
43014  if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
43015      ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
43016    assert(InVT.is128BitVector() && "Expected 128-bit input vector");
43017    LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
43018    // Unless the load is volatile or atomic.
43019    if (LN->isSimple()) {
43020      SDLoc dl(N);
43021      unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
43022      MVT MemVT = MVT::getIntegerVT(NumBits);
43023      MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
43024      SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
43025      SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
43026      SDValue VZLoad =
43027          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, MemVT,
43028                                  LN->getPointerInfo(),
43029                                  LN->getAlignment(),
43030                                  LN->getMemOperand()->getFlags());
43031      SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
43032                                    DAG.getBitcast(InVT, VZLoad));
43033      DCI.CombineTo(N, Convert);
43034      DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
43035      return SDValue(N, 0);
43036    }
43037  }
43038
43039  return SDValue();
43040}
43041
43042static SDValue combineCVTP2I_CVTTP2I(SDNode *N, SelectionDAG &DAG,
43043                                     TargetLowering::DAGCombinerInfo &DCI) {
43044  // FIXME: Handle strict fp nodes.
43045  EVT VT = N->getValueType(0);
43046
43047  // Convert a full vector load into vzload when not all bits are needed.
43048  SDValue In = N->getOperand(0);
43049  MVT InVT = In.getSimpleValueType();
43050  if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
43051      ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
43052    assert(InVT.is128BitVector() && "Expected 128-bit input vector");
43053    LoadSDNode *LN = cast<LoadSDNode>(In);
43054    // Unless the load is volatile or atomic.
43055    if (LN->isSimple()) {
43056      SDLoc dl(N);
43057      unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
43058      MVT MemVT = MVT::getFloatingPointVT(NumBits);
43059      MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
43060      SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
43061      SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
43062      SDValue VZLoad =
43063          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, MemVT,
43064                                  LN->getPointerInfo(),
43065                                  LN->getAlignment(),
43066                                  LN->getMemOperand()->getFlags());
43067      SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
43068                                    DAG.getBitcast(InVT, VZLoad));
43069      DCI.CombineTo(N, Convert);
43070      DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
43071      return SDValue(N, 0);
43072    }
43073  }
43074
43075  return SDValue();
43076}
43077
43078/// Do target-specific dag combines on X86ISD::ANDNP nodes.
43079static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
43080                            TargetLowering::DAGCombinerInfo &DCI,
43081                            const X86Subtarget &Subtarget) {
43082  MVT VT = N->getSimpleValueType(0);
43083
43084  // ANDNP(0, x) -> x
43085  if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
43086    return N->getOperand(1);
43087
43088  // ANDNP(x, 0) -> 0
43089  if (ISD::isBuildVectorAllZeros(N->getOperand(1).getNode()))
43090    return DAG.getConstant(0, SDLoc(N), VT);
43091
43092  // Turn ANDNP back to AND if input is inverted.
43093  if (SDValue Not = IsNOT(N->getOperand(0), DAG))
43094    return DAG.getNode(ISD::AND, SDLoc(N), VT, DAG.getBitcast(VT, Not),
43095                       N->getOperand(1));
43096
43097  // Attempt to recursively combine a bitmask ANDNP with shuffles.
43098  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
43099    SDValue Op(N, 0);
43100    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
43101      return Res;
43102  }
43103
43104  return SDValue();
43105}
43106
43107static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
43108                         TargetLowering::DAGCombinerInfo &DCI) {
43109  SDValue N0 = N->getOperand(0);
43110  SDValue N1 = N->getOperand(1);
43111
43112  // BT ignores high bits in the bit index operand.
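  // For example (illustrative), with an i32 bit-index operand only the low 5
  // bits are demanded, so a redundant (and N1, 31) can be peeled off below.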
43113  unsigned BitWidth = N1.getValueSizeInBits();
43114  APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
43115  if (SDValue DemandedN1 = DAG.GetDemandedBits(N1, DemandedMask))
43116    return DAG.getNode(X86ISD::BT, SDLoc(N), MVT::i32, N0, DemandedN1);
43117
43118  return SDValue();
43119}
43120
43121// Try to combine sext_in_reg of a cmov of constants by extending the constants.
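// For example (illustrative):
//   (sext_in_reg (cmov C0, C1, cond), i8)
//     --> (cmov (sext_in_reg C0, i8), (sext_in_reg C1, i8), cond)
// so the extension folds into the constants (with i16 CMOVs additionally
// promoted to i32 below).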
43122static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) {
43123  assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
43124
43125  EVT DstVT = N->getValueType(0);
43126
43127  SDValue N0 = N->getOperand(0);
43128  SDValue N1 = N->getOperand(1);
43129  EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
43130
43131  if (ExtraVT != MVT::i8 && ExtraVT != MVT::i16)
43132    return SDValue();
43133
43134  // Look through single use any_extends / truncs.
43135  SDValue IntermediateBitwidthOp;
43136  if ((N0.getOpcode() == ISD::ANY_EXTEND || N0.getOpcode() == ISD::TRUNCATE) &&
43137      N0.hasOneUse()) {
43138    IntermediateBitwidthOp = N0;
43139    N0 = N0.getOperand(0);
43140  }
43141
43142  // See if we have a single use cmov.
43143  if (N0.getOpcode() != X86ISD::CMOV || !N0.hasOneUse())
43144    return SDValue();
43145
43146  SDValue CMovOp0 = N0.getOperand(0);
43147  SDValue CMovOp1 = N0.getOperand(1);
43148
43149  // Make sure both operands are constants.
43150  if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
43151      !isa<ConstantSDNode>(CMovOp1.getNode()))
43152    return SDValue();
43153
43154  SDLoc DL(N);
43155
43156  // If we looked through an any_extend/trunc above, apply it to the constants.
43157  if (IntermediateBitwidthOp) {
43158    unsigned IntermediateOpc = IntermediateBitwidthOp.getOpcode();
43159    CMovOp0 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp0);
43160    CMovOp1 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp1);
43161  }
43162
43163  CMovOp0 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp0, N1);
43164  CMovOp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp1, N1);
43165
43166  EVT CMovVT = DstVT;
43167  // We do not want i16 CMOVs. Promote to i32 and truncate afterwards.
43168  if (DstVT == MVT::i16) {
43169    CMovVT = MVT::i32;
43170    CMovOp0 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp0);
43171    CMovOp1 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp1);
43172  }
43173
43174  SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, CMovVT, CMovOp0, CMovOp1,
43175                             N0.getOperand(2), N0.getOperand(3));
43176
43177  if (CMovVT != DstVT)
43178    CMov = DAG.getNode(ISD::TRUNCATE, DL, DstVT, CMov);
43179
43180  return CMov;
43181}
43182
43183static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
43184                                      const X86Subtarget &Subtarget) {
43185  assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
43186
43187  if (SDValue V = combineSextInRegCmov(N, DAG))
43188    return V;
43189
43190  EVT VT = N->getValueType(0);
43191  SDValue N0 = N->getOperand(0);
43192  SDValue N1 = N->getOperand(1);
43193  EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
43194  SDLoc dl(N);
43195
43196  // The SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE and
43197  // AVX2 since there is no sign-extended shift right operation on a vector
43198  // with 64-bit elements.
43199  // (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) ->
43200  // (v4i64 sext (v4i32 sext_in_reg (v4i32 x), ExtraVT))
43201  if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
43202      N0.getOpcode() == ISD::SIGN_EXTEND)) {
43203    SDValue N00 = N0.getOperand(0);
43204
43205    // EXTLOAD has a better solution on AVX2: it may be replaced with an
43206    // X86ISD::VSEXT node.
43207    if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
43208      if (!ISD::isNormalLoad(N00.getNode()))
43209        return SDValue();
43210
43211    if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
43212      SDValue Tmp =
43213          DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, N00, N1);
43214      return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
43215    }
43216  }
43217  return SDValue();
43218}
43219
43220/// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
43221/// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
43222/// Promoting a sign/zero extension ahead of a no overflow 'add' exposes
43223/// opportunities to combine math ops, use an LEA, or use a complex addressing
43224/// mode. This can eliminate extend, add, and shift instructions.
43225static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
43226                                   const X86Subtarget &Subtarget) {
43227  if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
43228      Ext->getOpcode() != ISD::ZERO_EXTEND)
43229    return SDValue();
43230
43231  // TODO: This should be valid for other integer types.
43232  EVT VT = Ext->getValueType(0);
43233  if (VT != MVT::i64)
43234    return SDValue();
43235
43236  SDValue Add = Ext->getOperand(0);
43237  if (Add.getOpcode() != ISD::ADD)
43238    return SDValue();
43239
43240  bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
43241  bool NSW = Add->getFlags().hasNoSignedWrap();
43242  bool NUW = Add->getFlags().hasNoUnsignedWrap();
43243
43244  // We need an 'add nsw' feeding into the 'sext' or an 'add nuw' feeding
43245  // into the 'zext'.
43246  if ((Sext && !NSW) || (!Sext && !NUW))
43247    return SDValue();
43248
43249  // Having a constant operand to the 'add' ensures that we are not increasing
43250  // the instruction count because the constant is extended for free below.
43251  // A constant operand can also become the displacement field of an LEA.
43252  auto *AddOp1 = dyn_cast<ConstantSDNode>(Add.getOperand(1));
43253  if (!AddOp1)
43254    return SDValue();
43255
43256  // Don't make the 'add' bigger if there's no hope of combining it with some
43257  // other 'add' or 'shl' instruction.
43258  // TODO: It may be profitable to generate simpler LEA instructions in place
43259  // of single 'add' instructions, but the cost model for selecting an LEA
43260  // currently has a high threshold.
43261  bool HasLEAPotential = false;
43262  for (auto *User : Ext->uses()) {
43263    if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
43264      HasLEAPotential = true;
43265      break;
43266    }
43267  }
43268  if (!HasLEAPotential)
43269    return SDValue();
43270
43271  // Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
43272  int64_t AddConstant = Sext ? AddOp1->getSExtValue() : AddOp1->getZExtValue();
43273  SDValue AddOp0 = Add.getOperand(0);
43274  SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
43275  SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT);
43276
43277  // The wider add is guaranteed to not wrap because both operands are
43278  // extended (sign-extended for the nsw case, zero-extended for the nuw case).
43279  SDNodeFlags Flags;
43280  Flags.setNoSignedWrap(NSW);
43281  Flags.setNoUnsignedWrap(NUW);
43282  return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags);
43283}
43284
43285// If we face {ANY,SIGN,ZERO}_EXTEND that is applied to a CMOV with constant
43286// operands and the result of CMOV is not used anywhere else - promote CMOV
43287// itself instead of promoting its result. This could be beneficial, because:
43288//     1) X86TargetLowering::EmitLoweredSelect later can do merging of two
43289//        (or more) pseudo-CMOVs only when they go one-after-another and
43290//        getting rid of result extension code after CMOV will help that.
43291//     2) Promotion of constant CMOV arguments is free, hence the
43292//        {ANY,SIGN,ZERO}_EXTEND will just be deleted.
43293//     3) A 16-bit CMOV encoding is 4 bytes, a 32-bit CMOV is 3 bytes, so this
43294//        promotion is also good in terms of code size.
43295//        (64-bit CMOV is 4 bytes, which is why we don't do 32-bit => 64-bit
43296//         promotion).
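// For example (illustrative):
//   (i32 zext (i16 cmov C0, C1, cond))
//     --> (i32 cmov (i32 zext C0), (i32 zext C1), cond)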
43297static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
43298  SDValue CMovN = Extend->getOperand(0);
43299  if (CMovN.getOpcode() != X86ISD::CMOV || !CMovN.hasOneUse())
43300    return SDValue();
43301
43302  EVT TargetVT = Extend->getValueType(0);
43303  unsigned ExtendOpcode = Extend->getOpcode();
43304  SDLoc DL(Extend);
43305
43306  EVT VT = CMovN.getValueType();
43307  SDValue CMovOp0 = CMovN.getOperand(0);
43308  SDValue CMovOp1 = CMovN.getOperand(1);
43309
43310  if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
43311      !isa<ConstantSDNode>(CMovOp1.getNode()))
43312    return SDValue();
43313
43314  // Only extend to i32 or i64.
43315  if (TargetVT != MVT::i32 && TargetVT != MVT::i64)
43316    return SDValue();
43317
43318  // Only extend from i16 unless it's a sign_extend from i32. Zext/aext from
43319  // i32 are free.
43320  if (VT != MVT::i16 && !(ExtendOpcode == ISD::SIGN_EXTEND && VT == MVT::i32))
43321    return SDValue();
43322
43323  // If this is a zero extend to i64, we should only extend to i32 and use a
43324  // free zero extend to finish.
43325  EVT ExtendVT = TargetVT;
43326  if (TargetVT == MVT::i64 && ExtendOpcode != ISD::SIGN_EXTEND)
43327    ExtendVT = MVT::i32;
43328
43329  CMovOp0 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp0);
43330  CMovOp1 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp1);
43331
43332  SDValue Res = DAG.getNode(X86ISD::CMOV, DL, ExtendVT, CMovOp0, CMovOp1,
43333                            CMovN.getOperand(2), CMovN.getOperand(3));
43334
43335  // Finish extending if needed.
43336  if (ExtendVT != TargetVT)
43337    Res = DAG.getNode(ExtendOpcode, DL, TargetVT, Res);
43338
43339  return Res;
43340}
43341
43342// Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
43343// This is more or less the reverse of combineBitcastvxi1.
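// For example (illustrative), (v8i16 zext (v8i1 bitcast (i8 X))) is built by
// broadcasting X to all 8 lanes, AND'ing lane i with the constant (1 << i),
// comparing each masked lane for equality with its mask bit, sign-extending
// that compare, and (for zext only) shifting right by 15 to leave a 0/1 lane.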
43344static SDValue
43345combineToExtendBoolVectorInReg(SDNode *N, SelectionDAG &DAG,
43346                               TargetLowering::DAGCombinerInfo &DCI,
43347                               const X86Subtarget &Subtarget) {
43348  unsigned Opcode = N->getOpcode();
43349  if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
43350      Opcode != ISD::ANY_EXTEND)
43351    return SDValue();
43352  if (!DCI.isBeforeLegalizeOps())
43353    return SDValue();
43354  if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
43355    return SDValue();
43356
43357  SDValue N0 = N->getOperand(0);
43358  EVT VT = N->getValueType(0);
43359  EVT SVT = VT.getScalarType();
43360  EVT InSVT = N0.getValueType().getScalarType();
43361  unsigned EltSizeInBits = SVT.getSizeInBits();
43362
43363  // The input must be a bool vector (bitcast from a scalar integer) that is
43364  // being extended to legal integer element types.
43365  if (!VT.isVector())
43366    return SDValue();
43367  if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
43368    return SDValue();
43369  if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
43370    return SDValue();
43371
43372  SDValue N00 = N0.getOperand(0);
43373  EVT SclVT = N0.getOperand(0).getValueType();
43374  if (!SclVT.isScalarInteger())
43375    return SDValue();
43376
43377  SDLoc DL(N);
43378  SDValue Vec;
43379  SmallVector<int, 32> ShuffleMask;
43380  unsigned NumElts = VT.getVectorNumElements();
43381  assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");
43382
43383  // Broadcast the scalar integer to the vector elements.
43384  if (NumElts > EltSizeInBits) {
43385    // If the scalar integer is greater than the vector element size, then we
43386    // must split it down into sub-sections for broadcasting. For example:
43387    //   i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
43388    //   i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
43389    assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
43390    unsigned Scale = NumElts / EltSizeInBits;
43391    EVT BroadcastVT =
43392        EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
43393    Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
43394    Vec = DAG.getBitcast(VT, Vec);
43395
43396    for (unsigned i = 0; i != Scale; ++i)
43397      ShuffleMask.append(EltSizeInBits, i);
43398  } else {
43399    // For a smaller scalar integer, we can simply any-extend it to the vector
43400    // element size (we don't care about the upper bits) and broadcast it to all
43401    // elements.
43402    SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
43403    Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
43404    ShuffleMask.append(NumElts, 0);
43405  }
43406  Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
43407
43408  // Now, mask the relevant bit in each element.
43409  SmallVector<SDValue, 32> Bits;
43410  for (unsigned i = 0; i != NumElts; ++i) {
43411    int BitIdx = (i % EltSizeInBits);
43412    APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
43413    Bits.push_back(DAG.getConstant(Bit, DL, SVT));
43414  }
43415  SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
43416  Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);
43417
43418  // Compare against the bitmask and extend the result.
43419  EVT CCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
43420  Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
43421  Vec = DAG.getSExtOrTrunc(Vec, DL, VT);
43422
43423  // For SEXT, this is now done, otherwise shift the result down for
43424  // zero-extension.
43425  if (Opcode == ISD::SIGN_EXTEND)
43426    return Vec;
43427  return DAG.getNode(ISD::SRL, DL, VT, Vec,
43428                     DAG.getConstant(EltSizeInBits - 1, DL, VT));
43429}
43430
43431// Attempt to combine a (sext/zext (setcc)) to a setcc with a xmm/ymm/zmm
43432// result type.
43433static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG,
43434                               const X86Subtarget &Subtarget) {
43435  SDValue N0 = N->getOperand(0);
43436  EVT VT = N->getValueType(0);
43437  SDLoc dl(N);
43438
43439  // Only do this combine with AVX512 for vector extends.
43440  if (!Subtarget.hasAVX512() || !VT.isVector() || N0.getOpcode() != ISD::SETCC)
43441    return SDValue();
43442
43443  // Only combine legal element types.
43444  EVT SVT = VT.getVectorElementType();
43445  if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32 &&
43446      SVT != MVT::i64 && SVT != MVT::f32 && SVT != MVT::f64)
43447    return SDValue();
43448
43449  // We can only do this if the vector size is 256 bits or less.
43450  unsigned Size = VT.getSizeInBits();
43451  if (Size > 256)
43452    return SDValue();
43453
43454  // Don't fold if the condition code can't be handled by PCMPEQ/PCMPGT since
43455  // those are the only integer compares we have.
43456  ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
43457  if (ISD::isUnsignedIntSetCC(CC))
43458    return SDValue();
43459
43460  // Only do this combine if the extension will be fully consumed by the setcc.
43461  EVT N00VT = N0.getOperand(0).getValueType();
43462  EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
43463  if (Size != MatchingVecType.getSizeInBits())
43464    return SDValue();
43465
43466  SDValue Res = DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
43467
43468  if (N->getOpcode() == ISD::ZERO_EXTEND)
43469    Res = DAG.getZeroExtendInReg(Res, dl, N0.getValueType().getScalarType());
43470
43471  return Res;
43472}
43473
43474static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
43475                           TargetLowering::DAGCombinerInfo &DCI,
43476                           const X86Subtarget &Subtarget) {
43477  SDValue N0 = N->getOperand(0);
43478  EVT VT = N->getValueType(0);
43479  EVT InVT = N0.getValueType();
43480  SDLoc DL(N);
43481
43482  if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
43483    return NewCMov;
43484
43485  if (!DCI.isBeforeLegalizeOps())
43486    return SDValue();
43487
43488  if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
43489    return V;
43490
43491  if (InVT == MVT::i1 && N0.getOpcode() == ISD::XOR &&
43492      isAllOnesConstant(N0.getOperand(1)) && N0.hasOneUse()) {
43493    // Invert and sign-extend a boolean is the same as zero-extend and subtract
43494    // 1 because 0 becomes -1 and 1 becomes 0. The subtract is efficiently
43495    // lowered with an LEA or a DEC. This is the same as: select Bool, 0, -1.
43496    // sext (xor Bool, -1) --> sub (zext Bool), 1
43497    SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
43498    return DAG.getNode(ISD::SUB, DL, VT, Zext, DAG.getConstant(1, DL, VT));
43499  }
43500
43501  if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
43502    return V;
43503
43504  if (VT.isVector())
43505    if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
43506      return R;
43507
43508  if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
43509    return NewAdd;
43510
43511  return SDValue();
43512}
43513
43514static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
43515                          TargetLowering::DAGCombinerInfo &DCI,
43516                          const X86Subtarget &Subtarget) {
43517  SDLoc dl(N);
43518  EVT VT = N->getValueType(0);
43519
43520  // Let legalize expand this if it isn't a legal type yet.
43521  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43522  if (!TLI.isTypeLegal(VT))
43523    return SDValue();
43524
43525  EVT ScalarVT = VT.getScalarType();
43526  if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget.hasAnyFMA())
43527    return SDValue();
43528
43529  SDValue A = N->getOperand(0);
43530  SDValue B = N->getOperand(1);
43531  SDValue C = N->getOperand(2);
43532
43533  auto invertIfNegative = [&DAG, &TLI, &DCI](SDValue &V) {
43534    bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
43535    bool LegalOperations = !DCI.isBeforeLegalizeOps();
43536    if (TLI.isNegatibleForFree(V, DAG, LegalOperations, CodeSize) == 2) {
43537      V = TLI.getNegatedExpression(V, DAG, LegalOperations, CodeSize);
43538      return true;
43539    }
43540    // Look through extract_vector_elts. If it comes from an FNEG, create a
43541    // new extract from the FNEG input.
43542    if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
43543        isNullConstant(V.getOperand(1))) {
43544      SDValue Vec = V.getOperand(0);
43545      if (TLI.isNegatibleForFree(Vec, DAG, LegalOperations, CodeSize) == 2) {
43546        SDValue NegVal =
43547            TLI.getNegatedExpression(Vec, DAG, LegalOperations, CodeSize);
43548        V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
43549                        NegVal, V.getOperand(1));
43550        return true;
43551      }
43552    }
43553
43554    return false;
43555  };
43556
43557  // Do not convert the passthru input of scalar intrinsics.
43558  // FIXME: We could allow negations of the lower element only.
43559  bool NegA = invertIfNegative(A);
43560  bool NegB = invertIfNegative(B);
43561  bool NegC = invertIfNegative(C);
43562
43563  if (!NegA && !NegB && !NegC)
43564    return SDValue();
43565
43566  unsigned NewOpcode =
43567      negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC, false);
43568
43569  if (N->getNumOperands() == 4)
43570    return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3));
43571  return DAG.getNode(NewOpcode, dl, VT, A, B, C);
43572}
43573
43574// Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C)
43575// Combine FMSUBADD(A, B, FNEG(C)) -> FMADDSUB(A, B, C)
43576static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG,
43577                               TargetLowering::DAGCombinerInfo &DCI) {
43578  SDLoc dl(N);
43579  EVT VT = N->getValueType(0);
43580  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43581  bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
43582  bool LegalOperations = !DCI.isBeforeLegalizeOps();
43583
43584  SDValue N2 = N->getOperand(2);
43585  if (TLI.isNegatibleForFree(N2, DAG, LegalOperations, CodeSize) != 2)
43586    return SDValue();
43587
43588  SDValue NegN2 = TLI.getNegatedExpression(N2, DAG, LegalOperations, CodeSize);
43589  unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), false, true, false);
43590
43591  if (N->getNumOperands() == 4)
43592    return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
43593                       NegN2, N->getOperand(3));
43594  return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
43595                     NegN2);
43596}
43597
43598static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
43599                           TargetLowering::DAGCombinerInfo &DCI,
43600                           const X86Subtarget &Subtarget) {
43601  // (i32 zext (and (i8  x86isd::setcc_carry), 1)) ->
43602  //           (and (i32 x86isd::setcc_carry), 1)
43603  // This eliminates the zext. This transformation is necessary because
43604  // ISD::SETCC is always legalized to i8.
43605  SDLoc dl(N);
43606  SDValue N0 = N->getOperand(0);
43607  EVT VT = N->getValueType(0);
43608
43609  if (N0.getOpcode() == ISD::AND &&
43610      N0.hasOneUse() &&
43611      N0.getOperand(0).hasOneUse()) {
43612    SDValue N00 = N0.getOperand(0);
43613    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
43614      if (!isOneConstant(N0.getOperand(1)))
43615        return SDValue();
43616      return DAG.getNode(ISD::AND, dl, VT,
43617                         DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
43618                                     N00.getOperand(0), N00.getOperand(1)),
43619                         DAG.getConstant(1, dl, VT));
43620    }
43621  }
43622
43623  if (N0.getOpcode() == ISD::TRUNCATE &&
43624      N0.hasOneUse() &&
43625      N0.getOperand(0).hasOneUse()) {
43626    SDValue N00 = N0.getOperand(0);
43627    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
43628      return DAG.getNode(ISD::AND, dl, VT,
43629                         DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
43630                                     N00.getOperand(0), N00.getOperand(1)),
43631                         DAG.getConstant(1, dl, VT));
43632    }
43633  }
43634
43635  if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
43636    return NewCMov;
43637
43638  if (DCI.isBeforeLegalizeOps())
43639    if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
43640      return V;
43641
43642  if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
43643    return V;
43644
43645  if (VT.isVector())
43646    if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
43647      return R;
43648
43649  if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
43650    return NewAdd;
43651
43652  if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
43653    return R;
43654
43655  // TODO: Combine with any target/faux shuffle.
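  // For example (illustrative): if A and B are v4i32 values whose upper 16
  // bits per element are known zero, then
  //   (v8i32 zext (v8i16 X86ISD::PACKUS A, B)) --> (v8i32 concat_vectors A, B)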
43656  if (N0.getOpcode() == X86ISD::PACKUS && N0.getValueSizeInBits() == 128 &&
43657      VT.getScalarSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits()) {
43658    SDValue N00 = N0.getOperand(0);
43659    SDValue N01 = N0.getOperand(1);
43660    unsigned NumSrcEltBits = N00.getScalarValueSizeInBits();
43661    APInt ZeroMask = APInt::getHighBitsSet(NumSrcEltBits, NumSrcEltBits / 2);
43662    if ((N00.isUndef() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
43663        (N01.isUndef() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
43664      return concatSubVectors(N00, N01, DAG, dl);
43665    }
43666  }
43667
43668  return SDValue();
43669}
43670
43671/// Recursive helper for combineVectorSizedSetCCEquality() to see if we have a
43672/// recognizable memcmp expansion.
43673static bool isOrXorXorTree(SDValue X, bool Root = true) {
43674  if (X.getOpcode() == ISD::OR)
43675    return isOrXorXorTree(X.getOperand(0), false) &&
43676           isOrXorXorTree(X.getOperand(1), false);
43677  if (Root)
43678    return false;
43679  return X.getOpcode() == ISD::XOR;
43680}
43681
43682/// Recursive helper for combineVectorSizedSetCCEquality() to emit the memcmp
43683/// expansion.
43684template<typename F>
43685static SDValue emitOrXorXorTree(SDValue X, SDLoc &DL, SelectionDAG &DAG,
43686                                EVT VecVT, EVT CmpVT, bool HasPT, F SToV) {
43687  SDValue Op0 = X.getOperand(0);
43688  SDValue Op1 = X.getOperand(1);
43689  if (X.getOpcode() == ISD::OR) {
43690    SDValue A = emitOrXorXorTree(Op0, DL, DAG, VecVT, CmpVT, HasPT, SToV);
43691    SDValue B = emitOrXorXorTree(Op1, DL, DAG, VecVT, CmpVT, HasPT, SToV);
43692    if (VecVT != CmpVT)
43693      return DAG.getNode(ISD::OR, DL, CmpVT, A, B);
43694    if (HasPT)
43695      return DAG.getNode(ISD::OR, DL, VecVT, A, B);
43696    return DAG.getNode(ISD::AND, DL, CmpVT, A, B);
43697  } else if (X.getOpcode() == ISD::XOR) {
43698    SDValue A = SToV(Op0);
43699    SDValue B = SToV(Op1);
43700    if (VecVT != CmpVT)
43701      return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETNE);
43702    if (HasPT)
43703      return DAG.getNode(ISD::XOR, DL, VecVT, A, B);
43704    return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
43705  }
43706  llvm_unreachable("Impossible");
43707}
43708
43709/// Try to map a 128-bit or larger integer comparison to vector instructions
43710/// before type legalization splits it up into chunks.
43711static SDValue combineVectorSizedSetCCEquality(SDNode *SetCC, SelectionDAG &DAG,
43712                                               const X86Subtarget &Subtarget) {
43713  ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
43714  assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate");
43715
43716  // We're looking for an oversized integer equality comparison.
43717  SDValue X = SetCC->getOperand(0);
43718  SDValue Y = SetCC->getOperand(1);
43719  EVT OpVT = X.getValueType();
43720  unsigned OpSize = OpVT.getSizeInBits();
43721  if (!OpVT.isScalarInteger() || OpSize < 128)
43722    return SDValue();
43723
43724  // Ignore a comparison with zero because that gets special treatment in
43725  // EmitTest(). But make an exception for the special case of a pair of
43726  // logically-combined vector-sized operands compared to zero. This pattern may
43727  // be generated by the memcmp expansion pass with oversized integer compares
43728  // (see PR33325).
43729  bool IsOrXorXorTreeCCZero = isNullConstant(Y) && isOrXorXorTree(X);
43730  if (isNullConstant(Y) && !IsOrXorXorTreeCCZero)
43731    return SDValue();
43732
43733  // Don't perform this combine if constructing the vector will be expensive.
43734  auto IsVectorBitCastCheap = [](SDValue X) {
43735    X = peekThroughBitcasts(X);
43736    return isa<ConstantSDNode>(X) || X.getValueType().isVector() ||
43737           X.getOpcode() == ISD::LOAD;
43738  };
43739  if ((!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y)) &&
43740      !IsOrXorXorTreeCCZero)
43741    return SDValue();
43742
43743  EVT VT = SetCC->getValueType(0);
43744  SDLoc DL(SetCC);
43745  bool HasAVX = Subtarget.hasAVX();
43746
43747  // Use XOR (plus OR) and PTEST after SSE4.1 for 128/256-bit operands.
43748  // Use PCMPNEQ (plus OR) and KORTEST for 512-bit operands.
43749  // Otherwise use PCMPEQ (plus AND) and mask testing.
43750  if ((OpSize == 128 && Subtarget.hasSSE2()) ||
43751      (OpSize == 256 && HasAVX) ||
43752      (OpSize == 512 && Subtarget.useAVX512Regs())) {
43753    bool HasPT = Subtarget.hasSSE41();
43754
43755    // PTEST and MOVMSK are slow on Knights Landing and Knights Mill and widened
43756    // vector registers are essentially free. (Technically, widening registers
43757    // prevents load folding, but the tradeoff is worth it.)
43758    bool PreferKOT = Subtarget.preferMaskRegisters();
43759    bool NeedZExt = PreferKOT && !Subtarget.hasVLX() && OpSize != 512;
43760
43761    EVT VecVT = MVT::v16i8;
43762    EVT CmpVT = PreferKOT ? MVT::v16i1 : VecVT;
43763    if (OpSize == 256) {
43764      VecVT = MVT::v32i8;
43765      CmpVT = PreferKOT ? MVT::v32i1 : VecVT;
43766    }
43767    EVT CastVT = VecVT;
43768    bool NeedsAVX512FCast = false;
43769    if (OpSize == 512 || NeedZExt) {
43770      if (Subtarget.hasBWI()) {
43771        VecVT = MVT::v64i8;
43772        CmpVT = MVT::v64i1;
43773        if (OpSize == 512)
43774          CastVT = VecVT;
43775      } else {
43776        VecVT = MVT::v16i32;
43777        CmpVT = MVT::v16i1;
43778        CastVT = OpSize == 512 ? VecVT :
43779                 OpSize == 256 ? MVT::v8i32 : MVT::v4i32;
43780        NeedsAVX512FCast = true;
43781      }
43782    }
43783
43784    auto ScalarToVector = [&](SDValue X) -> SDValue {
43785      bool TmpZext = false;
43786      EVT TmpCastVT = CastVT;
43787      if (X.getOpcode() == ISD::ZERO_EXTEND) {
43788        SDValue OrigX = X.getOperand(0);
43789        unsigned OrigSize = OrigX.getScalarValueSizeInBits();
43790        if (OrigSize < OpSize) {
43791          if (OrigSize == 128) {
43792            TmpCastVT = NeedsAVX512FCast ? MVT::v4i32 : MVT::v16i8;
43793            X = OrigX;
43794            TmpZext = true;
43795          } else if (OrigSize == 256) {
43796            TmpCastVT = NeedsAVX512FCast ? MVT::v8i32 : MVT::v32i8;
43797            X = OrigX;
43798            TmpZext = true;
43799          }
43800        }
43801      }
43802      X = DAG.getBitcast(TmpCastVT, X);
43803      if (!NeedZExt && !TmpZext)
43804        return X;
43805      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43806      MVT VecIdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
43807      return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT,
43808                         DAG.getConstant(0, DL, VecVT), X,
43809                         DAG.getConstant(0, DL, VecIdxVT));
43810    };
43811
43812    SDValue Cmp;
43813    if (IsOrXorXorTreeCCZero) {
43814      // This is a bitwise-combined equality comparison of 2 pairs of vectors:
43815      // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
43816      // Use 2 vector equality compares and 'and' the results before doing a
43817      // MOVMSK.
43818      Cmp = emitOrXorXorTree(X, DL, DAG, VecVT, CmpVT, HasPT, ScalarToVector);
43819    } else {
43820      SDValue VecX = ScalarToVector(X);
43821      SDValue VecY = ScalarToVector(Y);
43822      if (VecVT != CmpVT) {
43823        Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETNE);
43824      } else if (HasPT) {
43825        Cmp = DAG.getNode(ISD::XOR, DL, VecVT, VecX, VecY);
43826      } else {
43827        Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETEQ);
43828      }
43829    }
43830    // AVX512 should emit a setcc that will lower to kortest.
43831    if (VecVT != CmpVT) {
43832      EVT KRegVT = CmpVT == MVT::v64i1 ? MVT::i64 :
43833                   CmpVT == MVT::v32i1 ? MVT::i32 : MVT::i16;
43834      return DAG.getSetCC(DL, VT, DAG.getBitcast(KRegVT, Cmp),
43835                          DAG.getConstant(0, DL, KRegVT), CC);
43836    }
43837    if (HasPT) {
43838      SDValue BCCmp = DAG.getBitcast(OpSize == 256 ? MVT::v4i64 : MVT::v2i64,
43839                                     Cmp);
43840      SDValue PT = DAG.getNode(X86ISD::PTEST, DL, MVT::i32, BCCmp, BCCmp);
43841      X86::CondCode X86CC = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
43842      SDValue SetCC = getSETCC(X86CC, PT, DL, DAG);
43843      return DAG.getNode(ISD::TRUNCATE, DL, VT, SetCC.getValue(0));
43844    }
43845    // If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality.
43846    // setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq
43847    // setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne
43848    // setcc i256 X, Y, eq --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, eq
43849    // setcc i256 X, Y, ne --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, ne
43850    SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp);
43851    SDValue FFFFs = DAG.getConstant(OpSize == 128 ? 0xFFFF : 0xFFFFFFFF, DL,
43852                                    MVT::i32);
43853    return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC);
43854  }
43855
43856  return SDValue();
43857}
43858
43859static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
43860                            const X86Subtarget &Subtarget) {
43861  const ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
43862  const SDValue LHS = N->getOperand(0);
43863  const SDValue RHS = N->getOperand(1);
43864  EVT VT = N->getValueType(0);
43865  EVT OpVT = LHS.getValueType();
43866  SDLoc DL(N);
43867
43868  if (CC == ISD::SETNE || CC == ISD::SETEQ) {
43869    // 0-x == y --> x+y == 0
43870    // 0-x != y --> x+y != 0
43871    if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
43872        LHS.hasOneUse()) {
43873      SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, RHS, LHS.getOperand(1));
43874      return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
43875    }
43876    // x == 0-y --> x+y == 0
43877    // x != 0-y --> x+y != 0
43878    if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
43879        RHS.hasOneUse()) {
43880      SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
43881      return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
43882    }
43883
43884    if (SDValue V = combineVectorSizedSetCCEquality(N, DAG, Subtarget))
43885      return V;
43886  }
43887
43888  if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
43889      (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
43890    // Using temporaries to avoid messing up operand ordering for later
43891    // transformations if this doesn't work.
43892    SDValue Op0 = LHS;
43893    SDValue Op1 = RHS;
43894    ISD::CondCode TmpCC = CC;
43895    // Put build_vector on the right.
43896    if (Op0.getOpcode() == ISD::BUILD_VECTOR) {
43897      std::swap(Op0, Op1);
43898      TmpCC = ISD::getSetCCSwappedOperands(TmpCC);
43899    }
43900
43901    bool IsSEXT0 =
43902        (Op0.getOpcode() == ISD::SIGN_EXTEND) &&
43903        (Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1);
43904    bool IsVZero1 = ISD::isBuildVectorAllZeros(Op1.getNode());
43905
43906    if (IsSEXT0 && IsVZero1) {
43907      assert(VT == Op0.getOperand(0).getValueType() &&
43908             "Unexpected operand type");
43909      if (TmpCC == ISD::SETGT)
43910        return DAG.getConstant(0, DL, VT);
43911      if (TmpCC == ISD::SETLE)
43912        return DAG.getConstant(1, DL, VT);
43913      if (TmpCC == ISD::SETEQ || TmpCC == ISD::SETGE)
43914        return DAG.getNOT(DL, Op0.getOperand(0), VT);
43915
43916      assert((TmpCC == ISD::SETNE || TmpCC == ISD::SETLT) &&
43917             "Unexpected condition code!");
43918      return Op0.getOperand(0);
43919    }
43920  }
43921
43922  // If we have AVX512, but not BWI and this is a vXi16/vXi8 setcc, just
43923  // pre-promote its result type since vXi1 vectors don't get promoted
43924  // during type legalization.
43925  // NOTE: The element count check is to ignore operand types that need to
43926  // go through type promotion to a 128-bit vector.
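  // A rough example: (v16i1 setcc (v16i16 a), (v16i16 b), cc) becomes
  // (v16i1 trunc (v16i16 setcc a, b, cc)), so the compare follows its i16
  // operands through legalization rather than relying on vXi1 promotion.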
43927  if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.isVector() &&
43928      VT.getVectorElementType() == MVT::i1 &&
43929      (OpVT.getVectorElementType() == MVT::i8 ||
43930       OpVT.getVectorElementType() == MVT::i16)) {
43931    SDValue Setcc = DAG.getSetCC(DL, OpVT, LHS, RHS, CC);
43932    return DAG.getNode(ISD::TRUNCATE, DL, VT, Setcc);
43933  }
43934
43935  // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
43936  // to avoid scalarization via legalization because v4i32 is not a legal type.
43937  if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
43938      LHS.getValueType() == MVT::v4f32)
43939    return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);
43940
43941  return SDValue();
43942}
43943
43944static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
43945                             TargetLowering::DAGCombinerInfo &DCI,
43946                             const X86Subtarget &Subtarget) {
43947  SDValue Src = N->getOperand(0);
43948  MVT SrcVT = Src.getSimpleValueType();
43949  MVT VT = N->getSimpleValueType(0);
43950  unsigned NumBits = VT.getScalarSizeInBits();
43951  unsigned NumElts = SrcVT.getVectorNumElements();
43952
43953  // Perform constant folding.
43954  if (ISD::isBuildVectorOfConstantSDNodes(Src.getNode())) {
43955    assert(VT == MVT::i32 && "Unexpected result type");
43956    APInt Imm(32, 0);
43957    for (unsigned Idx = 0, e = Src.getNumOperands(); Idx < e; ++Idx) {
43958      if (!Src.getOperand(Idx).isUndef() &&
43959          Src.getConstantOperandAPInt(Idx).isNegative())
43960        Imm.setBit(Idx);
43961    }
43962    return DAG.getConstant(Imm, SDLoc(N), VT);
43963  }
43964
43965  // Look through int->fp bitcasts that don't change the element width.
43966  unsigned EltWidth = SrcVT.getScalarSizeInBits();
43967  if (Subtarget.hasSSE2() && Src.getOpcode() == ISD::BITCAST &&
43968      Src.getOperand(0).getScalarValueSizeInBits() == EltWidth)
43969    return DAG.getNode(X86ISD::MOVMSK, SDLoc(N), VT, Src.getOperand(0));
43970
43971  // Fold movmsk(not(x)) -> not(movmsk) to improve folding of movmsk results
43972  // with scalar comparisons.
43973  if (SDValue NotSrc = IsNOT(Src, DAG)) {
43974    SDLoc DL(N);
43975    APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
43976    NotSrc = DAG.getBitcast(SrcVT, NotSrc);
43977    return DAG.getNode(ISD::XOR, DL, VT,
43978                       DAG.getNode(X86ISD::MOVMSK, DL, VT, NotSrc),
43979                       DAG.getConstant(NotMask, DL, VT));
43980  }
43981
43982  // Simplify the inputs.
43983  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43984  APInt DemandedMask(APInt::getAllOnesValue(NumBits));
43985  if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
43986    return SDValue(N, 0);
43987
43988  return SDValue();
43989}
43990
43991static SDValue combineX86GatherScatter(SDNode *N, SelectionDAG &DAG,
43992                                       TargetLowering::DAGCombinerInfo &DCI) {
43993  // With vector masks we only demand the upper bit of the mask.
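  // For example, with a v4i64 mask only bit 63 of each element (its sign bit)
  // is consulted by the instruction, so any work that only affects the lower
  // bits of the mask elements can be simplified away.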
43994  SDValue Mask = cast<X86MaskedGatherScatterSDNode>(N)->getMask();
43995  if (Mask.getScalarValueSizeInBits() != 1) {
43996    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43997    APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
43998    if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
43999      return SDValue(N, 0);
44000  }
44001
44002  return SDValue();
44003}
44004
44005static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
44006                                    TargetLowering::DAGCombinerInfo &DCI) {
44007  SDLoc DL(N);
44008  auto *GorS = cast<MaskedGatherScatterSDNode>(N);
44009  SDValue Chain = GorS->getChain();
44010  SDValue Index = GorS->getIndex();
44011  SDValue Mask = GorS->getMask();
44012  SDValue Base = GorS->getBasePtr();
44013  SDValue Scale = GorS->getScale();
44014
44015  if (DCI.isBeforeLegalize()) {
44016    unsigned IndexWidth = Index.getScalarValueSizeInBits();
44017
44018    // Shrink constant indices if they are larger than 32-bits.
44019    // Only do this before legalize types since v2i64 could become v2i32.
44020    // FIXME: We could check that the type is legal if we're after legalize
44021    // types, but then we would need to construct test cases where that happens.
44022    // FIXME: We could support more than just constant vectors, but we need to
44023    // be careful with costing. A truncate that can be optimized out would be fine.
44024    // Otherwise we might only want to create a truncate if it avoids a split.
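    // For example, an index of (v2i64 build_vector 0, 16) consists of small
    // constants with plenty of sign bits, so it can be rebuilt as a
    // (v2i32 build_vector 0, 16) index before the wide type gets legalized.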
44025    if (auto *BV = dyn_cast<BuildVectorSDNode>(Index)) {
44026      if (BV->isConstant() && IndexWidth > 32 &&
44027          DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
44028        unsigned NumElts = Index.getValueType().getVectorNumElements();
44029        EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
44030        Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
44031        if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
44032          SDValue Ops[] = { Chain, Gather->getPassThru(),
44033                            Mask, Base, Index, Scale } ;
44034          return DAG.getMaskedGather(Gather->getVTList(),
44035                                     Gather->getMemoryVT(), DL, Ops,
44036                                     Gather->getMemOperand(),
44037                                     Gather->getIndexType());
44038        }
44039        auto *Scatter = cast<MaskedScatterSDNode>(GorS);
44040        SDValue Ops[] = { Chain, Scatter->getValue(),
44041                          Mask, Base, Index, Scale };
44042        return DAG.getMaskedScatter(Scatter->getVTList(),
44043                                    Scatter->getMemoryVT(), DL,
44044                                    Ops, Scatter->getMemOperand(),
44045                                    Scatter->getIndexType());
44046      }
44047    }
44048
44049    // Shrink any sign/zero extend from a source of 32 bits or smaller to a
44050    // result wider than 32 bits if there are sufficient sign bits. Only do this
44051    // before legalize types to avoid creating illegal types in truncate.
44052    if ((Index.getOpcode() == ISD::SIGN_EXTEND ||
44053         Index.getOpcode() == ISD::ZERO_EXTEND) &&
44054        IndexWidth > 32 &&
44055        Index.getOperand(0).getScalarValueSizeInBits() <= 32 &&
44056        DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
44057      unsigned NumElts = Index.getValueType().getVectorNumElements();
44058      EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
44059      Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
44060      if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
44061        SDValue Ops[] = { Chain, Gather->getPassThru(),
44062                          Mask, Base, Index, Scale } ;
44063        return DAG.getMaskedGather(Gather->getVTList(),
44064                                   Gather->getMemoryVT(), DL, Ops,
44065                                   Gather->getMemOperand(),
44066                                   Gather->getIndexType());
44067      }
44068      auto *Scatter = cast<MaskedScatterSDNode>(GorS);
44069      SDValue Ops[] = { Chain, Scatter->getValue(),
44070                        Mask, Base, Index, Scale };
44071      return DAG.getMaskedScatter(Scatter->getVTList(),
44072                                  Scatter->getMemoryVT(), DL,
44073                                  Ops, Scatter->getMemOperand(),
44074                                  Scatter->getIndexType());
44075    }
44076  }
44077
44078  if (DCI.isBeforeLegalizeOps()) {
44079    unsigned IndexWidth = Index.getScalarValueSizeInBits();
44080
44081    // Make sure the index is either i32 or i64
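    // For example, a v4i16 index is sign-extended to v4i32, since the
    // hardware gather/scatter forms only accept dword or qword indices.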
44082    if (IndexWidth != 32 && IndexWidth != 64) {
44083      MVT EltVT = IndexWidth > 32 ? MVT::i64 : MVT::i32;
44084      EVT IndexVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
44085                                   Index.getValueType().getVectorNumElements());
44086      Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
44087      if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
44088        SDValue Ops[] = { Chain, Gather->getPassThru(),
44089                          Mask, Base, Index, Scale } ;
44090        return DAG.getMaskedGather(Gather->getVTList(),
44091                                   Gather->getMemoryVT(), DL, Ops,
44092                                   Gather->getMemOperand(),
44093                                   Gather->getIndexType());
44094      }
44095      auto *Scatter = cast<MaskedScatterSDNode>(GorS);
44096      SDValue Ops[] = { Chain, Scatter->getValue(),
44097                        Mask, Base, Index, Scale };
44098      return DAG.getMaskedScatter(Scatter->getVTList(),
44099                                  Scatter->getMemoryVT(), DL,
44100                                  Ops, Scatter->getMemOperand(),
44101                                  Scatter->getIndexType());
44102    }
44103  }
44104
44105  // With vector masks we only demand the upper bit of the mask.
44106  if (Mask.getScalarValueSizeInBits() != 1) {
44107    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44108    APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
44109    if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
44110      return SDValue(N, 0);
44111  }
44112
44113  return SDValue();
44114}
44115
44116// Optimize  RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
44117static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
44118                               const X86Subtarget &Subtarget) {
44119  SDLoc DL(N);
44120  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
44121  SDValue EFLAGS = N->getOperand(1);
44122
44123  // Try to simplify the EFLAGS and condition code operands.
44124  if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget))
44125    return getSETCC(CC, Flags, DL, DAG);
44126
44127  return SDValue();
44128}
44129
44130/// Optimize branch condition evaluation.
44131static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
44132                             const X86Subtarget &Subtarget) {
44133  SDLoc DL(N);
44134  SDValue EFLAGS = N->getOperand(3);
44135  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
44136
44137  // Try to simplify the EFLAGS and condition code operands.
44138  // Make sure to not keep references to operands, as combineSetCCEFLAGS can
44139  // RAUW them under us.
44140  if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) {
44141    SDValue Cond = DAG.getTargetConstant(CC, DL, MVT::i8);
44142    return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
44143                       N->getOperand(1), Cond, Flags);
44144  }
44145
44146  return SDValue();
44147}
44148
44149static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
44150                                                  SelectionDAG &DAG) {
44151  // Take advantage of vector comparisons producing 0 or -1 in each lane to
44152  // optimize away operation when it's from a constant.
44153  //
44154  // The general transformation is:
44155  //    UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
44156  //       AND(VECTOR_CMP(x,y), constant2)
44157  //    constant2 = UNARYOP(constant)
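  //
  // A concrete sketch for the sint_to_fp case:
  //    (v4f32 sint_to_fp (and (v4i32 setcc x, y, cc), <1,1,1,1>))
  // becomes
  //    (v4f32 bitcast (and (v4i32 setcc x, y, cc), (bitcast <1.0,1.0,1.0,1.0>)))
  // because each compare lane is 0 or -1, so the AND selects either the
  // all-zero pattern (0.0) or the pre-converted constant per lane.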
44158
44159  // Early exit if this isn't a vector operation, the operand of the
44160  // unary operation isn't a bitwise AND, or if the sizes of the operations
44161  // aren't the same.
44162  EVT VT = N->getValueType(0);
44163  bool IsStrict = N->isStrictFPOpcode();
44164  SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
44165  if (!VT.isVector() || Op0->getOpcode() != ISD::AND ||
44166      Op0->getOperand(0)->getOpcode() != ISD::SETCC ||
44167      VT.getSizeInBits() != Op0.getValueSizeInBits())
44168    return SDValue();
44169
44170  // Now check that the other operand of the AND is a constant. We could
44171  // make the transformation for non-constant splats as well, but it's unclear
44172  // that would be a benefit as it would not eliminate any operations, just
44173  // perform one more step in scalar code before moving to the vector unit.
44174  if (auto *BV = dyn_cast<BuildVectorSDNode>(Op0.getOperand(1))) {
44175    // Bail out if the vector isn't a constant.
44176    if (!BV->isConstant())
44177      return SDValue();
44178
44179    // Everything checks out. Build up the new and improved node.
44180    SDLoc DL(N);
44181    EVT IntVT = BV->getValueType(0);
44182    // Create a new constant of the appropriate type for the transformed
44183    // DAG.
44184    SDValue SourceConst;
44185    if (IsStrict)
44186      SourceConst = DAG.getNode(N->getOpcode(), DL, {VT, MVT::Other},
44187                                {N->getOperand(0), SDValue(BV, 0)});
44188    else
44189      SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
44190    // The AND node needs bitcasts to/from an integer vector type around it.
44191    SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
44192    SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT, Op0->getOperand(0),
44193                                 MaskConst);
44194    SDValue Res = DAG.getBitcast(VT, NewAnd);
44195    if (IsStrict)
44196      return DAG.getMergeValues({Res, SourceConst.getValue(1)}, DL);
44197    return Res;
44198  }
44199
44200  return SDValue();
44201}
44202
44203/// If we are converting a value to floating-point, try to replace scalar
44204/// truncate of an extracted vector element with a bitcast. This tries to keep
44205/// the sequence on XMM registers rather than moving between vector and GPRs.
44206static SDValue combineToFPTruncExtElt(SDNode *N, SelectionDAG &DAG) {
44207  // TODO: This is currently only used by combineSIntToFP, but it is generalized
44208  //       to allow being called by any similar cast opcode.
44209  // TODO: Consider merging this into lowering: vectorizeExtractedCast().
44210  SDValue Trunc = N->getOperand(0);
44211  if (!Trunc.hasOneUse() || Trunc.getOpcode() != ISD::TRUNCATE)
44212    return SDValue();
44213
44214  SDValue ExtElt = Trunc.getOperand(0);
44215  if (!ExtElt.hasOneUse() || ExtElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
44216      !isNullConstant(ExtElt.getOperand(1)))
44217    return SDValue();
44218
44219  EVT TruncVT = Trunc.getValueType();
44220  EVT SrcVT = ExtElt.getValueType();
44221  unsigned DestWidth = TruncVT.getSizeInBits();
44222  unsigned SrcWidth = SrcVT.getSizeInBits();
44223  if (SrcWidth % DestWidth != 0)
44224    return SDValue();
44225
44226  // inttofp (trunc (extelt X, 0)) --> inttofp (extelt (bitcast X), 0)
44227  EVT SrcVecVT = ExtElt.getOperand(0).getValueType();
44228  unsigned VecWidth = SrcVecVT.getSizeInBits();
44229  unsigned NumElts = VecWidth / DestWidth;
44230  EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), TruncVT, NumElts);
44231  SDValue BitcastVec = DAG.getBitcast(BitcastVT, ExtElt.getOperand(0));
44232  SDLoc DL(N);
44233  SDValue NewExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TruncVT,
44234                                  BitcastVec, ExtElt.getOperand(1));
44235  return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), NewExtElt);
44236}
44237
44238static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
44239                               const X86Subtarget &Subtarget) {
44240  bool IsStrict = N->isStrictFPOpcode();
44241  SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
44242  EVT VT = N->getValueType(0);
44243  EVT InVT = Op0.getValueType();
44244
44245  // UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32))
44246  // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
44247  // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
44248  if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
44249    SDLoc dl(N);
44250    EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
44251                                 InVT.getVectorNumElements());
44252    SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
44253
44254    // UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP.
44255    if (IsStrict)
44256      return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
44257                         {N->getOperand(0), P});
44258    return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
44259  }
44260
44261  // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
44262  // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
44263  // the optimization here.
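  // For example, (uint_to_fp (srl x, 1)) or (uint_to_fp (and x, 0x7FFFFFFF))
  // have a known-zero sign bit, so the cheaper signed conversion is identical.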
44264  if (DAG.SignBitIsZero(Op0)) {
44265    if (IsStrict)
44266      return DAG.getNode(ISD::STRICT_SINT_TO_FP, SDLoc(N), {VT, MVT::Other},
44267                         {N->getOperand(0), Op0});
44268    return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);
44269  }
44270
44271  return SDValue();
44272}
44273
44274static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
44275                               TargetLowering::DAGCombinerInfo &DCI,
44276                               const X86Subtarget &Subtarget) {
44277  // First try to optimize away the conversion entirely when it's
44278  // conditionally from a constant. Vectors only.
44279  bool IsStrict = N->isStrictFPOpcode();
44280  if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
44281    return Res;
44282
44283  // Now move on to more general possibilities.
44284  SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
44285  EVT VT = N->getValueType(0);
44286  EVT InVT = Op0.getValueType();
44287
44288  // SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
44289  // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
44290  // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
44291  if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
44292    SDLoc dl(N);
44293    EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
44294                                 InVT.getVectorNumElements());
44295    SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
44296    if (IsStrict)
44297      return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
44298                         {N->getOperand(0), P});
44299    return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
44300  }
44301
44302  // Without AVX512DQ we only support i64 to float scalar conversion. For both
44303  // vectors and scalars, see if we know that the upper bits are all the sign
44304  // bit, in which case we can truncate the input to i32 and convert from that.
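  // For example, (f64 sint_to_fp (i64 sext (i32 x))) has at least 33 sign
  // bits, so it can be emitted as (f64 sint_to_fp (i32 trunc ...)) and only
  // needs a 32-bit integer conversion.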
44305  if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) {
44306    unsigned BitWidth = InVT.getScalarSizeInBits();
44307    unsigned NumSignBits = DAG.ComputeNumSignBits(Op0);
44308    if (NumSignBits >= (BitWidth - 31)) {
44309      EVT TruncVT = MVT::i32;
44310      if (InVT.isVector())
44311        TruncVT = EVT::getVectorVT(*DAG.getContext(), TruncVT,
44312                                   InVT.getVectorNumElements());
44313      SDLoc dl(N);
44314      if (DCI.isBeforeLegalize() || TruncVT != MVT::v2i32) {
44315        SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
44316        if (IsStrict)
44317          return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
44318                             {N->getOperand(0), Trunc});
44319        return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc);
44320      }
44321      // If we're after legalize and the type is v2i32 we need to shuffle and
44322      // use CVTSI2P.
44323      assert(InVT == MVT::v2i64 && "Unexpected VT!");
44324      SDValue Cast = DAG.getBitcast(MVT::v4i32, Op0);
44325      SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Cast, Cast,
44326                                          { 0, 2, -1, -1 });
44327      if (IsStrict)
44328        return DAG.getNode(X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
44329                           {N->getOperand(0), Shuf});
44330      return DAG.getNode(X86ISD::CVTSI2P, dl, VT, Shuf);
44331    }
44332  }
44333
44334  // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
44335  // a 32-bit target where SSE doesn't support i64->FP operations.
44336  if (!Subtarget.useSoftFloat() && Subtarget.hasX87() &&
44337      Op0.getOpcode() == ISD::LOAD) {
44338    LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
44339    EVT LdVT = Ld->getValueType(0);
44340
44341    // This transformation is not supported if the result type is f16 or f128.
44342    if (VT == MVT::f16 || VT == MVT::f128)
44343      return SDValue();
44344
44345    // If we have AVX512DQ we can use packed conversion instructions unless
44346    // the VT is f80.
44347    if (Subtarget.hasDQI() && VT != MVT::f80)
44348      return SDValue();
44349
44350    if (Ld->isSimple() && !VT.isVector() &&
44351        ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
44352        !Subtarget.is64Bit() && LdVT == MVT::i64) {
44353      std::pair<SDValue, SDValue> Tmp = Subtarget.getTargetLowering()->BuildFILD(
44354          SDValue(N, 0), LdVT, Ld->getChain(), Op0, DAG);
44355      DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Tmp.second);
44356      return Tmp.first;
44357    }
44358  }
44359
44360  if (IsStrict)
44361    return SDValue();
44362
44363  if (SDValue V = combineToFPTruncExtElt(N, DAG))
44364    return V;
44365
44366  return SDValue();
44367}
44368
44369static bool needCarryOrOverflowFlag(SDValue Flags) {
44370  assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
44371
44372  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
44373         UI != UE; ++UI) {
44374    SDNode *User = *UI;
44375
44376    X86::CondCode CC;
44377    switch (User->getOpcode()) {
44378    default:
44379      // Be conservative.
44380      return true;
44381    case X86ISD::SETCC:
44382    case X86ISD::SETCC_CARRY:
44383      CC = (X86::CondCode)User->getConstantOperandVal(0);
44384      break;
44385    case X86ISD::BRCOND:
44386      CC = (X86::CondCode)User->getConstantOperandVal(2);
44387      break;
44388    case X86ISD::CMOV:
44389      CC = (X86::CondCode)User->getConstantOperandVal(2);
44390      break;
44391    }
44392
44393    switch (CC) {
44394    default: break;
44395    case X86::COND_A: case X86::COND_AE:
44396    case X86::COND_B: case X86::COND_BE:
44397    case X86::COND_O: case X86::COND_NO:
44398    case X86::COND_G: case X86::COND_GE:
44399    case X86::COND_L: case X86::COND_LE:
44400      return true;
44401    }
44402  }
44403
44404  return false;
44405}
44406
44407static bool onlyZeroFlagUsed(SDValue Flags) {
44408  assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
44409
44410  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
44411         UI != UE; ++UI) {
44412    SDNode *User = *UI;
44413
44414    unsigned CCOpNo;
44415    switch (User->getOpcode()) {
44416    default:
44417      // Be conservative.
44418      return false;
44419    case X86ISD::SETCC:       CCOpNo = 0; break;
44420    case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
44421    case X86ISD::BRCOND:      CCOpNo = 2; break;
44422    case X86ISD::CMOV:        CCOpNo = 2; break;
44423    }
44424
44425    X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo);
44426    if (CC != X86::COND_E && CC != X86::COND_NE)
44427      return false;
44428  }
44429
44430  return true;
44431}
44432
44433static SDValue combineCMP(SDNode *N, SelectionDAG &DAG) {
44434  // Only handle test patterns.
44435  if (!isNullConstant(N->getOperand(1)))
44436    return SDValue();
44437
44438  // If we have a CMP of a truncated binop, see if we can make a smaller binop
44439  // and use its flags directly.
44440  // TODO: Maybe we should try promoting compares that only use the zero flag
44441  // first if we can prove the upper bits with computeKnownBits?
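  // For example, (cmp (i8 trunc (i32 add x, y)), 0) can be rebuilt as an i8
  // X86ISD::ADD of the truncated inputs and its flags used directly, since
  // ZF of the narrow add matches ZF of the truncated wide add (the carry and
  // overflow flags are checked separately below).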
44442  SDLoc dl(N);
44443  SDValue Op = N->getOperand(0);
44444  EVT VT = Op.getValueType();
44445
44446  // If we have a constant logical shift that's only used in a comparison
44447  // against zero turn it into an equivalent AND. This allows turning it into
44448  // a TEST instruction later.
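  // For example, (cmp (srl x, 8), 0) that only feeds sete/setne becomes
  // (cmp (and x, 0xFFFFFF00), 0), which isel can then match as a TEST.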
44449  if ((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) &&
44450      Op.hasOneUse() && isa<ConstantSDNode>(Op.getOperand(1)) &&
44451      onlyZeroFlagUsed(SDValue(N, 0))) {
44452    unsigned BitWidth = VT.getSizeInBits();
44453    const APInt &ShAmt = Op.getConstantOperandAPInt(1);
44454    if (ShAmt.ult(BitWidth)) { // Avoid undefined shifts.
44455      unsigned MaskBits = BitWidth - ShAmt.getZExtValue();
44456      APInt Mask = Op.getOpcode() == ISD::SRL
44457                       ? APInt::getHighBitsSet(BitWidth, MaskBits)
44458                       : APInt::getLowBitsSet(BitWidth, MaskBits);
44459      if (Mask.isSignedIntN(32)) {
44460        Op = DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
44461                         DAG.getConstant(Mask, dl, VT));
44462        return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
44463                           DAG.getConstant(0, dl, VT));
44464      }
44465    }
44466  }
44467
44468  // Look for a truncate with a single use.
44469  if (Op.getOpcode() != ISD::TRUNCATE || !Op.hasOneUse())
44470    return SDValue();
44471
44472  Op = Op.getOperand(0);
44473
44474  // Arithmetic op can only have one use.
44475  if (!Op.hasOneUse())
44476    return SDValue();
44477
44478  unsigned NewOpc;
44479  switch (Op.getOpcode()) {
44480  default: return SDValue();
44481  case ISD::AND:
44482    // Skip and with constant. We have special handling for and with immediate
44483    // during isel to generate test instructions.
44484    if (isa<ConstantSDNode>(Op.getOperand(1)))
44485      return SDValue();
44486    NewOpc = X86ISD::AND;
44487    break;
44488  case ISD::OR:  NewOpc = X86ISD::OR;  break;
44489  case ISD::XOR: NewOpc = X86ISD::XOR; break;
44490  case ISD::ADD:
44491    // If the carry or overflow flag is used, we can't truncate.
44492    if (needCarryOrOverflowFlag(SDValue(N, 0)))
44493      return SDValue();
44494    NewOpc = X86ISD::ADD;
44495    break;
44496  case ISD::SUB:
44497    // If the carry or overflow flag is used, we can't truncate.
44498    if (needCarryOrOverflowFlag(SDValue(N, 0)))
44499      return SDValue();
44500    NewOpc = X86ISD::SUB;
44501    break;
44502  }
44503
44504  // We found an op we can narrow. Truncate its inputs.
44505  SDValue Op0 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(0));
44506  SDValue Op1 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(1));
44507
44508  // Use an X86-specific opcode to avoid DAG combine messing with it.
44509  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
44510  Op = DAG.getNode(NewOpc, dl, VTs, Op0, Op1);
44511
44512  // For AND, keep a CMP so that we can match the test pattern.
44513  if (NewOpc == X86ISD::AND)
44514    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
44515                       DAG.getConstant(0, dl, VT));
44516
44517  // Return the flags.
44518  return Op.getValue(1);
44519}
44520
44521static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
44522                                TargetLowering::DAGCombinerInfo &DCI) {
44523  assert((X86ISD::ADD == N->getOpcode() || X86ISD::SUB == N->getOpcode()) &&
44524         "Expected X86ISD::ADD or X86ISD::SUB");
44525
44526  SDLoc DL(N);
44527  SDValue LHS = N->getOperand(0);
44528  SDValue RHS = N->getOperand(1);
44529  MVT VT = LHS.getSimpleValueType();
44530  unsigned GenericOpc = X86ISD::ADD == N->getOpcode() ? ISD::ADD : ISD::SUB;
44531
44532  // If we don't use the flag result, simplify back to a generic ADD/SUB.
44533  if (!N->hasAnyUseOfValue(1)) {
44534    SDValue Res = DAG.getNode(GenericOpc, DL, VT, LHS, RHS);
44535    return DAG.getMergeValues({Res, DAG.getConstant(0, DL, MVT::i32)}, DL);
44536  }
44537
44538  // Fold any similar generic ADD/SUB opcodes to reuse this node.
44539  auto MatchGeneric = [&](SDValue N0, SDValue N1, bool Negate) {
44540    SDValue Ops[] = {N0, N1};
44541    SDVTList VTs = DAG.getVTList(N->getValueType(0));
44542    if (SDNode *GenericAddSub = DAG.getNodeIfExists(GenericOpc, VTs, Ops)) {
44543      SDValue Op(N, 0);
44544      if (Negate)
44545        Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
44546      DCI.CombineTo(GenericAddSub, Op);
44547    }
44548  };
44549  MatchGeneric(LHS, RHS, false);
44550  MatchGeneric(RHS, LHS, X86ISD::SUB == N->getOpcode());
44551
44552  return SDValue();
44553}
44554
44555static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
44556  if (SDValue Flags = combineCarryThroughADD(N->getOperand(2), DAG)) {
44557    MVT VT = N->getSimpleValueType(0);
44558    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
44559    return DAG.getNode(X86ISD::SBB, SDLoc(N), VTs,
44560                       N->getOperand(0), N->getOperand(1),
44561                       Flags);
44562  }
44563
44564  // Fold SBB(SUB(X,Y),0,Carry) -> SBB(X,Y,Carry)
44565  // iff the flag result is dead.
44566  SDValue Op0 = N->getOperand(0);
44567  SDValue Op1 = N->getOperand(1);
44568  if (Op0.getOpcode() == ISD::SUB && isNullConstant(Op1) &&
44569      !N->hasAnyUseOfValue(1))
44570    return DAG.getNode(X86ISD::SBB, SDLoc(N), N->getVTList(), Op0.getOperand(0),
44571                       Op0.getOperand(1), N->getOperand(2));
44572
44573  return SDValue();
44574}
44575
44576// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
44577static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
44578                          TargetLowering::DAGCombinerInfo &DCI) {
44579  // If the LHS and RHS of the ADC node are zero, then it can't overflow and
44580  // the result is either zero or one (depending on the input carry bit).
44581  // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
44582  if (X86::isZeroNode(N->getOperand(0)) &&
44583      X86::isZeroNode(N->getOperand(1)) &&
44584      // We don't have a good way to replace an EFLAGS use, so only do this when
44585      // dead right now.
44586      SDValue(N, 1).use_empty()) {
44587    SDLoc DL(N);
44588    EVT VT = N->getValueType(0);
44589    SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
44590    SDValue Res1 =
44591        DAG.getNode(ISD::AND, DL, VT,
44592                    DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
44593                                DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
44594                                N->getOperand(2)),
44595                    DAG.getConstant(1, DL, VT));
44596    return DCI.CombineTo(N, Res1, CarryOut);
44597  }
44598
44599  if (SDValue Flags = combineCarryThroughADD(N->getOperand(2), DAG)) {
44600    MVT VT = N->getSimpleValueType(0);
44601    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
44602    return DAG.getNode(X86ISD::ADC, SDLoc(N), VTs,
44603                       N->getOperand(0), N->getOperand(1),
44604                       Flags);
44605  }
44606
44607  return SDValue();
44608}
44609
44610/// If this is an add or subtract where one operand is produced by a cmp+setcc,
44611/// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
44612/// with CMP+{ADC, SBB}.
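/// For example, roughly: 'r = x + (a < b)' would otherwise need
/// cmp+setb+movzx+add, but can be emitted as cmp a, b; adc x, 0 instead
/// (an illustrative sketch, not a guaranteed instruction sequence).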
44613static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
44614  bool IsSub = N->getOpcode() == ISD::SUB;
44615  SDValue X = N->getOperand(0);
44616  SDValue Y = N->getOperand(1);
44617
44618  // If this is an add, canonicalize a zext operand to the RHS.
44619  // TODO: Incomplete? What if both sides are zexts?
44620  if (!IsSub && X.getOpcode() == ISD::ZERO_EXTEND &&
44621      Y.getOpcode() != ISD::ZERO_EXTEND)
44622    std::swap(X, Y);
44623
44624  // Look through a one-use zext.
44625  bool PeekedThroughZext = false;
44626  if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse()) {
44627    Y = Y.getOperand(0);
44628    PeekedThroughZext = true;
44629  }
44630
44631  // If this is an add, canonicalize a setcc operand to the RHS.
44632  // TODO: Incomplete? What if both sides are setcc?
44633  // TODO: Should we allow peeking through a zext of the other operand?
44634  if (!IsSub && !PeekedThroughZext && X.getOpcode() == X86ISD::SETCC &&
44635      Y.getOpcode() != X86ISD::SETCC)
44636    std::swap(X, Y);
44637
44638  if (Y.getOpcode() != X86ISD::SETCC || !Y.hasOneUse())
44639    return SDValue();
44640
44641  SDLoc DL(N);
44642  EVT VT = N->getValueType(0);
44643  X86::CondCode CC = (X86::CondCode)Y.getConstantOperandVal(0);
44644
44645  // If X is -1 or 0, then we have an opportunity to avoid constants required in
44646  // the general case below.
44647  auto *ConstantX = dyn_cast<ConstantSDNode>(X);
44648  if (ConstantX) {
44649    if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnesValue()) ||
44650        (IsSub && CC == X86::COND_B && ConstantX->isNullValue())) {
44651      // This is a complicated way to get -1 or 0 from the carry flag:
44652      // -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
44653      //  0 - SETB  -->  0 -  (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
44654      return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
44655                         DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
44656                         Y.getOperand(1));
44657    }
44658
44659    if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnesValue()) ||
44660        (IsSub && CC == X86::COND_A && ConstantX->isNullValue())) {
44661      SDValue EFLAGS = Y->getOperand(1);
44662      if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
44663          EFLAGS.getValueType().isInteger() &&
44664          !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
44665        // Swap the operands of a SUB, and we have the same pattern as above.
44666        // -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB
44667        //  0 - SETA  (SUB A, B) -->  0 - SETB  (SUB B, A) --> SUB + SBB
44668        SDValue NewSub = DAG.getNode(
44669            X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
44670            EFLAGS.getOperand(1), EFLAGS.getOperand(0));
44671        SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
44672        return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
44673                           DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
44674                           NewEFLAGS);
44675      }
44676    }
44677  }
44678
44679  if (CC == X86::COND_B) {
44680    // X + SETB Z --> adc X, 0
44681    // X - SETB Z --> sbb X, 0
44682    return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
44683                       DAG.getVTList(VT, MVT::i32), X,
44684                       DAG.getConstant(0, DL, VT), Y.getOperand(1));
44685  }
44686
44687  if (CC == X86::COND_A) {
44688    SDValue EFLAGS = Y->getOperand(1);
44689    // Try to convert COND_A into COND_B in an attempt to facilitate
44690    // materializing "setb reg".
44691    //
44692    // Do not flip "e > c", where "c" is a constant, because Cmp instruction
44693    // cannot take an immediate as its first operand.
44694    //
44695    if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
44696        EFLAGS.getValueType().isInteger() &&
44697        !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
44698      SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
44699                                   EFLAGS.getNode()->getVTList(),
44700                                   EFLAGS.getOperand(1), EFLAGS.getOperand(0));
44701      SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
44702      return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
44703                         DAG.getVTList(VT, MVT::i32), X,
44704                         DAG.getConstant(0, DL, VT), NewEFLAGS);
44705    }
44706  }
44707
44708  if (CC != X86::COND_E && CC != X86::COND_NE)
44709    return SDValue();
44710
44711  SDValue Cmp = Y.getOperand(1);
44712  if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
44713      !X86::isZeroNode(Cmp.getOperand(1)) ||
44714      !Cmp.getOperand(0).getValueType().isInteger())
44715    return SDValue();
44716
44717  SDValue Z = Cmp.getOperand(0);
44718  EVT ZVT = Z.getValueType();
44719
44720  // If X is -1 or 0, then we have an opportunity to avoid constants required in
44721  // the general case below.
44722  if (ConstantX) {
44723    // 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb' with
44724    // fake operands:
44725    //  0 - (Z != 0) --> sbb %eax, %eax, (neg Z)
44726    // -1 + (Z == 0) --> sbb %eax, %eax, (neg Z)
44727    if ((IsSub && CC == X86::COND_NE && ConstantX->isNullValue()) ||
44728        (!IsSub && CC == X86::COND_E && ConstantX->isAllOnesValue())) {
44729      SDValue Zero = DAG.getConstant(0, DL, ZVT);
44730      SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
44731      SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z);
44732      return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
44733                         DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
44734                         SDValue(Neg.getNode(), 1));
44735    }
44736
44737    // cmp with 1 sets the carry flag when Z == 0, so create 0 or -1 using 'sbb'
44738    // with fake operands:
44739    //  0 - (Z == 0) --> sbb %eax, %eax, (cmp Z, 1)
44740    // -1 + (Z != 0) --> sbb %eax, %eax, (cmp Z, 1)
44741    if ((IsSub && CC == X86::COND_E && ConstantX->isNullValue()) ||
44742        (!IsSub && CC == X86::COND_NE && ConstantX->isAllOnesValue())) {
44743      SDValue One = DAG.getConstant(1, DL, ZVT);
44744      SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);
44745      return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
44746                         DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cmp1);
44747    }
44748  }
44749
44750  // (cmp Z, 1) sets the carry flag if Z is 0.
44751  SDValue One = DAG.getConstant(1, DL, ZVT);
44752  SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);
44753
44754  // Add the flags type for ADC/SBB nodes.
44755  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
44756
44757  // X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1)
44758  // X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1)
44759  if (CC == X86::COND_NE)
44760    return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X,
44761                       DAG.getConstant(-1ULL, DL, VT), Cmp1);
44762
44763  // X - (Z == 0) --> sub X, (zext(sete  Z, 0)) --> sbb X, 0, (cmp Z, 1)
44764  // X + (Z == 0) --> add X, (zext(sete  Z, 0)) --> adc X, 0, (cmp Z, 1)
44765  return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X,
44766                     DAG.getConstant(0, DL, VT), Cmp1);
44767}
44768
44769static SDValue combineLoopMAddPattern(SDNode *N, SelectionDAG &DAG,
44770                                      const X86Subtarget &Subtarget) {
44771  if (!Subtarget.hasSSE2())
44772    return SDValue();
44773
44774  EVT VT = N->getValueType(0);
44775
44776  // If the vector size is less than 128, or greater than the supported RegSize,
44777  // do not use PMADD.
44778  if (!VT.isVector() || VT.getVectorNumElements() < 8)
44779    return SDValue();
44780
44781  SDValue Op0 = N->getOperand(0);
44782  SDValue Op1 = N->getOperand(1);
44783
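  // Roughly, we look for a vector-reduction add where one operand is a
  // sign-extended i16 multiply, e.g.:
  //   (add (v8i32 mul (sext v8i16 a), (sext v8i16 b)), Acc)
  //   --> (add (v8i32 concat (v4i32 vpmaddwd a, b), zero), Acc)
  // VPMADDWD sums adjacent products into half as many i32 lanes, which is
  // only acceptable because this add carries the vector-reduction flag.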
44784  auto UsePMADDWD = [&](SDValue Op) {
44785    ShrinkMode Mode;
44786    return Op.getOpcode() == ISD::MUL &&
44787           canReduceVMulWidth(Op.getNode(), DAG, Mode) &&
44788           Mode != ShrinkMode::MULU16 &&
44789           (!Subtarget.hasSSE41() ||
44790            (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
44791             Op->isOnlyUserOf(Op.getOperand(1).getNode())));
44792  };
44793
44794  SDValue MulOp, OtherOp;
44795  if (UsePMADDWD(Op0)) {
44796    MulOp = Op0;
44797    OtherOp = Op1;
44798  } else if (UsePMADDWD(Op1)) {
44799    MulOp = Op1;
44800    OtherOp = Op0;
44801  } else
44802   return SDValue();
44803
44804  SDLoc DL(N);
44805  EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
44806                                   VT.getVectorNumElements());
44807  EVT MAddVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
44808                                VT.getVectorNumElements() / 2);
44809
44810  // Shrink the operands of mul.
44811  SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, MulOp->getOperand(0));
44812  SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, MulOp->getOperand(1));
44813
44814  // Madd vector size is half of the original vector size
44815  auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
44816                           ArrayRef<SDValue> Ops) {
44817    MVT OpVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
44818    return DAG.getNode(X86ISD::VPMADDWD, DL, OpVT, Ops);
44819  };
44820  SDValue Madd = SplitOpsAndApply(DAG, Subtarget, DL, MAddVT, { N0, N1 },
44821                                  PMADDWDBuilder);
44822  // Fill the rest of the output with 0
44823  SDValue Zero = DAG.getConstant(0, DL, Madd.getSimpleValueType());
44824  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Madd, Zero);
44825
44826  // Preserve the reduction flag on the ADD. We may need to revisit for the
44827  // other operand.
44828  SDNodeFlags Flags;
44829  Flags.setVectorReduction(true);
44830  return DAG.getNode(ISD::ADD, DL, VT, Concat, OtherOp, Flags);
44831}
44832
44833static SDValue combineLoopSADPattern(SDNode *N, SelectionDAG &DAG,
44834                                     const X86Subtarget &Subtarget) {
44835  if (!Subtarget.hasSSE2())
44836    return SDValue();
44837
44838  SDLoc DL(N);
44839  EVT VT = N->getValueType(0);
44840
44841  // TODO: There's nothing special about i32, any integer type above i16 should
44842  // work just as well.
44843  if (!VT.isVector() || !VT.isSimple() ||
44844      !(VT.getVectorElementType() == MVT::i32))
44845    return SDValue();
44846
44847  unsigned RegSize = 128;
44848  if (Subtarget.useBWIRegs())
44849    RegSize = 512;
44850  else if (Subtarget.hasAVX())
44851    RegSize = 256;
44852
44853  // We only handle v16i32 for SSE2 / v32i32 for AVX / v64i32 for AVX512.
44854  // TODO: We should be able to handle larger vectors by splitting them before
44855  // feeding them into several SADs, and then reducing over those.
44856  if (VT.getSizeInBits() / 4 > RegSize)
44857    return SDValue();
44858
44859  // We know N is a reduction add. To match SAD, we need one of the operands to
44860  // be an ABS.
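  // Roughly, the shape we are matching is:
  //   (add (abs (sub (zext vXi8 a), (zext vXi8 b))), Acc)
  //   --> (add (psadbw a, b), Acc)
  // with the PSADBW result bitcast/padded to the reduction type. As with the
  // PMADDWD combine, this is only valid because the add is a reduction, so it
  // does not matter which lanes end up holding the partial sums.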
44861  SDValue AbsOp = N->getOperand(0);
44862  SDValue OtherOp = N->getOperand(1);
44863  if (AbsOp.getOpcode() != ISD::ABS)
44864    std::swap(AbsOp, OtherOp);
44865  if (AbsOp.getOpcode() != ISD::ABS)
44866    return SDValue();
44867
44868  // Check whether we have an abs-diff pattern feeding into the ABS.
44869  SDValue SadOp0, SadOp1;
44870  if (!detectZextAbsDiff(AbsOp, SadOp0, SadOp1))
44871    return SDValue();
44872
44873  // SAD pattern detected. Now build a SAD instruction and an addition for
44874  // reduction. Note that the number of elements of the result of SAD is less
44875  // than the number of elements of its input. Therefore, we can only update
44876  // part of the elements in the reduction vector.
44877  SDValue Sad = createPSADBW(DAG, SadOp0, SadOp1, DL, Subtarget);
44878
44879  // The output of PSADBW is a vector of i64.
44880  // We need to turn the vector of i64 into a vector of i32.
44881  // If the reduction vector is at least as wide as the psadbw result, just
44882  // bitcast. If it's narrower which can only occur for v2i32, bits 127:16 of
44883  // the PSADBW will be zero. If we promote narrow vectors, truncate the v2i64
44884  // result to v2i32, which will be removed by type legalization. If we widen
44885  // narrow vectors, then we bitcast to v4i32 and extract the v2i32.
44886  MVT ResVT = MVT::getVectorVT(MVT::i32, Sad.getValueSizeInBits() / 32);
44887  Sad = DAG.getNode(ISD::BITCAST, DL, ResVT, Sad);
44888
44889  if (VT.getSizeInBits() > ResVT.getSizeInBits()) {
44890    // Fill the upper elements with zero to match the add width.
44891    assert(VT.getSizeInBits() % ResVT.getSizeInBits() == 0 && "Unexpected VTs");
44892    unsigned NumConcats = VT.getSizeInBits() / ResVT.getSizeInBits();
44893    SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, DL, ResVT));
44894    Ops[0] = Sad;
44895    Sad = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Ops);
44896  } else if (VT.getSizeInBits() < ResVT.getSizeInBits()) {
44897    Sad = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Sad,
44898                      DAG.getIntPtrConstant(0, DL));
44899  }
44900
44901  // Preserve the reduction flag on the ADD. We may need to revisit for the
44902  // other operand.
44903  SDNodeFlags Flags;
44904  Flags.setVectorReduction(true);
44905  return DAG.getNode(ISD::ADD, DL, VT, Sad, OtherOp, Flags);
44906}
44907
44908static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1,
44909                            const SDLoc &DL, EVT VT,
44910                            const X86Subtarget &Subtarget) {
44911  // Example of pattern we try to detect:
44912  // t := (v8i32 mul (sext (v8i16 x0), (sext (v8i16 x1))))
44913  //(add (build_vector (extract_elt t, 0),
44914  //                   (extract_elt t, 2),
44915  //                   (extract_elt t, 4),
44916  //                   (extract_elt t, 6)),
44917  //     (build_vector (extract_elt t, 1),
44918  //                   (extract_elt t, 3),
44919  //                   (extract_elt t, 5),
44920  //                   (extract_elt t, 7)))
44921
44922  if (!Subtarget.hasSSE2())
44923    return SDValue();
44924
44925  if (Op0.getOpcode() != ISD::BUILD_VECTOR ||
44926      Op1.getOpcode() != ISD::BUILD_VECTOR)
44927    return SDValue();
44928
44929  if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
44930      VT.getVectorNumElements() < 4 ||
44931      !isPowerOf2_32(VT.getVectorNumElements()))
44932    return SDValue();
44933
44934  // Check if one of Op0,Op1 is of the form:
44935  // (build_vector (extract_elt Mul, 0),
44936  //               (extract_elt Mul, 2),
44937  //               (extract_elt Mul, 4),
44938  //                   ...
44939  // the other is of the form:
44940  // (build_vector (extract_elt Mul, 1),
44941  //               (extract_elt Mul, 3),
44942  //               (extract_elt Mul, 5),
44943  //                   ...
44944  // and identify Mul.
44945  SDValue Mul;
44946  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) {
44947    SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i),
44948            Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1);
44949    // TODO: Be more tolerant to undefs.
44950    if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
44951        Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
44952        Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
44953        Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
44954      return SDValue();
44955    auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1));
44956    auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1));
44957    auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1));
44958    auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1));
44959    if (!Const0L || !Const1L || !Const0H || !Const1H)
44960      return SDValue();
44961    unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(),
44962             Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue();
44963    // Commutativity of mul allows factors of a product to reorder.
44964    if (Idx0L > Idx1L)
44965      std::swap(Idx0L, Idx1L);
44966    if (Idx0H > Idx1H)
44967      std::swap(Idx0H, Idx1H);
44968    // Commutativity of add allows pairs of factors to reorder.
44969    if (Idx0L > Idx0H) {
44970      std::swap(Idx0L, Idx0H);
44971      std::swap(Idx1L, Idx1H);
44972    }
44973    if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 ||
44974        Idx1H != 2 * i + 3)
44975      return SDValue();
44976    if (!Mul) {
44977      // First time an extract_elt's source vector is visited. Must be a MUL
44978      // with 2X number of vector elements than the BUILD_VECTOR.
44979      // Both extracts must be from same MUL.
44980      Mul = Op0L->getOperand(0);
44981      if (Mul->getOpcode() != ISD::MUL ||
44982          Mul.getValueType().getVectorNumElements() != 2 * e)
44983        return SDValue();
44984    }
44985    // Check that the extract is from the same MUL previously seen.
44986    if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) ||
44987        Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0))
44988      return SDValue();
44989  }
44990
44991  // Check if the Mul source can be safely shrunk.
44992  ShrinkMode Mode;
44993  if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) ||
44994      Mode == ShrinkMode::MULU16)
44995    return SDValue();
44996
44997  auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
44998                         ArrayRef<SDValue> Ops) {
44999    // Shrink by adding truncate nodes and let DAGCombine fold with the
45000    // sources.
45001    EVT InVT = Ops[0].getValueType();
45002    assert(InVT.getScalarType() == MVT::i32 &&
45003           "Unexpected scalar element type");
45004    assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
45005    EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
45006                                 InVT.getVectorNumElements() / 2);
45007    EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
45008                                   InVT.getVectorNumElements());
45009    return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT,
45010                       DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[0]),
45011                       DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[1]));
45012  };
45013  return SplitOpsAndApply(DAG, Subtarget, DL, VT,
45014                          { Mul.getOperand(0), Mul.getOperand(1) },
45015                          PMADDBuilder);
45016}
45017
45018// Attempt to turn this pattern into PMADDWD.
45019// (mul (add (sext (build_vector)), (sext (build_vector))),
45020//      (add (sext (build_vector)), (sext (build_vector)))
45021static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1,
45022                              const SDLoc &DL, EVT VT,
45023                              const X86Subtarget &Subtarget) {
45024  if (!Subtarget.hasSSE2())
45025    return SDValue();
45026
45027  if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
45028    return SDValue();
45029
45030  if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
45031      VT.getVectorNumElements() < 4 ||
45032      !isPowerOf2_32(VT.getVectorNumElements()))
45033    return SDValue();
45034
45035  SDValue N00 = N0.getOperand(0);
45036  SDValue N01 = N0.getOperand(1);
45037  SDValue N10 = N1.getOperand(0);
45038  SDValue N11 = N1.getOperand(1);
45039
45040  // All inputs need to be sign extends.
45041  // TODO: Support ZERO_EXTEND from known positive?
45042  if (N00.getOpcode() != ISD::SIGN_EXTEND ||
45043      N01.getOpcode() != ISD::SIGN_EXTEND ||
45044      N10.getOpcode() != ISD::SIGN_EXTEND ||
45045      N11.getOpcode() != ISD::SIGN_EXTEND)
45046    return SDValue();
45047
45048  // Peek through the extends.
45049  N00 = N00.getOperand(0);
45050  N01 = N01.getOperand(0);
45051  N10 = N10.getOperand(0);
45052  N11 = N11.getOperand(0);
45053
45054  // Must be extending from vXi16.
45055  EVT InVT = N00.getValueType();
45056  if (InVT.getVectorElementType() != MVT::i16 || N01.getValueType() != InVT ||
45057      N10.getValueType() != InVT || N11.getValueType() != InVT)
45058    return SDValue();
45059
45060  // All inputs should be build_vectors.
45061  if (N00.getOpcode() != ISD::BUILD_VECTOR ||
45062      N01.getOpcode() != ISD::BUILD_VECTOR ||
45063      N10.getOpcode() != ISD::BUILD_VECTOR ||
45064      N11.getOpcode() != ISD::BUILD_VECTOR)
45065    return SDValue();
45066
45067  // For each element, we need to ensure we have an odd element from one vector
45068  // multiplied by the odd element of another vector and the even element from
45069  // one of the same vectors being multiplied by the even element from the
45070  // other vector. So we need to make sure for each element i, this operation
45071  // is being performed:
45072  //  A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
45073  SDValue In0, In1;
45074  for (unsigned i = 0; i != N00.getNumOperands(); ++i) {
45075    SDValue N00Elt = N00.getOperand(i);
45076    SDValue N01Elt = N01.getOperand(i);
45077    SDValue N10Elt = N10.getOperand(i);
45078    SDValue N11Elt = N11.getOperand(i);
45079    // TODO: Be more tolerant to undefs.
45080    if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
45081        N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
45082        N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
45083        N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
45084      return SDValue();
45085    auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
45086    auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
45087    auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
45088    auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
45089    if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
45090      return SDValue();
45091    unsigned IdxN00 = ConstN00Elt->getZExtValue();
45092    unsigned IdxN01 = ConstN01Elt->getZExtValue();
45093    unsigned IdxN10 = ConstN10Elt->getZExtValue();
45094    unsigned IdxN11 = ConstN11Elt->getZExtValue();
45095    // Add is commutative so indices can be reordered.
45096    if (IdxN00 > IdxN10) {
45097      std::swap(IdxN00, IdxN10);
45098      std::swap(IdxN01, IdxN11);
45099    }
45100    // N0 indices must be the even element. N1 indices must be the next odd element.
45101    if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
45102        IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
45103      return SDValue();
45104    SDValue N00In = N00Elt.getOperand(0);
45105    SDValue N01In = N01Elt.getOperand(0);
45106    SDValue N10In = N10Elt.getOperand(0);
45107    SDValue N11In = N11Elt.getOperand(0);
45108    // The first time we find an input, capture it.
45109    if (!In0) {
45110      In0 = N00In;
45111      In1 = N01In;
45112    }
45113    // Mul is commutative so the input vectors can be in any order.
45114    // Canonicalize to make the compares easier.
45115    if (In0 != N00In)
45116      std::swap(N00In, N01In);
45117    if (In0 != N10In)
45118      std::swap(N10In, N11In);
45119    if (In0 != N00In || In1 != N01In || In0 != N10In || In1 != N11In)
45120      return SDValue();
45121  }
45122
45123  auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
45124                         ArrayRef<SDValue> Ops) {
45125    // Shrink by adding truncate nodes and let DAGCombine fold with the
45126    // sources.
45127    EVT OpVT = Ops[0].getValueType();
45128    assert(OpVT.getScalarType() == MVT::i16 &&
45129           "Unexpected scalar element type");
45130    assert(OpVT == Ops[1].getValueType() && "Operands' types mismatch");
45131    EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
45132                                 OpVT.getVectorNumElements() / 2);
45133    return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
45134  };
45135  return SplitOpsAndApply(DAG, Subtarget, DL, VT, { In0, In1 },
45136                          PMADDBuilder);
45137}
45138
45139static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
45140                          TargetLowering::DAGCombinerInfo &DCI,
45141                          const X86Subtarget &Subtarget) {
45142  const SDNodeFlags Flags = N->getFlags();
45143  if (Flags.hasVectorReduction()) {
45144    if (SDValue Sad = combineLoopSADPattern(N, DAG, Subtarget))
45145      return Sad;
45146    if (SDValue MAdd = combineLoopMAddPattern(N, DAG, Subtarget))
45147      return MAdd;
45148  }
45149  EVT VT = N->getValueType(0);
45150  SDValue Op0 = N->getOperand(0);
45151  SDValue Op1 = N->getOperand(1);
45152
45153  if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
45154    return MAdd;
45155  if (SDValue MAdd = matchPMADDWD_2(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
45156    return MAdd;
45157
45158  // Try to synthesize horizontal adds from adds of shuffles.
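       // For illustration (v4i32 case, a sketch; isHorizontalBinOp does the
       // real matching):
       //   (add (shuffle X, Y, <0,2,4,6>), (shuffle X, Y, <1,3,5,7>))
       //   --> (hadd X, Y)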
45159  if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
45160       VT == MVT::v8i32) &&
45161      Subtarget.hasSSSE3() &&
45162      isHorizontalBinOp(Op0, Op1, DAG, Subtarget, true)) {
45163    auto HADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
45164                          ArrayRef<SDValue> Ops) {
45165      return DAG.getNode(X86ISD::HADD, DL, Ops[0].getValueType(), Ops);
45166    };
45167    return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1},
45168                            HADDBuilder);
45169  }
45170
45171  // If vectors of i1 are legal, turn (add (zext (vXi1 X)), Y) into
45172  // (sub Y, (sext (vXi1 X))).
45173  // FIXME: We have the (sub Y, (zext (vXi1 X))) -> (add (sext (vXi1 X)), Y) in
45174  // generic DAG combine without a legal type check, but adding this there
45175  // caused regressions.
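       // e.g. (add (zext X:vXi1), Y) --> (sub Y, (sext X)), since per lane
       // zext(i1) gives 0/+1 and sext(i1) gives 0/-1.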
45176  if (VT.isVector()) {
45177    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45178    if (Op0.getOpcode() == ISD::ZERO_EXTEND &&
45179        Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
45180        TLI.isTypeLegal(Op0.getOperand(0).getValueType())) {
45181      SDLoc DL(N);
45182      SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op0.getOperand(0));
45183      return DAG.getNode(ISD::SUB, DL, VT, Op1, SExt);
45184    }
45185
45186    if (Op1.getOpcode() == ISD::ZERO_EXTEND &&
45187        Op1.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
45188        TLI.isTypeLegal(Op1.getOperand(0).getValueType())) {
45189      SDLoc DL(N);
45190      SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op1.getOperand(0));
45191      return DAG.getNode(ISD::SUB, DL, VT, Op0, SExt);
45192    }
45193  }
45194
45195  return combineAddOrSubToADCOrSBB(N, DAG);
45196}
45197
45198static SDValue combineSubToSubus(SDNode *N, SelectionDAG &DAG,
45199                                 const X86Subtarget &Subtarget) {
45200  SDValue Op0 = N->getOperand(0);
45201  SDValue Op1 = N->getOperand(1);
45202  EVT VT = N->getValueType(0);
45203
45204  if (!VT.isVector())
45205    return SDValue();
45206
45207  // PSUBUS is supported, starting from SSE2, but truncation for v8i32
45208  // is only worth it with SSSE3 (PSHUFB).
45209  EVT EltVT = VT.getVectorElementType();
45210  if (!(Subtarget.hasSSE2() && (EltVT == MVT::i8 || EltVT == MVT::i16)) &&
45211      !(Subtarget.hasSSSE3() && (VT == MVT::v8i32 || VT == MVT::v8i64)) &&
45212      !(Subtarget.useBWIRegs() && (VT == MVT::v16i32)))
45213    return SDValue();
45214
45215  SDValue SubusLHS, SubusRHS;
45216  // Try to find umax(a,b) - b or a - umin(a,b) patterns;
45217  // they may be converted to subus(a,b).
45218  // TODO: Need to add IR canonicalization for this code.
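       // For illustration, the two accepted forms are:
       //   (sub (umax A, B), B) --> (usubsat A, B)
       //   (sub A, (umin A, B)) --> (usubsat A, B)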
45219  if (Op0.getOpcode() == ISD::UMAX) {
45220    SubusRHS = Op1;
45221    SDValue MaxLHS = Op0.getOperand(0);
45222    SDValue MaxRHS = Op0.getOperand(1);
45223    if (MaxLHS == Op1)
45224      SubusLHS = MaxRHS;
45225    else if (MaxRHS == Op1)
45226      SubusLHS = MaxLHS;
45227    else
45228      return SDValue();
45229  } else if (Op1.getOpcode() == ISD::UMIN) {
45230    SubusLHS = Op0;
45231    SDValue MinLHS = Op1.getOperand(0);
45232    SDValue MinRHS = Op1.getOperand(1);
45233    if (MinLHS == Op0)
45234      SubusRHS = MinRHS;
45235    else if (MinRHS == Op0)
45236      SubusRHS = MinLHS;
45237    else
45238      return SDValue();
45239  } else
45240    return SDValue();
45241
45242  // PSUBUS doesn't support v8i32/v8i64/v16i32, but it can be enabled with
45243  // special preprocessing in some cases.
45244  if (EltVT == MVT::i8 || EltVT == MVT::i16)
45245    return DAG.getNode(ISD::USUBSAT, SDLoc(N), VT, SubusLHS, SubusRHS);
45246
45247  assert((VT == MVT::v8i32 || VT == MVT::v16i32 || VT == MVT::v8i64) &&
45248         "Unexpected VT!");
45249
45250  // The special preprocessing can only be applied if the value was
45251  // zero extended from 16 bits, so we require the upper 16 bits to be
45252  // zero for 32-bit values, or the upper 48 bits to be zero for
45253  // 64-bit values.
45254  KnownBits Known = DAG.computeKnownBits(SubusLHS);
45255  unsigned NumZeros = Known.countMinLeadingZeros();
45256  if ((VT == MVT::v8i64 && NumZeros < 48) || NumZeros < 16)
45257    return SDValue();
45258
45259  EVT ExtType = SubusLHS.getValueType();
45260  EVT ShrinkedType;
45261  if (VT == MVT::v8i32 || VT == MVT::v8i64)
45262    ShrinkedType = MVT::v8i16;
45263  else
45264    ShrinkedType = NumZeros >= 24 ? MVT::v16i8 : MVT::v16i16;
45265
45266  // If SubusLHS is zero extended, truncate SubusRHS to its size:
45267  // SubusRHS = umin(0xFFF.., SubusRHS).
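       // For example (a sketch): with v8i32 SubusLHS whose upper 16 bits are
       // known zero, the subtraction is done as a v8i16 usubsat and widened:
       //   (zext (usubsat (trunc SubusLHS), (trunc (umin SubusRHS, 0xFFFF))))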
45268  SDValue SaturationConst =
45269      DAG.getConstant(APInt::getLowBitsSet(ExtType.getScalarSizeInBits(),
45270                                           ShrinkedType.getScalarSizeInBits()),
45271                      SDLoc(SubusLHS), ExtType);
45272  SDValue UMin = DAG.getNode(ISD::UMIN, SDLoc(SubusLHS), ExtType, SubusRHS,
45273                             SaturationConst);
45274  SDValue NewSubusLHS =
45275      DAG.getZExtOrTrunc(SubusLHS, SDLoc(SubusLHS), ShrinkedType);
45276  SDValue NewSubusRHS = DAG.getZExtOrTrunc(UMin, SDLoc(SubusRHS), ShrinkedType);
45277  SDValue Psubus = DAG.getNode(ISD::USUBSAT, SDLoc(N), ShrinkedType,
45278                               NewSubusLHS, NewSubusRHS);
45279
45280  // Zero extend the result; it may be used somewhere as 32 bits. If not,
45281  // the zext and the following trunc will be folded away.
45282  return DAG.getZExtOrTrunc(Psubus, SDLoc(N), ExtType);
45283}
45284
45285static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
45286                          TargetLowering::DAGCombinerInfo &DCI,
45287                          const X86Subtarget &Subtarget) {
45288  SDValue Op0 = N->getOperand(0);
45289  SDValue Op1 = N->getOperand(1);
45290
45291  // X86 can't encode an immediate LHS of a sub. See if we can push the
45292  // negation into a preceding instruction.
45293  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
45294    // If the RHS of the sub is an XOR with one use and a constant, invert the
45295    // immediate. Then add one to the LHS of the sub so we can turn
45296    // X-Y -> X+~Y+1, saving one register.
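         // For example, with hypothetical constants:
         //   (sub 5, (xor X, 3)) --> (add (xor X, ~3), 5 + 1)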
45297    if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
45298        isa<ConstantSDNode>(Op1.getOperand(1))) {
45299      const APInt &XorC = Op1.getConstantOperandAPInt(1);
45300      EVT VT = Op0.getValueType();
45301      SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
45302                                   Op1.getOperand(0),
45303                                   DAG.getConstant(~XorC, SDLoc(Op1), VT));
45304      return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
45305                         DAG.getConstant(C->getAPIntValue() + 1, SDLoc(N), VT));
45306    }
45307  }
45308
45309  // Try to synthesize horizontal subs from subs of shuffles.
45310  EVT VT = N->getValueType(0);
45311  if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
45312       VT == MVT::v8i32) &&
45313      Subtarget.hasSSSE3() &&
45314      isHorizontalBinOp(Op0, Op1, DAG, Subtarget, false)) {
45315    auto HSUBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
45316                          ArrayRef<SDValue> Ops) {
45317      return DAG.getNode(X86ISD::HSUB, DL, Ops[0].getValueType(), Ops);
45318    };
45319    return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1},
45320                            HSUBBuilder);
45321  }
45322
45323  // Try to create PSUBUS if SUB's argument is max/min
45324  if (SDValue V = combineSubToSubus(N, DAG, Subtarget))
45325    return V;
45326
45327  return combineAddOrSubToADCOrSBB(N, DAG);
45328}
45329
45330static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
45331                                    const X86Subtarget &Subtarget) {
45332  MVT VT = N->getSimpleValueType(0);
45333  SDLoc DL(N);
45334
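       // Comparing a value against itself folds to a constant:
       //   (pcmpeq X, X) --> all-ones, (pcmpgt X, X) --> zero.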
45335  if (N->getOperand(0) == N->getOperand(1)) {
45336    if (N->getOpcode() == X86ISD::PCMPEQ)
45337      return DAG.getConstant(-1, DL, VT);
45338    if (N->getOpcode() == X86ISD::PCMPGT)
45339      return DAG.getConstant(0, DL, VT);
45340  }
45341
45342  return SDValue();
45343}
45344
45345/// Helper that combines an array of subvector ops as if they were the operands
45346/// of a ISD::CONCAT_VECTORS node, but may have come from another source (e.g.
45347/// ISD::INSERT_SUBVECTOR). The ops are assumed to be of the same type.
45348static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
45349                                      ArrayRef<SDValue> Ops, SelectionDAG &DAG,
45350                                      TargetLowering::DAGCombinerInfo &DCI,
45351                                      const X86Subtarget &Subtarget) {
45352  assert(Subtarget.hasAVX() && "AVX assumed for concat_vectors");
45353
45354  if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
45355    return DAG.getUNDEF(VT);
45356
45357  if (llvm::all_of(Ops, [](SDValue Op) {
45358        return ISD::isBuildVectorAllZeros(Op.getNode());
45359      }))
45360    return getZeroVector(VT, Subtarget, DAG, DL);
45361
45362  SDValue Op0 = Ops[0];
45363
45364  // Fold subvector loads into one.
45365  // If needed, look through bitcasts to get to the load.
45366  if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(Op0))) {
45367    bool Fast;
45368    const X86TargetLowering *TLI = Subtarget.getTargetLowering();
45369    if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
45370                                *FirstLd->getMemOperand(), &Fast) &&
45371        Fast) {
45372      if (SDValue Ld =
45373              EltsFromConsecutiveLoads(VT, Ops, DL, DAG, Subtarget, false))
45374        return Ld;
45375    }
45376  }
45377
45378  // Repeated subvectors.
45379  if (llvm::all_of(Ops, [Op0](SDValue Op) { return Op == Op0; })) {
45380    // If this broadcast/subv_broadcast is inserted into both halves, use a
45381    // larger broadcast/subv_broadcast.
45382    if (Op0.getOpcode() == X86ISD::VBROADCAST ||
45383        Op0.getOpcode() == X86ISD::SUBV_BROADCAST)
45384      return DAG.getNode(Op0.getOpcode(), DL, VT, Op0.getOperand(0));
45385
45386    // concat_vectors(movddup(x),movddup(x)) -> broadcast(x)
45387    if (Op0.getOpcode() == X86ISD::MOVDDUP && VT == MVT::v4f64 &&
45388        (Subtarget.hasAVX2() || MayFoldLoad(Op0.getOperand(0))))
45389      return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
45390                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f64,
45391                                     Op0.getOperand(0),
45392                                     DAG.getIntPtrConstant(0, DL)));
45393
45394    // concat_vectors(scalar_to_vector(x),scalar_to_vector(x)) -> broadcast(x)
45395    if (Op0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
45396        (Subtarget.hasAVX2() ||
45397         (VT.getScalarSizeInBits() >= 32 && MayFoldLoad(Op0.getOperand(0)))) &&
45398        Op0.getOperand(0).getValueType() == VT.getScalarType())
45399      return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Op0.getOperand(0));
45400  }
45401
45402  bool IsSplat = llvm::all_of(Ops, [&Op0](SDValue Op) { return Op == Op0; });
45403
45404  // Repeated opcode.
45405  // TODO - combineX86ShufflesRecursively should handle shuffle concatenation
45406  // but it currently struggles with different vector widths.
45407  if (llvm::all_of(Ops, [Op0](SDValue Op) {
45408        return Op.getOpcode() == Op0.getOpcode();
45409      })) {
45410    unsigned NumOps = Ops.size();
45411    switch (Op0.getOpcode()) {
45412    case X86ISD::PSHUFHW:
45413    case X86ISD::PSHUFLW:
45414    case X86ISD::PSHUFD:
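           // e.g. (a sketch) when both halves use the same shuffle immediate M:
           //   concat(pshufd(X, M), pshufd(Y, M)) --> pshufd(concat(X, Y), M)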
45415      if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
45416          Subtarget.hasInt256() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
45417        SmallVector<SDValue, 2> Src;
45418        for (unsigned i = 0; i != NumOps; ++i)
45419          Src.push_back(Ops[i].getOperand(0));
45420        return DAG.getNode(Op0.getOpcode(), DL, VT,
45421                           DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Src),
45422                           Op0.getOperand(1));
45423      }
45424      LLVM_FALLTHROUGH;
45425    case X86ISD::VPERMILPI:
45426      // TODO - add support for vXf64/vXi64 shuffles.
45427      if (!IsSplat && NumOps == 2 && (VT == MVT::v8f32 || VT == MVT::v8i32) &&
45428          Subtarget.hasAVX() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
45429        SmallVector<SDValue, 2> Src;
45430        for (unsigned i = 0; i != NumOps; ++i)
45431          Src.push_back(DAG.getBitcast(MVT::v4f32, Ops[i].getOperand(0)));
45432        SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8f32, Src);
45433        Res = DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, Res,
45434                          Op0.getOperand(1));
45435        return DAG.getBitcast(VT, Res);
45436      }
45437      break;
45438    case X86ISD::PACKUS:
45439      if (NumOps == 2 && VT.is256BitVector() && Subtarget.hasInt256()) {
45440        SmallVector<SDValue, 2> LHS, RHS;
45441        for (unsigned i = 0; i != NumOps; ++i) {
45442          LHS.push_back(Ops[i].getOperand(0));
45443          RHS.push_back(Ops[i].getOperand(1));
45444        }
45445        MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
45446        SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
45447                                 NumOps * SrcVT.getVectorNumElements());
45448        return DAG.getNode(Op0.getOpcode(), DL, VT,
45449                           DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, LHS),
45450                           DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, RHS));
45451      }
45452      break;
45453    }
45454  }
45455
45456  return SDValue();
45457}
45458
45459static SDValue combineConcatVectors(SDNode *N, SelectionDAG &DAG,
45460                                    TargetLowering::DAGCombinerInfo &DCI,
45461                                    const X86Subtarget &Subtarget) {
45462  EVT VT = N->getValueType(0);
45463  EVT SrcVT = N->getOperand(0).getValueType();
45464  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45465
45466  // Don't do anything for i1 vectors.
45467  if (VT.getVectorElementType() == MVT::i1)
45468    return SDValue();
45469
45470  if (Subtarget.hasAVX() && TLI.isTypeLegal(VT) && TLI.isTypeLegal(SrcVT)) {
45471    SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
45472    if (SDValue R = combineConcatVectorOps(SDLoc(N), VT.getSimpleVT(), Ops, DAG,
45473                                           DCI, Subtarget))
45474      return R;
45475  }
45476
45477  return SDValue();
45478}
45479
45480static SDValue combineInsertSubvector(SDNode *N, SelectionDAG &DAG,
45481                                      TargetLowering::DAGCombinerInfo &DCI,
45482                                      const X86Subtarget &Subtarget) {
45483  if (DCI.isBeforeLegalizeOps())
45484    return SDValue();
45485
45486  MVT OpVT = N->getSimpleValueType(0);
45487
45488  bool IsI1Vector = OpVT.getVectorElementType() == MVT::i1;
45489
45490  SDLoc dl(N);
45491  SDValue Vec = N->getOperand(0);
45492  SDValue SubVec = N->getOperand(1);
45493
45494  uint64_t IdxVal = N->getConstantOperandVal(2);
45495  MVT SubVecVT = SubVec.getSimpleValueType();
45496
45497  if (Vec.isUndef() && SubVec.isUndef())
45498    return DAG.getUNDEF(OpVT);
45499
45500  // Inserting undefs/zeros into zeros/undefs gives a zero vector.
45501  if ((Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
45502      (SubVec.isUndef() || ISD::isBuildVectorAllZeros(SubVec.getNode())))
45503    return getZeroVector(OpVT, Subtarget, DAG, dl);
45504
45505  if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
45506    // If we're inserting into a zero vector and then into a larger zero vector,
45507    // just insert into the larger zero vector directly.
45508    if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
45509        ISD::isBuildVectorAllZeros(SubVec.getOperand(0).getNode())) {
45510      uint64_t Idx2Val = SubVec.getConstantOperandVal(2);
45511      return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
45512                         getZeroVector(OpVT, Subtarget, DAG, dl),
45513                         SubVec.getOperand(1),
45514                         DAG.getIntPtrConstant(IdxVal + Idx2Val, dl));
45515    }
45516
45517    // If we're inserting into a zero vector and our input was extracted from an
45518    // insert into a zero vector of the same type and the extraction was at
45519    // least as large as the original insertion, just insert the original
45520    // subvector into a zero vector.
45521    if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && IdxVal == 0 &&
45522        isNullConstant(SubVec.getOperand(1)) &&
45523        SubVec.getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR) {
45524      SDValue Ins = SubVec.getOperand(0);
45525      if (isNullConstant(Ins.getOperand(2)) &&
45526          ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) &&
45527          Ins.getOperand(1).getValueSizeInBits() <= SubVecVT.getSizeInBits())
45528        return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
45529                           getZeroVector(OpVT, Subtarget, DAG, dl),
45530                           Ins.getOperand(1), N->getOperand(2));
45531    }
45532  }
45533
45534  // Stop here if this is an i1 vector.
45535  if (IsI1Vector)
45536    return SDValue();
45537
45538  // If this is an insert of an extract, combine to a shuffle. Don't do this
45539  // if the insert or extract can be represented with a subregister operation.
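       // For example (a hypothetical v8i32 case):
       //   (insert_subvector V, (extract_subvector X, 4), 0)
       //   --> (vector_shuffle V, X, <12,13,14,15,4,5,6,7>)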
45540  if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
45541      SubVec.getOperand(0).getSimpleValueType() == OpVT &&
45542      (IdxVal != 0 || !Vec.isUndef())) {
45543    int ExtIdxVal = SubVec.getConstantOperandVal(1);
45544    if (ExtIdxVal != 0) {
45545      int VecNumElts = OpVT.getVectorNumElements();
45546      int SubVecNumElts = SubVecVT.getVectorNumElements();
45547      SmallVector<int, 64> Mask(VecNumElts);
45548      // First create an identity shuffle mask.
45549      for (int i = 0; i != VecNumElts; ++i)
45550        Mask[i] = i;
45551      // Now insert the extracted portion.
45552      for (int i = 0; i != SubVecNumElts; ++i)
45553        Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts;
45554
45555      return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask);
45556    }
45557  }
45558
45559  // Match concat_vector style patterns.
45560  SmallVector<SDValue, 2> SubVectorOps;
45561  if (collectConcatOps(N, SubVectorOps)) {
45562    if (SDValue Fold =
45563            combineConcatVectorOps(dl, OpVT, SubVectorOps, DAG, DCI, Subtarget))
45564      return Fold;
45565
45566    // If we're inserting all zeros into the upper half, change this to
45567    // a concat with zero. We will match this to a move
45568    // with implicit upper bit zeroing during isel.
45569    // We do this here because we don't want combineConcatVectorOps to
45570    // create INSERT_SUBVECTOR from CONCAT_VECTORS.
45571    if (SubVectorOps.size() == 2 &&
45572        ISD::isBuildVectorAllZeros(SubVectorOps[1].getNode()))
45573      return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
45574                         getZeroVector(OpVT, Subtarget, DAG, dl),
45575                         SubVectorOps[0], DAG.getIntPtrConstant(0, dl));
45576  }
45577
45578  // If this is a broadcast insert into an upper undef, use a larger broadcast.
45579  if (Vec.isUndef() && IdxVal != 0 && SubVec.getOpcode() == X86ISD::VBROADCAST)
45580    return DAG.getNode(X86ISD::VBROADCAST, dl, OpVT, SubVec.getOperand(0));
45581
45582  // If this is a broadcast load inserted into an upper undef, use a larger
45583  // broadcast load.
45584  if (Vec.isUndef() && IdxVal != 0 && SubVec.hasOneUse() &&
45585      SubVec.getOpcode() == X86ISD::VBROADCAST_LOAD) {
45586    auto *MemIntr = cast<MemIntrinsicSDNode>(SubVec);
45587    SDVTList Tys = DAG.getVTList(OpVT, MVT::Other);
45588    SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
45589    SDValue BcastLd =
45590        DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
45591                                MemIntr->getMemoryVT(),
45592                                MemIntr->getMemOperand());
45593    DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
45594    return BcastLd;
45595  }
45596
45597  return SDValue();
45598}
45599
45600/// If we are extracting a subvector of a vector select and the select condition
45601/// is composed of concatenated vectors, try to narrow the select width. This
45602/// is a common pattern for AVX1 integer code because 256-bit selects may be
45603/// legal, but there is almost no integer math/logic available for 256-bit.
45604/// This function should only be called with legal types (otherwise, the calls
45605/// to get simple value types will assert).
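     /// For example (a sketch): (extract_subvector (vselect C, T, F), 0) with
     /// 256-bit operands becomes a 128-bit vselect of the low halves of C, T, F.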
45606static SDValue narrowExtractedVectorSelect(SDNode *Ext, SelectionDAG &DAG) {
45607  SDValue Sel = peekThroughBitcasts(Ext->getOperand(0));
45608  SmallVector<SDValue, 4> CatOps;
45609  if (Sel.getOpcode() != ISD::VSELECT ||
45610      !collectConcatOps(Sel.getOperand(0).getNode(), CatOps))
45611    return SDValue();
45612
45613  // Note: We assume simple value types because this should only be called with
45614  //       legal operations/types.
45615  // TODO: This can be extended to handle extraction to 256-bits.
45616  MVT VT = Ext->getSimpleValueType(0);
45617  if (!VT.is128BitVector())
45618    return SDValue();
45619
45620  MVT SelCondVT = Sel.getOperand(0).getSimpleValueType();
45621  if (!SelCondVT.is256BitVector() && !SelCondVT.is512BitVector())
45622    return SDValue();
45623
45624  MVT WideVT = Ext->getOperand(0).getSimpleValueType();
45625  MVT SelVT = Sel.getSimpleValueType();
45626  assert((SelVT.is256BitVector() || SelVT.is512BitVector()) &&
45627         "Unexpected vector type with legal operations");
45628
45629  unsigned SelElts = SelVT.getVectorNumElements();
45630  unsigned CastedElts = WideVT.getVectorNumElements();
45631  unsigned ExtIdx = cast<ConstantSDNode>(Ext->getOperand(1))->getZExtValue();
45632  if (SelElts % CastedElts == 0) {
45633    // The select has the same or more (narrower) elements than the extract
45634    // operand. The extraction index gets scaled by that factor.
45635    ExtIdx *= (SelElts / CastedElts);
45636  } else if (CastedElts % SelElts == 0) {
45637    // The select has less (wider) elements than the extract operand. Make sure
45638    // that the extraction index can be divided evenly.
45639    unsigned IndexDivisor = CastedElts / SelElts;
45640    if (ExtIdx % IndexDivisor != 0)
45641      return SDValue();
45642    ExtIdx /= IndexDivisor;
45643  } else {
45644    llvm_unreachable("Element count of simple vector types are not divisible?");
45645  }
45646
45647  unsigned NarrowingFactor = WideVT.getSizeInBits() / VT.getSizeInBits();
45648  unsigned NarrowElts = SelElts / NarrowingFactor;
45649  MVT NarrowSelVT = MVT::getVectorVT(SelVT.getVectorElementType(), NarrowElts);
45650  SDLoc DL(Ext);
45651  SDValue ExtCond = extract128BitVector(Sel.getOperand(0), ExtIdx, DAG, DL);
45652  SDValue ExtT = extract128BitVector(Sel.getOperand(1), ExtIdx, DAG, DL);
45653  SDValue ExtF = extract128BitVector(Sel.getOperand(2), ExtIdx, DAG, DL);
45654  SDValue NarrowSel = DAG.getSelect(DL, NarrowSelVT, ExtCond, ExtT, ExtF);
45655  return DAG.getBitcast(VT, NarrowSel);
45656}
45657
45658static SDValue combineExtractSubvector(SDNode *N, SelectionDAG &DAG,
45659                                       TargetLowering::DAGCombinerInfo &DCI,
45660                                       const X86Subtarget &Subtarget) {
45661  // For AVX1 only, if we are extracting from a 256-bit and+not (which will
45662  // eventually get combined/lowered into ANDNP) with a concatenated operand,
45663  // split the 'and' into 128-bit ops to avoid the concatenate and extract.
45664  // We let generic combining take over from there to simplify the
45665  // insert/extract and 'not'.
45666  // This pattern emerges during AVX1 legalization. We handle it before lowering
45667  // to avoid complications like splitting constant vector loads.
45668
45669  // Capture the original wide type in the likely case that we need to bitcast
45670  // back to this type.
45671  if (!N->getValueType(0).isSimple())
45672    return SDValue();
45673
45674  MVT VT = N->getSimpleValueType(0);
45675  SDValue InVec = N->getOperand(0);
45676  SDValue InVecBC = peekThroughBitcasts(InVec);
45677  EVT InVecVT = InVec.getValueType();
45678  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45679
45680  if (Subtarget.hasAVX() && !Subtarget.hasAVX2() &&
45681      TLI.isTypeLegal(InVecVT) &&
45682      InVecVT.getSizeInBits() == 256 && InVecBC.getOpcode() == ISD::AND) {
45683    auto isConcatenatedNot = [] (SDValue V) {
45684      V = peekThroughBitcasts(V);
45685      if (!isBitwiseNot(V))
45686        return false;
45687      SDValue NotOp = V->getOperand(0);
45688      return peekThroughBitcasts(NotOp).getOpcode() == ISD::CONCAT_VECTORS;
45689    };
45690    if (isConcatenatedNot(InVecBC.getOperand(0)) ||
45691        isConcatenatedNot(InVecBC.getOperand(1))) {
45692      // extract (and v4i64 X, (not (concat Y1, Y2))), n -> andnp v2i64 X(n), Y1
45693      SDValue Concat = split256IntArith(InVecBC, DAG);
45694      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT,
45695                         DAG.getBitcast(InVecVT, Concat), N->getOperand(1));
45696    }
45697  }
45698
45699  if (DCI.isBeforeLegalizeOps())
45700    return SDValue();
45701
45702  if (SDValue V = narrowExtractedVectorSelect(N, DAG))
45703    return V;
45704
45705  unsigned IdxVal = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
45706
45707  if (ISD::isBuildVectorAllZeros(InVec.getNode()))
45708    return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
45709
45710  if (ISD::isBuildVectorAllOnes(InVec.getNode())) {
45711    if (VT.getScalarType() == MVT::i1)
45712      return DAG.getConstant(1, SDLoc(N), VT);
45713    return getOnesVector(VT, DAG, SDLoc(N));
45714  }
45715
45716  if (InVec.getOpcode() == ISD::BUILD_VECTOR)
45717    return DAG.getBuildVector(
45718        VT, SDLoc(N),
45719        InVec.getNode()->ops().slice(IdxVal, VT.getVectorNumElements()));
45720
45721  // If we are extracting from an insert into a zero vector, replace with a
45722  // smaller insert into zero, as long as we access at least the original
45723  // subvector. Don't do this for i1 vectors.
45724  if (VT.getVectorElementType() != MVT::i1 &&
45725      InVec.getOpcode() == ISD::INSERT_SUBVECTOR && IdxVal == 0 &&
45726      InVec.hasOneUse() && isNullConstant(InVec.getOperand(2)) &&
45727      ISD::isBuildVectorAllZeros(InVec.getOperand(0).getNode()) &&
45728      InVec.getOperand(1).getValueSizeInBits() <= VT.getSizeInBits()) {
45729    SDLoc DL(N);
45730    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
45731                       getZeroVector(VT, Subtarget, DAG, DL),
45732                       InVec.getOperand(1), InVec.getOperand(2));
45733  }
45734
45735  // If we're extracting from a broadcast then we're better off just
45736  // broadcasting to the smaller type directly, assuming this is the only use.
45737  // As it's a broadcast we don't care about the extraction index.
45738  if (InVec.getOpcode() == X86ISD::VBROADCAST && InVec.hasOneUse() &&
45739      InVec.getOperand(0).getValueSizeInBits() <= VT.getSizeInBits())
45740    return DAG.getNode(X86ISD::VBROADCAST, SDLoc(N), VT, InVec.getOperand(0));
45741
45742  if (InVec.getOpcode() == X86ISD::VBROADCAST_LOAD && InVec.hasOneUse()) {
45743    auto *MemIntr = cast<MemIntrinsicSDNode>(InVec);
45744    if (MemIntr->getMemoryVT().getSizeInBits() <= VT.getSizeInBits()) {
45745      SDVTList Tys = DAG.getVTList(VT, MVT::Other);
45746      SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
45747      SDValue BcastLd =
45748          DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
45749                                  MemIntr->getMemoryVT(),
45750                                  MemIntr->getMemOperand());
45751      DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
45752      return BcastLd;
45753    }
45754  }
45755
45756  // If we're extracting the lowest subvector and we're the only user,
45757  // we may be able to perform this with a smaller vector width.
45758  if (IdxVal == 0 && InVec.hasOneUse()) {
45759    unsigned InOpcode = InVec.getOpcode();
45760    if (VT == MVT::v2f64 && InVecVT == MVT::v4f64) {
45761      // v2f64 CVTDQ2PD(v4i32).
45762      if (InOpcode == ISD::SINT_TO_FP &&
45763          InVec.getOperand(0).getValueType() == MVT::v4i32) {
45764        return DAG.getNode(X86ISD::CVTSI2P, SDLoc(N), VT, InVec.getOperand(0));
45765      }
45766      // v2f64 CVTUDQ2PD(v4i32).
45767      if (InOpcode == ISD::UINT_TO_FP && Subtarget.hasVLX() &&
45768          InVec.getOperand(0).getValueType() == MVT::v4i32) {
45769        return DAG.getNode(X86ISD::CVTUI2P, SDLoc(N), VT, InVec.getOperand(0));
45770      }
45771      // v2f64 CVTPS2PD(v4f32).
45772      if (InOpcode == ISD::FP_EXTEND &&
45773          InVec.getOperand(0).getValueType() == MVT::v4f32) {
45774        return DAG.getNode(X86ISD::VFPEXT, SDLoc(N), VT, InVec.getOperand(0));
45775      }
45776    }
45777    if ((InOpcode == ISD::ANY_EXTEND ||
45778         InOpcode == ISD::ANY_EXTEND_VECTOR_INREG ||
45779         InOpcode == ISD::ZERO_EXTEND ||
45780         InOpcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
45781         InOpcode == ISD::SIGN_EXTEND ||
45782         InOpcode == ISD::SIGN_EXTEND_VECTOR_INREG) &&
45783        VT.is128BitVector() &&
45784        InVec.getOperand(0).getSimpleValueType().is128BitVector()) {
45785      unsigned ExtOp = getOpcode_EXTEND_VECTOR_INREG(InOpcode);
45786      return DAG.getNode(ExtOp, SDLoc(N), VT, InVec.getOperand(0));
45787    }
45788    if (InOpcode == ISD::VSELECT &&
45789        InVec.getOperand(0).getValueType().is256BitVector() &&
45790        InVec.getOperand(1).getValueType().is256BitVector() &&
45791        InVec.getOperand(2).getValueType().is256BitVector()) {
45792      SDLoc DL(N);
45793      SDValue Ext0 = extractSubVector(InVec.getOperand(0), 0, DAG, DL, 128);
45794      SDValue Ext1 = extractSubVector(InVec.getOperand(1), 0, DAG, DL, 128);
45795      SDValue Ext2 = extractSubVector(InVec.getOperand(2), 0, DAG, DL, 128);
45796      return DAG.getNode(InOpcode, DL, VT, Ext0, Ext1, Ext2);
45797    }
45798  }
45799
45800  return SDValue();
45801}
45802
45803static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
45804  EVT VT = N->getValueType(0);
45805  SDValue Src = N->getOperand(0);
45806  SDLoc DL(N);
45807
45808  // If this is a scalar to vector to v1i1 from an AND with 1, bypass the and.
45809  // This occurs frequently in our masked scalar intrinsic code and our
45810  // floating point select lowering with AVX512.
45811  // TODO: SimplifyDemandedBits instead?
45812  if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse())
45813    if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
45814      if (C->getAPIntValue().isOneValue())
45815        return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1,
45816                           Src.getOperand(0));
45817
45818  // Combine scalar_to_vector of an extract_vector_elt into an extract_subvec.
45819  if (VT == MVT::v1i1 && Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
45820      Src.hasOneUse() && Src.getOperand(0).getValueType().isVector() &&
45821      Src.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
45822    if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
45823      if (C->isNullValue())
45824        return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src.getOperand(0),
45825                           Src.getOperand(1));
45826
45827  // Reduce v2i64 to v4i32 if we don't need the upper bits.
45828  // TODO: Move to DAGCombine?
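       // e.g. (v2i64 (scalar_to_vector (i64 (any_extend X:i32))))
       //      --> (bitcast (v4i32 (scalar_to_vector X)))   (a sketch)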
45829  if (VT == MVT::v2i64 && Src.getOpcode() == ISD::ANY_EXTEND &&
45830      Src.getValueType() == MVT::i64 && Src.hasOneUse() &&
45831      Src.getOperand(0).getScalarValueSizeInBits() <= 32)
45832    return DAG.getBitcast(
45833        VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
45834                        DAG.getAnyExtOrTrunc(Src.getOperand(0), DL, MVT::i32)));
45835
45836  return SDValue();
45837}
45838
45839// Simplify PMULDQ and PMULUDQ operations.
45840static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
45841                             TargetLowering::DAGCombinerInfo &DCI,
45842                             const X86Subtarget &Subtarget) {
45843  SDValue LHS = N->getOperand(0);
45844  SDValue RHS = N->getOperand(1);
45845
45846  // Canonicalize constant to RHS.
45847  if (DAG.isConstantIntBuildVectorOrConstantInt(LHS) &&
45848      !DAG.isConstantIntBuildVectorOrConstantInt(RHS))
45849    return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
45850
45851  // Multiply by zero.
45852  // Don't return RHS as it may contain UNDEFs.
45853  if (ISD::isBuildVectorAllZeros(RHS.getNode()))
45854    return DAG.getConstant(0, SDLoc(N), N->getValueType(0));
45855
45856  // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
45857  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45858  if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnesValue(64), DCI))
45859    return SDValue(N, 0);
45860
45861  // If the input is an extend_invec and the SimplifyDemandedBits call didn't
45862  // convert it to any_extend_invec, due to the LegalOperations check, do the
45863  // conversion directly to a vector shuffle manually. This exposes combine
45864  // opportunities missed by combineExtInVec not calling
45865  // combineX86ShufflesRecursively on SSE4.1 targets.
45866  // FIXME: This is basically a hack around several other issues related to
45867  // ANY_EXTEND_VECTOR_INREG.
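       // e.g. (pmuludq (zero_extend_vector_inreg X:v4i32), Y) becomes
       //      (pmuludq (bitcast (shuffle X, X, <0,-1,1,-1>)), Y), which is
       //      safe because only the low 32 bits of each element are read.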
45868  if (N->getValueType(0) == MVT::v2i64 && LHS.hasOneUse() &&
45869      (LHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
45870       LHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
45871      LHS.getOperand(0).getValueType() == MVT::v4i32) {
45872    SDLoc dl(N);
45873    LHS = DAG.getVectorShuffle(MVT::v4i32, dl, LHS.getOperand(0),
45874                               LHS.getOperand(0), { 0, -1, 1, -1 });
45875    LHS = DAG.getBitcast(MVT::v2i64, LHS);
45876    return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
45877  }
45878  if (N->getValueType(0) == MVT::v2i64 && RHS.hasOneUse() &&
45879      (RHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
45880       RHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
45881      RHS.getOperand(0).getValueType() == MVT::v4i32) {
45882    SDLoc dl(N);
45883    RHS = DAG.getVectorShuffle(MVT::v4i32, dl, RHS.getOperand(0),
45884                               RHS.getOperand(0), { 0, -1, 1, -1 });
45885    RHS = DAG.getBitcast(MVT::v2i64, RHS);
45886    return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
45887  }
45888
45889  return SDValue();
45890}
45891
45892static SDValue combineExtInVec(SDNode *N, SelectionDAG &DAG,
45893                               TargetLowering::DAGCombinerInfo &DCI,
45894                               const X86Subtarget &Subtarget) {
45895  EVT VT = N->getValueType(0);
45896  SDValue In = N->getOperand(0);
45897  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45898
45899  // Try to merge vector loads and extend_inreg to an extload.
45900  if (!DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(In.getNode()) &&
45901      In.hasOneUse()) {
45902    auto *Ld = cast<LoadSDNode>(In);
45903    if (Ld->isSimple()) {
45904      MVT SVT = In.getSimpleValueType().getVectorElementType();
45905      ISD::LoadExtType Ext = N->getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
45906      EVT MemVT = EVT::getVectorVT(*DAG.getContext(), SVT,
45907                                   VT.getVectorNumElements());
45908      if (TLI.isLoadExtLegal(Ext, VT, MemVT)) {
45909        SDValue Load =
45910            DAG.getExtLoad(Ext, SDLoc(N), VT, Ld->getChain(), Ld->getBasePtr(),
45911                           Ld->getPointerInfo(), MemVT, Ld->getAlignment(),
45912                           Ld->getMemOperand()->getFlags());
45913        DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
45914        return Load;
45915      }
45916    }
45917  }
45918
45919  // Attempt to combine as a shuffle.
45920  // TODO: SSE41 support
45921  if (Subtarget.hasAVX() && N->getOpcode() != ISD::SIGN_EXTEND_VECTOR_INREG) {
45922    SDValue Op(N, 0);
45923    if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getValueType()))
45924      if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
45925        return Res;
45926  }
45927
45928  return SDValue();
45929}
45930
45931static SDValue combineKSHIFT(SDNode *N, SelectionDAG &DAG,
45932                             TargetLowering::DAGCombinerInfo &DCI) {
45933  EVT VT = N->getValueType(0);
45934
45935  if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
45936    return DAG.getConstant(0, SDLoc(N), VT);
45937
45938  APInt KnownUndef, KnownZero;
45939  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45940  APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
45941  if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
45942                                     KnownZero, DCI))
45943    return SDValue(N, 0);
45944
45945  return SDValue();
45946}
45947
45948SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
45949                                             DAGCombinerInfo &DCI) const {
45950  SelectionDAG &DAG = DCI.DAG;
45951  switch (N->getOpcode()) {
45952  default: break;
45953  case ISD::SCALAR_TO_VECTOR:
45954    return combineScalarToVector(N, DAG);
45955  case ISD::EXTRACT_VECTOR_ELT:
45956  case X86ISD::PEXTRW:
45957  case X86ISD::PEXTRB:
45958    return combineExtractVectorElt(N, DAG, DCI, Subtarget);
45959  case ISD::CONCAT_VECTORS:
45960    return combineConcatVectors(N, DAG, DCI, Subtarget);
45961  case ISD::INSERT_SUBVECTOR:
45962    return combineInsertSubvector(N, DAG, DCI, Subtarget);
45963  case ISD::EXTRACT_SUBVECTOR:
45964    return combineExtractSubvector(N, DAG, DCI, Subtarget);
45965  case ISD::VSELECT:
45966  case ISD::SELECT:
45967  case X86ISD::BLENDV:      return combineSelect(N, DAG, DCI, Subtarget);
45968  case ISD::BITCAST:        return combineBitcast(N, DAG, DCI, Subtarget);
45969  case X86ISD::CMOV:        return combineCMov(N, DAG, DCI, Subtarget);
45970  case X86ISD::CMP:         return combineCMP(N, DAG);
45971  case ISD::ADD:            return combineAdd(N, DAG, DCI, Subtarget);
45972  case ISD::SUB:            return combineSub(N, DAG, DCI, Subtarget);
45973  case X86ISD::ADD:
45974  case X86ISD::SUB:         return combineX86AddSub(N, DAG, DCI);
45975  case X86ISD::SBB:         return combineSBB(N, DAG);
45976  case X86ISD::ADC:         return combineADC(N, DAG, DCI);
45977  case ISD::MUL:            return combineMul(N, DAG, DCI, Subtarget);
45978  case ISD::SHL:            return combineShiftLeft(N, DAG);
45979  case ISD::SRA:            return combineShiftRightArithmetic(N, DAG);
45980  case ISD::SRL:            return combineShiftRightLogical(N, DAG, DCI);
45981  case ISD::AND:            return combineAnd(N, DAG, DCI, Subtarget);
45982  case ISD::OR:             return combineOr(N, DAG, DCI, Subtarget);
45983  case ISD::XOR:            return combineXor(N, DAG, DCI, Subtarget);
45984  case X86ISD::BEXTR:       return combineBEXTR(N, DAG, DCI, Subtarget);
45985  case ISD::LOAD:           return combineLoad(N, DAG, DCI, Subtarget);
45986  case ISD::MLOAD:          return combineMaskedLoad(N, DAG, DCI, Subtarget);
45987  case ISD::STORE:          return combineStore(N, DAG, DCI, Subtarget);
45988  case ISD::MSTORE:         return combineMaskedStore(N, DAG, DCI, Subtarget);
45989  case ISD::SINT_TO_FP:
45990  case ISD::STRICT_SINT_TO_FP:
45991    return combineSIntToFP(N, DAG, DCI, Subtarget);
45992  case ISD::UINT_TO_FP:
45993  case ISD::STRICT_UINT_TO_FP:
45994    return combineUIntToFP(N, DAG, Subtarget);
45995  case ISD::FADD:
45996  case ISD::FSUB:           return combineFaddFsub(N, DAG, Subtarget);
45997  case ISD::FNEG:           return combineFneg(N, DAG, Subtarget);
45998  case ISD::TRUNCATE:       return combineTruncate(N, DAG, Subtarget);
45999  case X86ISD::VTRUNC:      return combineVTRUNC(N, DAG);
46000  case X86ISD::ANDNP:       return combineAndnp(N, DAG, DCI, Subtarget);
46001  case X86ISD::FAND:        return combineFAnd(N, DAG, Subtarget);
46002  case X86ISD::FANDN:       return combineFAndn(N, DAG, Subtarget);
46003  case X86ISD::FXOR:
46004  case X86ISD::FOR:         return combineFOr(N, DAG, Subtarget);
46005  case X86ISD::FMIN:
46006  case X86ISD::FMAX:        return combineFMinFMax(N, DAG);
46007  case ISD::FMINNUM:
46008  case ISD::FMAXNUM:        return combineFMinNumFMaxNum(N, DAG, Subtarget);
46009  case X86ISD::CVTSI2P:
46010  case X86ISD::CVTUI2P:     return combineX86INT_TO_FP(N, DAG, DCI);
46011  case X86ISD::CVTP2SI:
46012  case X86ISD::CVTP2UI:
46013  case X86ISD::CVTTP2SI:
46014  case X86ISD::CVTTP2UI:    return combineCVTP2I_CVTTP2I(N, DAG, DCI);
46015  case X86ISD::BT:          return combineBT(N, DAG, DCI);
46016  case ISD::ANY_EXTEND:
46017  case ISD::ZERO_EXTEND:    return combineZext(N, DAG, DCI, Subtarget);
46018  case ISD::SIGN_EXTEND:    return combineSext(N, DAG, DCI, Subtarget);
46019  case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
46020  case ISD::ANY_EXTEND_VECTOR_INREG:
46021  case ISD::SIGN_EXTEND_VECTOR_INREG:
46022  case ISD::ZERO_EXTEND_VECTOR_INREG: return combineExtInVec(N, DAG, DCI,
46023                                                             Subtarget);
46024  case ISD::SETCC:          return combineSetCC(N, DAG, Subtarget);
46025  case X86ISD::SETCC:       return combineX86SetCC(N, DAG, Subtarget);
46026  case X86ISD::BRCOND:      return combineBrCond(N, DAG, Subtarget);
46027  case X86ISD::PACKSS:
46028  case X86ISD::PACKUS:      return combineVectorPack(N, DAG, DCI, Subtarget);
46029  case X86ISD::VSHL:
46030  case X86ISD::VSRA:
46031  case X86ISD::VSRL:
46032    return combineVectorShiftVar(N, DAG, DCI, Subtarget);
46033  case X86ISD::VSHLI:
46034  case X86ISD::VSRAI:
46035  case X86ISD::VSRLI:
46036    return combineVectorShiftImm(N, DAG, DCI, Subtarget);
46037  case X86ISD::PINSRB:
46038  case X86ISD::PINSRW:      return combineVectorInsert(N, DAG, DCI, Subtarget);
46039  case X86ISD::SHUFP:       // Handle all target specific shuffles
46040  case X86ISD::INSERTPS:
46041  case X86ISD::EXTRQI:
46042  case X86ISD::INSERTQI:
46043  case X86ISD::PALIGNR:
46044  case X86ISD::VSHLDQ:
46045  case X86ISD::VSRLDQ:
46046  case X86ISD::BLENDI:
46047  case X86ISD::UNPCKH:
46048  case X86ISD::UNPCKL:
46049  case X86ISD::MOVHLPS:
46050  case X86ISD::MOVLHPS:
46051  case X86ISD::PSHUFB:
46052  case X86ISD::PSHUFD:
46053  case X86ISD::PSHUFHW:
46054  case X86ISD::PSHUFLW:
46055  case X86ISD::MOVSHDUP:
46056  case X86ISD::MOVSLDUP:
46057  case X86ISD::MOVDDUP:
46058  case X86ISD::MOVSS:
46059  case X86ISD::MOVSD:
46060  case X86ISD::VBROADCAST:
46061  case X86ISD::VPPERM:
46062  case X86ISD::VPERMI:
46063  case X86ISD::VPERMV:
46064  case X86ISD::VPERMV3:
46065  case X86ISD::VPERMIL2:
46066  case X86ISD::VPERMILPI:
46067  case X86ISD::VPERMILPV:
46068  case X86ISD::VPERM2X128:
46069  case X86ISD::SHUF128:
46070  case X86ISD::VZEXT_MOVL:
46071  case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI,Subtarget);
46072  case X86ISD::FMADD_RND:
46073  case X86ISD::FMSUB:
46074  case X86ISD::FMSUB_RND:
46075  case X86ISD::FNMADD:
46076  case X86ISD::FNMADD_RND:
46077  case X86ISD::FNMSUB:
46078  case X86ISD::FNMSUB_RND:
46079  case ISD::FMA: return combineFMA(N, DAG, DCI, Subtarget);
46080  case X86ISD::FMADDSUB_RND:
46081  case X86ISD::FMSUBADD_RND:
46082  case X86ISD::FMADDSUB:
46083  case X86ISD::FMSUBADD:    return combineFMADDSUB(N, DAG, DCI);
46084  case X86ISD::MOVMSK:      return combineMOVMSK(N, DAG, DCI, Subtarget);
46085  case X86ISD::MGATHER:
46086  case X86ISD::MSCATTER:    return combineX86GatherScatter(N, DAG, DCI);
46087  case ISD::MGATHER:
46088  case ISD::MSCATTER:       return combineGatherScatter(N, DAG, DCI);
46089  case X86ISD::PCMPEQ:
46090  case X86ISD::PCMPGT:      return combineVectorCompare(N, DAG, Subtarget);
46091  case X86ISD::PMULDQ:
46092  case X86ISD::PMULUDQ:     return combinePMULDQ(N, DAG, DCI, Subtarget);
46093  case X86ISD::KSHIFTL:
46094  case X86ISD::KSHIFTR:     return combineKSHIFT(N, DAG, DCI);
46095  }
46096
46097  return SDValue();
46098}
46099
46100bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
46101  if (!isTypeLegal(VT))
46102    return false;
46103
46104  // There are no vXi8 shifts.
46105  if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8)
46106    return false;
46107
46108  // TODO: Almost no 8-bit ops are desirable because they have no actual
46109  //       size/speed advantages vs. 32-bit ops, but they do have a major
46110  //       potential disadvantage by causing partial register stalls.
46111  //
46112  // 8-bit multiply/shl is probably not cheaper than 32-bit multiply/shl, and
46113  // we have specializations to turn 32-bit multiply/shl into LEA or other ops.
46114  // Also, see the comment in "IsDesirableToPromoteOp" - where we additionally
46115  // check for a constant operand to the multiply.
46116  if ((Opc == ISD::MUL || Opc == ISD::SHL) && VT == MVT::i8)
46117    return false;
46118
46119  // i16 instruction encodings are longer and some i16 instructions are slow,
46120  // so those are not desirable.
46121  if (VT == MVT::i16) {
46122    switch (Opc) {
46123    default:
46124      break;
46125    case ISD::LOAD:
46126    case ISD::SIGN_EXTEND:
46127    case ISD::ZERO_EXTEND:
46128    case ISD::ANY_EXTEND:
46129    case ISD::SHL:
46130    case ISD::SRA:
46131    case ISD::SRL:
46132    case ISD::SUB:
46133    case ISD::ADD:
46134    case ISD::MUL:
46135    case ISD::AND:
46136    case ISD::OR:
46137    case ISD::XOR:
46138      return false;
46139    }
46140  }
46141
46142  // Any legal type not explicitly accounted for above here is desirable.
46143  return true;
46144}
46145
46146SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc& dl,
46147                                                  SDValue Value, SDValue Addr,
46148                                                  SelectionDAG &DAG) const {
46149  const Module *M = DAG.getMachineFunction().getMMI().getModule();
46150  Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
46151  if (IsCFProtectionSupported) {
46152    // In case control-flow branch protection is enabled, we need to add a
46153    // notrack prefix to the indirect branch.
46154    // In order to do that we create an NT_BRIND SDNode.
46155    // Upon ISEL, the pattern will convert it to a jmp with the NoTrack prefix.
46156    return DAG.getNode(X86ISD::NT_BRIND, dl, MVT::Other, Value, Addr);
46157  }
46158
46159  return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, DAG);
46160}
46161
46162bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
46163  EVT VT = Op.getValueType();
46164  bool Is8BitMulByConstant = VT == MVT::i8 && Op.getOpcode() == ISD::MUL &&
46165                             isa<ConstantSDNode>(Op.getOperand(1));
46166
46167  // i16 is legal, but undesirable since i16 instruction encodings are longer
46168  // and some i16 instructions are slow.
46169  // 8-bit multiply-by-constant can usually be expanded to something cheaper
46170  // using LEA and/or other ALU ops.
46171  if (VT != MVT::i16 && !Is8BitMulByConstant)
46172    return false;
46173
46174  auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
46175    if (!Op.hasOneUse())
46176      return false;
46177    SDNode *User = *Op->use_begin();
46178    if (!ISD::isNormalStore(User))
46179      return false;
46180    auto *Ld = cast<LoadSDNode>(Load);
46181    auto *St = cast<StoreSDNode>(User);
46182    return Ld->getBasePtr() == St->getBasePtr();
46183  };
46184
46185  auto IsFoldableAtomicRMW = [](SDValue Load, SDValue Op) {
46186    if (!Load.hasOneUse() || Load.getOpcode() != ISD::ATOMIC_LOAD)
46187      return false;
46188    if (!Op.hasOneUse())
46189      return false;
46190    SDNode *User = *Op->use_begin();
46191    if (User->getOpcode() != ISD::ATOMIC_STORE)
46192      return false;
46193    auto *Ld = cast<AtomicSDNode>(Load);
46194    auto *St = cast<AtomicSDNode>(User);
46195    return Ld->getBasePtr() == St->getBasePtr();
46196  };
46197
46198  bool Commute = false;
46199  switch (Op.getOpcode()) {
46200  default: return false;
46201  case ISD::SIGN_EXTEND:
46202  case ISD::ZERO_EXTEND:
46203  case ISD::ANY_EXTEND:
46204    break;
46205  case ISD::SHL:
46206  case ISD::SRA:
46207  case ISD::SRL: {
46208    SDValue N0 = Op.getOperand(0);
46209    // Look out for (store (shl (load), x)).
46210    if (MayFoldLoad(N0) && IsFoldableRMW(N0, Op))
46211      return false;
46212    break;
46213  }
46214  case ISD::ADD:
46215  case ISD::MUL:
46216  case ISD::AND:
46217  case ISD::OR:
46218  case ISD::XOR:
46219    Commute = true;
46220    LLVM_FALLTHROUGH;
46221  case ISD::SUB: {
46222    SDValue N0 = Op.getOperand(0);
46223    SDValue N1 = Op.getOperand(1);
46224    // Avoid disabling potential load folding opportunities.
46225    if (MayFoldLoad(N1) &&
46226        (!Commute || !isa<ConstantSDNode>(N0) ||
46227         (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N1, Op))))
46228      return false;
46229    if (MayFoldLoad(N0) &&
46230        ((Commute && !isa<ConstantSDNode>(N1)) ||
46231         (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N0, Op))))
46232      return false;
46233    if (IsFoldableAtomicRMW(N0, Op) ||
46234        (Commute && IsFoldableAtomicRMW(N1, Op)))
46235      return false;
46236  }
46237  }
46238
46239  PVT = MVT::i32;
46240  return true;
46241}
46242
46243bool X86TargetLowering::
46244    isDesirableToCombineBuildVectorToShuffleTruncate(
46245        ArrayRef<int> ShuffleMask, EVT SrcVT, EVT TruncVT) const {
46246
46247  assert(SrcVT.getVectorNumElements() == ShuffleMask.size() &&
46248         "Element count mismatch");
46249  assert(
46250      Subtarget.getTargetLowering()->isShuffleMaskLegal(ShuffleMask, SrcVT) &&
46251      "Shuffle Mask expected to be legal");
46252
46253  // For 32-bit elements VPERMD is better than shuffle+truncate.
46254  // TODO: After we improve lowerBuildVector, add exception for VPERMW.
46255  if (SrcVT.getScalarSizeInBits() == 32 || !Subtarget.hasAVX2())
46256    return false;
46257
46258  if (is128BitLaneCrossingShuffleMask(SrcVT.getSimpleVT(), ShuffleMask))
46259    return false;
46260
46261  return true;
46262}
46263
46264//===----------------------------------------------------------------------===//
46265//                           X86 Inline Assembly Support
46266//===----------------------------------------------------------------------===//
46267
46268// Helper to match a string against a sequence of pieces separated by whitespace.
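     // For example, matchAsm("bswap $0", {"bswap", "$0"}) succeeds, while
     // matchAsm("bswapper $0", {"bswap", "$0"}) fails because "bswap" only
     // matches a prefix of the first token.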
46269static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
46270  S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.
46271
46272  for (StringRef Piece : Pieces) {
46273    if (!S.startswith(Piece)) // Check if the piece matches.
46274      return false;
46275
46276    S = S.substr(Piece.size());
46277    StringRef::size_type Pos = S.find_first_not_of(" \t");
46278    if (Pos == 0) // We matched a prefix.
46279      return false;
46280
46281    S = S.substr(Pos);
46282  }
46283
46284  return S.empty();
46285}
46286
46287static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
46288
46289  if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
46290    if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
46291        std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
46292        std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
46293
46294      if (AsmPieces.size() == 3)
46295        return true;
46296      else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
46297        return true;
46298    }
46299  }
46300  return false;
46301}
46302
46303bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
46304  InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
46305
46306  const std::string &AsmStr = IA->getAsmString();
46307
46308  IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
46309  if (!Ty || Ty->getBitWidth() % 16 != 0)
46310    return false;
46311
46312  // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
46313  SmallVector<StringRef, 4> AsmPieces;
46314  SplitString(AsmStr, AsmPieces, ";\n");
46315
46316  switch (AsmPieces.size()) {
46317  default: return false;
46318  case 1:
46319    // FIXME: this should verify that we are targeting a 486 or better.  If not,
46320    // we will turn this bswap into something that will be lowered to logical
46321    // ops instead of emitting the bswap asm.  For now, we don't support 486 or
46322    // lower so don't worry about this.
46323    // bswap $0
46324    if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
46325        matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
46326        matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
46327        matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
46328        matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
46329        matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
46330      // No need to check constraints, nothing other than the equivalent of
46331      // "=r,0" would be valid here.
46332      return IntrinsicLowering::LowerToByteSwap(CI);
46333    }
46334
46335    // rorw $$8, ${0:w}  -->  llvm.bswap.i16
46336    if (CI->getType()->isIntegerTy(16) &&
46337        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
46338        (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
46339         matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
46340      AsmPieces.clear();
46341      StringRef ConstraintsStr = IA->getConstraintString();
46342      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
46343      array_pod_sort(AsmPieces.begin(), AsmPieces.end());
46344      if (clobbersFlagRegisters(AsmPieces))
46345        return IntrinsicLowering::LowerToByteSwap(CI);
46346    }
46347    break;
46348  case 3:
46349    if (CI->getType()->isIntegerTy(32) &&
46350        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
46351        matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
46352        matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
46353        matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
46354      AsmPieces.clear();
46355      StringRef ConstraintsStr = IA->getConstraintString();
46356      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
46357      array_pod_sort(AsmPieces.begin(), AsmPieces.end());
46358      if (clobbersFlagRegisters(AsmPieces))
46359        return IntrinsicLowering::LowerToByteSwap(CI);
46360    }
46361
46362    if (CI->getType()->isIntegerTy(64)) {
46363      InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
46364      if (Constraints.size() >= 2 &&
46365          Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
46366          Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
46367        // bswap %eax / bswap %edx / xchgl %eax, %edx  -> llvm.bswap.i64
46368        if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
46369            matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
46370            matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
46371          return IntrinsicLowering::LowerToByteSwap(CI);
46372      }
46373    }
46374    break;
46375  }
46376  return false;
46377}
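// An illustrative source-level pattern for the i64 case above (the classic
// 32-bit byte-swap idiom; the exact spelling at the C level may vary):
//   asm("bswap %%eax\n\tbswap %%edx\n\txchgl %%eax, %%edx" : "=A"(x) : "0"(x));
// It reaches this hook with constraint codes "A" (the EDX:EAX pair) and "0"
// (tied input) and is replaced by a call to llvm.bswap.i64.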
46378
46379static X86::CondCode parseConstraintCode(llvm::StringRef Constraint) {
46380  X86::CondCode Cond = StringSwitch<X86::CondCode>(Constraint)
46381                           .Case("{@cca}", X86::COND_A)
46382                           .Case("{@ccae}", X86::COND_AE)
46383                           .Case("{@ccb}", X86::COND_B)
46384                           .Case("{@ccbe}", X86::COND_BE)
46385                           .Case("{@ccc}", X86::COND_B)
46386                           .Case("{@cce}", X86::COND_E)
46387                           .Case("{@ccz}", X86::COND_E)
46388                           .Case("{@ccg}", X86::COND_G)
46389                           .Case("{@ccge}", X86::COND_GE)
46390                           .Case("{@ccl}", X86::COND_L)
46391                           .Case("{@ccle}", X86::COND_LE)
46392                           .Case("{@ccna}", X86::COND_BE)
46393                           .Case("{@ccnae}", X86::COND_B)
46394                           .Case("{@ccnb}", X86::COND_AE)
46395                           .Case("{@ccnbe}", X86::COND_A)
46396                           .Case("{@ccnc}", X86::COND_AE)
46397                           .Case("{@ccne}", X86::COND_NE)
46398                           .Case("{@ccnz}", X86::COND_NE)
46399                           .Case("{@ccng}", X86::COND_LE)
46400                           .Case("{@ccnge}", X86::COND_L)
46401                           .Case("{@ccnl}", X86::COND_GE)
46402                           .Case("{@ccnle}", X86::COND_G)
46403                           .Case("{@ccno}", X86::COND_NO)
46404                           .Case("{@ccnp}", X86::COND_P)
46405                           .Case("{@ccns}", X86::COND_NS)
46406                           .Case("{@cco}", X86::COND_O)
46407                           .Case("{@ccp}", X86::COND_P)
46408                           .Case("{@ccs}", X86::COND_S)
46409                           .Default(X86::COND_INVALID);
46410  return Cond;
46411}
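// These names correspond to GCC's flag-output constraints. An illustrative use
// (assuming a GCC-style front end):
//   unsigned char carry;
//   asm("addq %2, %1" : "=@ccc"(carry), "+r"(lo) : "r"(addend));
// The front end encodes "=@ccc" as the constraint "{@ccc}", which this helper
// maps to X86::COND_B.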
46412
46413/// Given a constraint letter, return the type of constraint for this target.
46414X86TargetLowering::ConstraintType
46415X86TargetLowering::getConstraintType(StringRef Constraint) const {
46416  if (Constraint.size() == 1) {
46417    switch (Constraint[0]) {
46418    case 'R':
46419    case 'q':
46420    case 'Q':
46421    case 'f':
46422    case 't':
46423    case 'u':
46424    case 'y':
46425    case 'x':
46426    case 'v':
46427    case 'Y':
46428    case 'l':
46429    case 'k': // AVX512 masking registers.
46430      return C_RegisterClass;
46431    case 'a':
46432    case 'b':
46433    case 'c':
46434    case 'd':
46435    case 'S':
46436    case 'D':
46437    case 'A':
46438      return C_Register;
46439    case 'I':
46440    case 'J':
46441    case 'K':
46442    case 'N':
46443    case 'G':
46444    case 'L':
46445    case 'M':
46446      return C_Immediate;
46447    case 'C':
46448    case 'e':
46449    case 'Z':
46450      return C_Other;
46451    default:
46452      break;
46453    }
46454  }
46455  else if (Constraint.size() == 2) {
46456    switch (Constraint[0]) {
46457    default:
46458      break;
46459    case 'Y':
46460      switch (Constraint[1]) {
46461      default:
46462        break;
46463      case 'z':
46464      case '0':
46465        return C_Register;
46466      case 'i':
46467      case 'm':
46468      case 'k':
46469      case 't':
46470      case '2':
46471        return C_RegisterClass;
46472      }
46473    }
46474  } else if (parseConstraintCode(Constraint) != X86::COND_INVALID)
46475    return C_Other;
46476  return TargetLowering::getConstraintType(Constraint);
46477}
46478
46479/// Examine constraint type and operand type and determine a weight value.
46480/// This object must already have been set up with the operand type
46481/// and the current alternative constraint selected.
46482TargetLowering::ConstraintWeight
46483  X86TargetLowering::getSingleConstraintMatchWeight(
46484    AsmOperandInfo &info, const char *constraint) const {
46485  ConstraintWeight weight = CW_Invalid;
46486  Value *CallOperandVal = info.CallOperandVal;
46487  // If we don't have a value, we can't do a match,
46488  // but allow it at the lowest weight.
46489  if (!CallOperandVal)
46490    return CW_Default;
46491  Type *type = CallOperandVal->getType();
46492  // Look at the constraint type.
46493  switch (*constraint) {
46494  default:
46495    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
46496    LLVM_FALLTHROUGH;
46497  case 'R':
46498  case 'q':
46499  case 'Q':
46500  case 'a':
46501  case 'b':
46502  case 'c':
46503  case 'd':
46504  case 'S':
46505  case 'D':
46506  case 'A':
46507    if (CallOperandVal->getType()->isIntegerTy())
46508      weight = CW_SpecificReg;
46509    break;
46510  case 'f':
46511  case 't':
46512  case 'u':
46513    if (type->isFloatingPointTy())
46514      weight = CW_SpecificReg;
46515    break;
46516  case 'y':
46517    if (type->isX86_MMXTy() && Subtarget.hasMMX())
46518      weight = CW_SpecificReg;
46519    break;
46520  case 'Y': {
46521    unsigned Size = StringRef(constraint).size();
46522    // When matching just 'Y', treat it as 'Yi' since the two are synonymous.
46523    char NextChar = Size == 2 ? constraint[1] : 'i';
46524    if (Size > 2)
46525      break;
46526    switch (NextChar) {
46527      default:
46528        return CW_Invalid;
46529      // XMM0
46530      case 'z':
46531      case '0':
46532        if ((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1())
46533          return CW_SpecificReg;
46534        return CW_Invalid;
46535      // Conditional OpMask regs (AVX512)
46536      case 'k':
46537        if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
46538          return CW_Register;
46539        return CW_Invalid;
46540      // Any MMX reg
46541      case 'm':
46542        if (type->isX86_MMXTy() && Subtarget.hasMMX())
46543          return weight;
46544        return CW_Invalid;
46545      // Any SSE reg when ISA >= SSE2, same as 'Y'
46546      case 'i':
46547      case 't':
46548      case '2':
46549        if (!Subtarget.hasSSE2())
46550          return CW_Invalid;
46551        break;
46552    }
46553    // Fall through (handle "Y" constraint).
46554    LLVM_FALLTHROUGH;
46555  }
46556  case 'v':
46557    if ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
46558      weight = CW_Register;
46559    LLVM_FALLTHROUGH;
46560  case 'x':
46561    if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
46562        ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()))
46563      weight = CW_Register;
46564    break;
46565  case 'k':
46566    // Enable conditional vector operations using %k<#> registers.
46567    if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
46568      weight = CW_Register;
46569    break;
46570  case 'I':
46571    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
46572      if (C->getZExtValue() <= 31)
46573        weight = CW_Constant;
46574    }
46575    break;
46576  case 'J':
46577    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46578      if (C->getZExtValue() <= 63)
46579        weight = CW_Constant;
46580    }
46581    break;
46582  case 'K':
46583    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46584      if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
46585        weight = CW_Constant;
46586    }
46587    break;
46588  case 'L':
46589    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46590      if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
46591        weight = CW_Constant;
46592    }
46593    break;
46594  case 'M':
46595    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46596      if (C->getZExtValue() <= 3)
46597        weight = CW_Constant;
46598    }
46599    break;
46600  case 'N':
46601    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46602      if (C->getZExtValue() <= 0xff)
46603        weight = CW_Constant;
46604    }
46605    break;
46606  case 'G':
46607  case 'C':
46608    if (isa<ConstantFP>(CallOperandVal)) {
46609      weight = CW_Constant;
46610    }
46611    break;
46612  case 'e':
46613    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46614      if ((C->getSExtValue() >= -0x80000000LL) &&
46615          (C->getSExtValue() <= 0x7fffffffLL))
46616        weight = CW_Constant;
46617    }
46618    break;
46619  case 'Z':
46620    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
46621      if (C->getZExtValue() <= 0xffffffff)
46622        weight = CW_Constant;
46623    }
46624    break;
46625  }
46626  return weight;
46627}
46628
46629/// Try to replace an X constraint, which matches anything, with another that
46630/// has more specific requirements based on the type of the corresponding
46631/// operand.
46632const char *X86TargetLowering::
46633LowerXConstraint(EVT ConstraintVT) const {
46634  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
46635  // 'f' like normal targets.
46636  if (ConstraintVT.isFloatingPoint()) {
46637    if (Subtarget.hasSSE2())
46638      return "Y";
46639    if (Subtarget.hasSSE1())
46640      return "x";
46641  }
46642
46643  return TargetLowering::LowerXConstraint(ConstraintVT);
46644}
46645
46646// Lower @cc targets via setcc.
46647SDValue X86TargetLowering::LowerAsmOutputForConstraint(
46648    SDValue &Chain, SDValue &Flag, SDLoc DL, const AsmOperandInfo &OpInfo,
46649    SelectionDAG &DAG) const {
46650  X86::CondCode Cond = parseConstraintCode(OpInfo.ConstraintCode);
46651  if (Cond == X86::COND_INVALID)
46652    return SDValue();
46653  // Check that return type is valid.
46654  if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
46655      OpInfo.ConstraintVT.getSizeInBits() < 8)
46656    report_fatal_error("Flag output operand is of invalid type");
46657
46658  // Get EFLAGS register. Only update chain when copyfrom is glued.
46659  if (Flag.getNode()) {
46660    Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32, Flag);
46661    Chain = Flag.getValue(1);
46662  } else
46663    Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32);
46664  // Extract CC code.
46665  SDValue CC = getSETCC(Cond, Flag, DL, DAG);
46666  // Zero-extend the flag result to the output type.
46667  SDValue Result = DAG.getNode(ISD::ZERO_EXTEND, DL, OpInfo.ConstraintVT, CC);
46668
46669  return Result;
46670}
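// Sketch of the nodes built above for an "{@ccne}" output of type i32:
//   Flag   = CopyFromReg(Chain, EFLAGS)        ; read the flags
//   CC     = X86ISD::SETCC(COND_NE, Flag)      ; i8 0/1 result
//   Result = zero_extend CC to i32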
46671
46672/// Lower the specified operand into the Ops vector.
46673/// If it is invalid, don't add anything to Ops.
46674void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
46675                                                     std::string &Constraint,
46676                                                     std::vector<SDValue>&Ops,
46677                                                     SelectionDAG &DAG) const {
46678  SDValue Result;
46679
46680  // Only support length 1 constraints for now.
46681  if (Constraint.length() > 1) return;
46682
46683  char ConstraintLetter = Constraint[0];
46684  switch (ConstraintLetter) {
46685  default: break;
46686  case 'I':
46687    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46688      if (C->getZExtValue() <= 31) {
46689        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46690                                       Op.getValueType());
46691        break;
46692      }
46693    }
46694    return;
46695  case 'J':
46696    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46697      if (C->getZExtValue() <= 63) {
46698        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46699                                       Op.getValueType());
46700        break;
46701      }
46702    }
46703    return;
46704  case 'K':
46705    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46706      if (isInt<8>(C->getSExtValue())) {
46707        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46708                                       Op.getValueType());
46709        break;
46710      }
46711    }
46712    return;
46713  case 'L':
46714    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46715      if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
46716          (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
46717        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
46718                                       Op.getValueType());
46719        break;
46720      }
46721    }
46722    return;
46723  case 'M':
46724    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46725      if (C->getZExtValue() <= 3) {
46726        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46727                                       Op.getValueType());
46728        break;
46729      }
46730    }
46731    return;
46732  case 'N':
46733    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46734      if (C->getZExtValue() <= 255) {
46735        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46736                                       Op.getValueType());
46737        break;
46738      }
46739    }
46740    return;
46741  case 'O':
46742    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46743      if (C->getZExtValue() <= 127) {
46744        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46745                                       Op.getValueType());
46746        break;
46747      }
46748    }
46749    return;
46750  case 'e': {
46751    // 32-bit signed value
46752    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46753      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
46754                                           C->getSExtValue())) {
46755        // Widen to 64 bits here to get it sign extended.
46756        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
46757        break;
46758      }
46759    // FIXME gcc accepts some relocatable values here too, but only in certain
46760    // memory models; it's complicated.
46761    }
46762    return;
46763  }
46764  case 'Z': {
46765    // 32-bit unsigned value
46766    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
46767      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
46768                                           C->getZExtValue())) {
46769        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
46770                                       Op.getValueType());
46771        break;
46772      }
46773    }
46774    // FIXME gcc accepts some relocatable values here too, but only in certain
46775    // memory models; it's complicated.
46776    return;
46777  }
46778  case 'i': {
46779    // Literal immediates are always ok.
46780    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
46781      bool IsBool = CST->getConstantIntValue()->getBitWidth() == 1;
46782      BooleanContent BCont = getBooleanContents(MVT::i64);
46783      ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
46784                                    : ISD::SIGN_EXTEND;
46785      int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? CST->getZExtValue()
46786                                                  : CST->getSExtValue();
46787      Result = DAG.getTargetConstant(ExtVal, SDLoc(Op), MVT::i64);
46788      break;
46789    }
46790
46791    // In any sort of PIC mode addresses need to be computed at runtime by
46792    // adding in a register or some sort of table lookup.  These can't
46793    // be used as immediates.
46794    if (Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC())
46795      return;
46796
46797    // If we are in non-pic codegen mode, we allow the address of a global (with
46798    // an optional displacement) to be used with 'i'.
46799    if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op))
46800      // If we require an extra load to get this address, as in PIC mode, we
46801      // can't accept it.
46802      if (isGlobalStubReference(
46803              Subtarget.classifyGlobalReference(GA->getGlobal())))
46804        return;
46805    break;
46806  }
46807  }
46808
46809  if (Result.getNode()) {
46810    Ops.push_back(Result);
46811    return;
46812  }
46813  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
46814}
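// Illustrative (hypothetical) uses of two of the immediate constraints handled
// above:
//   asm("pslld %1, %0" : "+x"(v) : "I"(3));           // 'I': constants 0..31
//   asm volatile("outb %0, %1" :: "a"(b), "N"(0x80)); // 'N': constants 0..255
// Out-of-range constants are rejected here (nothing is added to Ops).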
46815
46816/// Check if \p RC is a general-purpose register class.
46817/// I.e., GR* or one of their variants.
46818static bool isGRClass(const TargetRegisterClass &RC) {
46819  return RC.hasSuperClassEq(&X86::GR8RegClass) ||
46820         RC.hasSuperClassEq(&X86::GR16RegClass) ||
46821         RC.hasSuperClassEq(&X86::GR32RegClass) ||
46822         RC.hasSuperClassEq(&X86::GR64RegClass) ||
46823         RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
46824}
46825
46826/// Check if \p RC is a vector register class.
46827/// I.e., FR* / VR* or one of their variants.
46828static bool isFRClass(const TargetRegisterClass &RC) {
46829  return RC.hasSuperClassEq(&X86::FR32XRegClass) ||
46830         RC.hasSuperClassEq(&X86::FR64XRegClass) ||
46831         RC.hasSuperClassEq(&X86::VR128XRegClass) ||
46832         RC.hasSuperClassEq(&X86::VR256XRegClass) ||
46833         RC.hasSuperClassEq(&X86::VR512RegClass);
46834}
46835
46836/// Check if \p RC is a mask register class.
46837/// I.e., VK* or one of their variants.
46838static bool isVKClass(const TargetRegisterClass &RC) {
46839  return RC.hasSuperClassEq(&X86::VK1RegClass) ||
46840         RC.hasSuperClassEq(&X86::VK2RegClass) ||
46841         RC.hasSuperClassEq(&X86::VK4RegClass) ||
46842         RC.hasSuperClassEq(&X86::VK8RegClass) ||
46843         RC.hasSuperClassEq(&X86::VK16RegClass) ||
46844         RC.hasSuperClassEq(&X86::VK32RegClass) ||
46845         RC.hasSuperClassEq(&X86::VK64RegClass);
46846}
46847
46848std::pair<unsigned, const TargetRegisterClass *>
46849X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
46850                                                StringRef Constraint,
46851                                                MVT VT) const {
46852  // First, see if this is a constraint that directly corresponds to an LLVM
46853  // register class.
46854  if (Constraint.size() == 1) {
46855    // GCC Constraint Letters
46856    switch (Constraint[0]) {
46857    default: break;
46858    // 'A' means [ER]AX + [ER]DX.
46859    case 'A':
46860      if (Subtarget.is64Bit())
46861        return std::make_pair(X86::RAX, &X86::GR64_ADRegClass);
46862      assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
46863             "Expecting 64, 32 or 16 bit subtarget");
46864      return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
46865
46866      // TODO: Slight differences here in allocation order and leaving
46867      // RIP in the class. Do they matter any more here than they do
46868      // in the normal allocation?
46869    case 'k':
46870      if (Subtarget.hasAVX512()) {
46871        if (VT == MVT::i1)
46872          return std::make_pair(0U, &X86::VK1RegClass);
46873        if (VT == MVT::i8)
46874          return std::make_pair(0U, &X86::VK8RegClass);
46875        if (VT == MVT::i16)
46876          return std::make_pair(0U, &X86::VK16RegClass);
46877      }
46878      if (Subtarget.hasBWI()) {
46879        if (VT == MVT::i32)
46880          return std::make_pair(0U, &X86::VK32RegClass);
46881        if (VT == MVT::i64)
46882          return std::make_pair(0U, &X86::VK64RegClass);
46883      }
46884      break;
46885    case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
46886      if (Subtarget.is64Bit()) {
46887        if (VT == MVT::i32 || VT == MVT::f32)
46888          return std::make_pair(0U, &X86::GR32RegClass);
46889        if (VT == MVT::i16)
46890          return std::make_pair(0U, &X86::GR16RegClass);
46891        if (VT == MVT::i8 || VT == MVT::i1)
46892          return std::make_pair(0U, &X86::GR8RegClass);
46893        if (VT == MVT::i64 || VT == MVT::f64)
46894          return std::make_pair(0U, &X86::GR64RegClass);
46895        break;
46896      }
46897      LLVM_FALLTHROUGH;
46898      // 32-bit fallthrough
46899    case 'Q':   // Q_REGS
46900      if (VT == MVT::i32 || VT == MVT::f32)
46901        return std::make_pair(0U, &X86::GR32_ABCDRegClass);
46902      if (VT == MVT::i16)
46903        return std::make_pair(0U, &X86::GR16_ABCDRegClass);
46904      if (VT == MVT::i8 || VT == MVT::i1)
46905        return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
46906      if (VT == MVT::i64)
46907        return std::make_pair(0U, &X86::GR64_ABCDRegClass);
46908      break;
46909    case 'r':   // GENERAL_REGS
46910    case 'l':   // INDEX_REGS
46911      if (VT == MVT::i8 || VT == MVT::i1)
46912        return std::make_pair(0U, &X86::GR8RegClass);
46913      if (VT == MVT::i16)
46914        return std::make_pair(0U, &X86::GR16RegClass);
46915      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
46916        return std::make_pair(0U, &X86::GR32RegClass);
46917      return std::make_pair(0U, &X86::GR64RegClass);
46918    case 'R':   // LEGACY_REGS
46919      if (VT == MVT::i8 || VT == MVT::i1)
46920        return std::make_pair(0U, &X86::GR8_NOREXRegClass);
46921      if (VT == MVT::i16)
46922        return std::make_pair(0U, &X86::GR16_NOREXRegClass);
46923      if (VT == MVT::i32 || !Subtarget.is64Bit())
46924        return std::make_pair(0U, &X86::GR32_NOREXRegClass);
46925      return std::make_pair(0U, &X86::GR64_NOREXRegClass);
46926    case 'f':  // FP Stack registers.
46927      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
46928      // value to the correct fpstack register class.
46929      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
46930        return std::make_pair(0U, &X86::RFP32RegClass);
46931      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
46932        return std::make_pair(0U, &X86::RFP64RegClass);
46933      return std::make_pair(0U, &X86::RFP80RegClass);
46934    case 'y':   // MMX_REGS if MMX allowed.
46935      if (!Subtarget.hasMMX()) break;
46936      return std::make_pair(0U, &X86::VR64RegClass);
46937    case 'Y':   // SSE_REGS if SSE2 allowed
46938      if (!Subtarget.hasSSE2()) break;
46939      LLVM_FALLTHROUGH;
46940    case 'v':
46941    case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
46942      if (!Subtarget.hasSSE1()) break;
46943      bool VConstraint = (Constraint[0] == 'v');
46944
46945      switch (VT.SimpleTy) {
46946      default: break;
46947      // Scalar SSE types.
46948      case MVT::f32:
46949      case MVT::i32:
46950        if (VConstraint && Subtarget.hasVLX())
46951          return std::make_pair(0U, &X86::FR32XRegClass);
46952        return std::make_pair(0U, &X86::FR32RegClass);
46953      case MVT::f64:
46954      case MVT::i64:
46955        if (VConstraint && Subtarget.hasVLX())
46956          return std::make_pair(0U, &X86::FR64XRegClass);
46957        return std::make_pair(0U, &X86::FR64RegClass);
46958      // TODO: Handle i128 in FR128RegClass after it is tested well.
46959      // Vector types and fp128.
46960      case MVT::f128:
46961      case MVT::v16i8:
46962      case MVT::v8i16:
46963      case MVT::v4i32:
46964      case MVT::v2i64:
46965      case MVT::v4f32:
46966      case MVT::v2f64:
46967        if (VConstraint && Subtarget.hasVLX())
46968          return std::make_pair(0U, &X86::VR128XRegClass);
46969        return std::make_pair(0U, &X86::VR128RegClass);
46970      // AVX types.
46971      case MVT::v32i8:
46972      case MVT::v16i16:
46973      case MVT::v8i32:
46974      case MVT::v4i64:
46975      case MVT::v8f32:
46976      case MVT::v4f64:
46977        if (VConstraint && Subtarget.hasVLX())
46978          return std::make_pair(0U, &X86::VR256XRegClass);
46979        if (Subtarget.hasAVX())
46980          return std::make_pair(0U, &X86::VR256RegClass);
46981        break;
46982      case MVT::v8f64:
46983      case MVT::v16f32:
46984      case MVT::v16i32:
46985      case MVT::v8i64:
46986        if (!Subtarget.hasAVX512()) break;
46987        if (VConstraint)
46988          return std::make_pair(0U, &X86::VR512RegClass);
46989        return std::make_pair(0U, &X86::VR512_0_15RegClass);
46990      }
46991      break;
46992    }
46993  } else if (Constraint.size() == 2 && Constraint[0] == 'Y') {
46994    switch (Constraint[1]) {
46995    default:
46996      break;
46997    case 'i':
46998    case 't':
46999    case '2':
47000      return getRegForInlineAsmConstraint(TRI, "Y", VT);
47001    case 'm':
47002      if (!Subtarget.hasMMX()) break;
47003      return std::make_pair(0U, &X86::VR64RegClass);
47004    case 'z':
47005    case '0':
47006      if (!Subtarget.hasSSE1()) break;
47007      return std::make_pair(X86::XMM0, &X86::VR128RegClass);
47008    case 'k':
47009      // This register class doesn't allocate k0 for masked vector operations.
47010      if (Subtarget.hasAVX512()) {
47011        if (VT == MVT::i1)
47012          return std::make_pair(0U, &X86::VK1WMRegClass);
47013        if (VT == MVT::i8)
47014          return std::make_pair(0U, &X86::VK8WMRegClass);
47015        if (VT == MVT::i16)
47016          return std::make_pair(0U, &X86::VK16WMRegClass);
47017      }
47018      if (Subtarget.hasBWI()) {
47019        if (VT == MVT::i32)
47020          return std::make_pair(0U, &X86::VK32WMRegClass);
47021        if (VT == MVT::i64)
47022          return std::make_pair(0U, &X86::VK64WMRegClass);
47023      }
47024      break;
47025    }
47026  }
47027
47028  if (parseConstraintCode(Constraint) != X86::COND_INVALID)
47029    return std::make_pair(0U, &X86::GR32RegClass);
47030
47031  // Use the default implementation in TargetLowering to convert the register
47032  // constraint into a member of a register class.
47033  std::pair<unsigned, const TargetRegisterClass*> Res;
47034  Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
47035
47036  // Not found as a standard register?
47037  if (!Res.second) {
47038    // Map {st(0)} .. {st(7)} to the corresponding FP stack register.
47039    if (Constraint.size() == 7 && Constraint[0] == '{' &&
47040        tolower(Constraint[1]) == 's' && tolower(Constraint[2]) == 't' &&
47041        Constraint[3] == '(' &&
47042        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
47043        Constraint[5] == ')' && Constraint[6] == '}') {
47044      // st(7) is not allocatable and thus not a member of RFP80. Return
47045      // singleton class in cases where we have a reference to it.
47046      if (Constraint[4] == '7')
47047        return std::make_pair(X86::FP7, &X86::RFP80_7RegClass);
47048      return std::make_pair(X86::FP0 + Constraint[4] - '0',
47049                            &X86::RFP80RegClass);
47050    }
47051
47052    // GCC allows "st(0)" to be called just plain "st".
47053    if (StringRef("{st}").equals_lower(Constraint))
47054      return std::make_pair(X86::FP0, &X86::RFP80RegClass);
47055
47056    // flags -> EFLAGS
47057    if (StringRef("{flags}").equals_lower(Constraint))
47058      return std::make_pair(X86::EFLAGS, &X86::CCRRegClass);
47059
47060    // dirflag -> DF
47061    if (StringRef("{dirflag}").equals_lower(Constraint))
47062      return std::make_pair(X86::DF, &X86::DFCCRRegClass);
47063
47064    // fpsr -> FPSW
47065    if (StringRef("{fpsr}").equals_lower(Constraint))
47066      return std::make_pair(X86::FPSW, &X86::FPCCRRegClass);
47067
47068    return Res;
47069  }
47070
47071  // Make sure it isn't a register that requires 64-bit mode.
47072  if (!Subtarget.is64Bit() &&
47073      (isFRClass(*Res.second) || isGRClass(*Res.second)) &&
47074      TRI->getEncodingValue(Res.first) >= 8) {
47075    // Register requires REX prefix, but we're in 32-bit mode.
47076    return std::make_pair(0, nullptr);
47077  }
47078
47079  // Make sure it isn't a register that requires AVX512.
47080  if (!Subtarget.hasAVX512() && isFRClass(*Res.second) &&
47081      TRI->getEncodingValue(Res.first) & 0x10) {
47082    // Register requires EVEX prefix.
47083    return std::make_pair(0, nullptr);
47084  }
47085
47086  // Otherwise, check to see if this is a register class of the wrong value
47087  // type.  For example, we want to map "{ax},i32" -> {eax}, we don't want it to
47088  // turn into {ax},{dx}.
47089  // MVT::Other is used to specify clobber names.
47090  if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other)
47091    return Res;   // Correct type already, nothing to do.
47092
47093  // Get a matching integer of the correct size, e.g. "ax" with MVT::i32 should
47094  // return "eax". This should even work for things like getting 64-bit integer
47095  // registers when given an f64 type.
47096  const TargetRegisterClass *Class = Res.second;
47097  // The generic code will match the first register class that contains the
47098  // given register. Thus, based on the ordering of the tablegened file,
47099  // the "plain" GR classes might not come first.
47100  // Therefore, use a helper method.
47101  if (isGRClass(*Class)) {
47102    unsigned Size = VT.getSizeInBits();
47103    if (Size == 1) Size = 8;
47104    unsigned DestReg = getX86SubSuperRegisterOrZero(Res.first, Size);
47105    if (DestReg > 0) {
47106      bool is64Bit = Subtarget.is64Bit();
47107      const TargetRegisterClass *RC =
47108          Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass)
47109        : Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass)
47110        : Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass)
47111        : Size == 64 ? (is64Bit ? &X86::GR64RegClass : nullptr)
47112        : nullptr;
47113      if (Size == 64 && !is64Bit) {
47114        // Model GCC's behavior here and select a fixed pair of 32-bit
47115        // registers.
47116        switch (DestReg) {
47117        case X86::RAX:
47118          return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
47119        case X86::RDX:
47120          return std::make_pair(X86::EDX, &X86::GR32_DCRegClass);
47121        case X86::RCX:
47122          return std::make_pair(X86::ECX, &X86::GR32_CBRegClass);
47123        case X86::RBX:
47124          return std::make_pair(X86::EBX, &X86::GR32_BSIRegClass);
47125        case X86::RSI:
47126          return std::make_pair(X86::ESI, &X86::GR32_SIDIRegClass);
47127        case X86::RDI:
47128          return std::make_pair(X86::EDI, &X86::GR32_DIBPRegClass);
47129        case X86::RBP:
47130          return std::make_pair(X86::EBP, &X86::GR32_BPSPRegClass);
47131        default:
47132          return std::make_pair(0, nullptr);
47133        }
47134      }
47135      if (RC && RC->contains(DestReg))
47136        return std::make_pair(DestReg, RC);
47137      return Res;
47138    }
47139    // No register found/type mismatch.
47140    return std::make_pair(0, nullptr);
47141  } else if (isFRClass(*Class)) {
47142    // Handle references to XMM physical registers that got mapped into the
47143    // wrong class.  This can happen with constraints like {xmm0} where the
47144    // target independent register mapper will just pick the first match it can
47145    // find, ignoring the required type.
47146
47147    // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
47148    if (VT == MVT::f32 || VT == MVT::i32)
47149      Res.second = &X86::FR32XRegClass;
47150    else if (VT == MVT::f64 || VT == MVT::i64)
47151      Res.second = &X86::FR64XRegClass;
47152    else if (TRI->isTypeLegalForClass(X86::VR128XRegClass, VT))
47153      Res.second = &X86::VR128XRegClass;
47154    else if (TRI->isTypeLegalForClass(X86::VR256XRegClass, VT))
47155      Res.second = &X86::VR256XRegClass;
47156    else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
47157      Res.second = &X86::VR512RegClass;
47158    else {
47159      // Type mismatch and not a clobber: return an error.
47160      Res.first = 0;
47161      Res.second = nullptr;
47162    }
47163  } else if (isVKClass(*Class)) {
47164    if (VT == MVT::i1)
47165      Res.second = &X86::VK1RegClass;
47166    else if (VT == MVT::i8)
47167      Res.second = &X86::VK8RegClass;
47168    else if (VT == MVT::i16)
47169      Res.second = &X86::VK16RegClass;
47170    else if (VT == MVT::i32)
47171      Res.second = &X86::VK32RegClass;
47172    else if (VT == MVT::i64)
47173      Res.second = &X86::VK64RegClass;
47174    else {
47175      // Type mismatch and not a clobber: return an error.
47176      Res.first = 0;
47177      Res.second = nullptr;
47178    }
47179  }
47180
47181  return Res;
47182}
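// A few concrete mappings produced above (illustrative):
//   "r" with i64 in 64-bit mode      -> GR64
//   "x" with v8f32 and AVX           -> VR256
//   "k" with i16 and AVX512          -> VK16
//   "{st(5)}"                        -> FP5 in RFP80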
47183
47184int X86TargetLowering::getScalingFactorCost(const DataLayout &DL,
47185                                            const AddrMode &AM, Type *Ty,
47186                                            unsigned AS) const {
47187  // Scaling factors are not free at all.
47188  // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
47189  // will take 2 allocations in the out of order engine instead of 1
47190  // for plain addressing mode, i.e. inst (reg1).
47191  // E.g.,
47192  // vaddps (%rsi,%rdx), %ymm0, %ymm1
47193  // Requires two allocations (one for the load, one for the computation)
47194  // whereas:
47195  // vaddps (%rsi), %ymm0, %ymm1
47196  // Requires just 1 allocation, i.e., freeing allocations for other operations
47197  // and having less micro operations to execute.
47198  //
47199  // For some X86 architectures, this is even worse because for instance for
47200  // stores, the complex addressing mode forces the instruction to use the
47201  // "load" ports instead of the dedicated "store" port.
47202  // E.g., on Haswell:
47203  // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
47204  // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
47205  if (isLegalAddressingMode(DL, AM, Ty, AS))
47206    // Scale represents reg2 * scale, thus account for 1
47207    // as soon as we use a second register.
47208    return AM.Scale != 0;
47209  return -1;
47210}
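// Concretely: a legal mode with no index register (e.g. "(%rsi)") costs 0, a
// legal mode that uses a scaled index (e.g. "(%rsi,%rdx,4)") costs 1, and an
// addressing mode that is not legal at all is reported as -1.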
47211
47212bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
47213  // Integer division on x86 is expensive. However, when aggressively optimizing
47214  // for code size, we prefer to use a div instruction, as it is usually smaller
47215  // than the alternative sequence.
47216  // The exception to this is vector division. Since x86 doesn't have vector
47217  // integer division, leaving the division as-is is a loss even in terms of
47218  // size, because it will have to be scalarized, while the alternative code
47219  // sequence can be performed in vector form.
47220  bool OptSize =
47221      Attr.hasAttribute(AttributeList::FunctionIndex, Attribute::MinSize);
47222  return OptSize && !VT.isVector();
47223}
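// For example, under minsize a scalar "udiv i32 %x, 7" is kept as an actual
// div instruction instead of being expanded into a multiply/shift sequence,
// while "udiv <4 x i32>" is still expanded because it would otherwise have to
// be scalarized.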
47224
47225void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
47226  if (!Subtarget.is64Bit())
47227    return;
47228
47229  // Update IsSplitCSR in X86MachineFunctionInfo.
47230  X86MachineFunctionInfo *AFI =
47231      Entry->getParent()->getInfo<X86MachineFunctionInfo>();
47232  AFI->setIsSplitCSR(true);
47233}
47234
47235void X86TargetLowering::insertCopiesSplitCSR(
47236    MachineBasicBlock *Entry,
47237    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
47238  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
47239  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
47240  if (!IStart)
47241    return;
47242
47243  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
47244  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
47245  MachineBasicBlock::iterator MBBI = Entry->begin();
47246  for (const MCPhysReg *I = IStart; *I; ++I) {
47247    const TargetRegisterClass *RC = nullptr;
47248    if (X86::GR64RegClass.contains(*I))
47249      RC = &X86::GR64RegClass;
47250    else
47251      llvm_unreachable("Unexpected register class in CSRsViaCopy!");
47252
47253    Register NewVR = MRI->createVirtualRegister(RC);
47254    // Create copy from CSR to a virtual register.
47255    // FIXME: this currently does not emit CFI pseudo-instructions, it works
47256    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
47257    // nounwind. If we want to generalize this later, we may need to emit
47258    // CFI pseudo-instructions.
47259    assert(
47260        Entry->getParent()->getFunction().hasFnAttribute(Attribute::NoUnwind) &&
47261        "Function should be nounwind in insertCopiesSplitCSR!");
47262    Entry->addLiveIn(*I);
47263    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
47264        .addReg(*I);
47265
47266    // Insert the copy-back instructions right before the terminator.
47267    for (auto *Exit : Exits)
47268      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
47269              TII->get(TargetOpcode::COPY), *I)
47270          .addReg(NewVR);
47271  }
47272}
47273
47274bool X86TargetLowering::supportSwiftError() const {
47275  return Subtarget.is64Bit();
47276}
47277
47278/// Returns the name of the symbol used to emit stack probes or the empty
47279/// string if not applicable.
47280StringRef
47281X86TargetLowering::getStackProbeSymbolName(MachineFunction &MF) const {
47282  // If the function specifically requests stack probes, emit them.
47283  if (MF.getFunction().hasFnAttribute("probe-stack"))
47284    return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
47285
47286  // Generally, if we aren't on Windows, the platform ABI does not include
47287  // support for stack probes, so don't emit them.
47288  if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
47289      MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
47290    return "";
47291
47292  // We need a stack probe to conform to the Windows ABI. Choose the right
47293  // symbol.
47294  if (Subtarget.is64Bit())
47295    return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
47296  return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk";
47297}
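// For example (hypothetical attribute value), a function carrying
// "probe-stack"="__my_probe" gets "__my_probe" regardless of target, while a
// plain function on 64-bit Windows gets "__chkstk" ("___chkstk_ms" on
// Cygwin/MinGW).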
47298
47299unsigned
47300X86TargetLowering::getStackProbeSize(MachineFunction &MF) const {
47301  // The default stack probe size is 4096 if the function has no
47302  // "stack-probe-size" attribute.
47303  unsigned StackProbeSize = 4096;
47304  const Function &Fn = MF.getFunction();
47305  if (Fn.hasFnAttribute("stack-probe-size"))
47306    Fn.getFnAttribute("stack-probe-size")
47307        .getValueAsString()
47308        .getAsInteger(0, StackProbeSize);
47309  return StackProbeSize;
47310}
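// E.g. a function attribute "stack-probe-size"="8192" requests a probe every
// 8192 bytes of stack growth; functions without the attribute use the default
// of 4096.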
47311