X86Subtarget.h revision 363496
//===-- X86Subtarget.h - Define Subtarget for the X86 ----------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the X86 specific subclass of TargetSubtargetInfo.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86SUBTARGET_H
#define LLVM_LIB_TARGET_X86_X86SUBTARGET_H

#include "X86FrameLowering.h"
#include "X86ISelLowering.h"
#include "X86InstrInfo.h"
#include "X86SelectionDAGInfo.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/Target/TargetMachine.h"
#include <climits>
#include <memory>

#define GET_SUBTARGETINFO_HEADER
#include "X86GenSubtargetInfo.inc"

namespace llvm {

class GlobalValue;

/// The X86 backend supports a number of different styles of PIC.
///
namespace PICStyles {

enum class Style {
  StubPIC,          // Used on i386-darwin in PIC mode.
  GOT,              // Used on 32-bit ELF when in PIC mode.
  RIPRel,           // Used on X86-64 when in PIC mode.
  None              // Set when not in PIC mode.
};

} // end namespace PICStyles
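
// Illustrative sketch (not part of this header): the X86Subtarget
// constructor picks one of these styles from the target triple and the PIC
// setting. The ordering below is a simplified assumption modeled on the
// companion X86Subtarget.cpp, not a verbatim copy of it.
//
//   void pickPICStyle(X86Subtarget &ST) {
//     if (!ST.isPositionIndependent())
//       ST.setPICStyle(PICStyles::Style::None);
//     else if (ST.is64Bit())
//       ST.setPICStyle(PICStyles::Style::RIPRel);  // RIP-relative addressing.
//     else if (ST.isTargetDarwin())
//       ST.setPICStyle(PICStyles::Style::StubPIC); // i386-darwin stubs.
//     else if (ST.isTargetELF())
//       ST.setPICStyle(PICStyles::Style::GOT);     // 32-bit ELF GOT.
//   }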

class X86Subtarget final : public X86GenSubtargetInfo {
public:
  // NOTE: Do not add anything new to this list. Coarse, CPU name based flags
  // are not a good idea. We should be migrating away from these.
  enum X86ProcFamilyEnum {
    Others,
    IntelAtom,
    IntelSLM
  };

protected:
  enum X86SSEEnum {
    NoSSE, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, AVX, AVX2, AVX512F
  };

  enum X863DNowEnum {
    NoThreeDNow, MMX, ThreeDNow, ThreeDNowA
  };

  /// X86 processor family: Intel Atom, Intel SLM, and others.
  X86ProcFamilyEnum X86ProcFamily = Others;

  /// Which PIC style to use.
  PICStyles::Style PICStyle;

  const TargetMachine &TM;

  /// SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, AVX, AVX2, AVX512F, or none
  /// supported.
  X86SSEEnum X86SSELevel = NoSSE;

  /// MMX, 3DNow, 3DNow Athlon, or none supported.
  X863DNowEnum X863DNowLevel = NoThreeDNow;

  /// True if the processor supports X87 instructions.
  bool HasX87 = false;

  /// True if the processor supports CMPXCHG8B.
  bool HasCmpxchg8b = false;

  /// True if this processor has the NOPL instruction
  /// (generally Pentium Pro+).
  bool HasNOPL = false;

  /// True if this processor has conditional move instructions
  /// (generally Pentium Pro+).
  bool HasCMov = false;

  /// True if the processor supports X86-64 instructions.
  bool HasX86_64 = false;

  /// True if the processor supports POPCNT.
  bool HasPOPCNT = false;

  /// True if the processor supports SSE4A instructions.
  bool HasSSE4A = false;

  /// Target has AES instructions.
  bool HasAES = false;
  bool HasVAES = false;

  /// Target has FXSAVE/FXRSTOR instructions.
  bool HasFXSR = false;

  /// Target has XSAVE instructions.
  bool HasXSAVE = false;

  /// Target has XSAVEOPT instructions.
  bool HasXSAVEOPT = false;

  /// Target has XSAVEC instructions.
  bool HasXSAVEC = false;

  /// Target has XSAVES instructions.
  bool HasXSAVES = false;

  /// Target has carry-less multiplication.
  bool HasPCLMUL = false;
  bool HasVPCLMULQDQ = false;

  /// Target has Galois Field Arithmetic instructions.
  bool HasGFNI = false;

  /// Target has 3-operand fused multiply-add.
  bool HasFMA = false;

  /// Target has 4-operand fused multiply-add.
  bool HasFMA4 = false;

  /// Target has XOP instructions.
  bool HasXOP = false;

  /// Target has TBM instructions.
  bool HasTBM = false;

  /// Target has LWP instructions.
  bool HasLWP = false;

  /// True if the processor has the MOVBE instruction.
  bool HasMOVBE = false;

  /// True if the processor has the RDRAND instruction.
  bool HasRDRAND = false;

  /// Processor has 16-bit floating point conversion instructions.
  bool HasF16C = false;

  /// Processor has FS/GS base instructions.
  bool HasFSGSBase = false;

  /// Processor has LZCNT instruction.
  bool HasLZCNT = false;

  /// Processor has BMI1 instructions.
  bool HasBMI = false;

  /// Processor has BMI2 instructions.
  bool HasBMI2 = false;

  /// Processor has VBMI instructions.
  bool HasVBMI = false;

  /// Processor has VBMI2 instructions.
  bool HasVBMI2 = false;

  /// Processor has Integer Fused Multiply Add instructions.
  bool HasIFMA = false;

  /// Processor has RTM instructions.
  bool HasRTM = false;

  /// Processor has ADX instructions.
  bool HasADX = false;

  /// Processor has SHA instructions.
  bool HasSHA = false;

  /// Processor has PRFCHW instructions.
  bool HasPRFCHW = false;

  /// Processor has RDSEED instructions.
  bool HasRDSEED = false;

  /// Processor has LAHF/SAHF instructions.
  bool HasLAHFSAHF = false;

  /// Processor has MONITORX/MWAITX instructions.
  bool HasMWAITX = false;

  /// Processor has Cache Line Zero instruction.
  bool HasCLZERO = false;

  /// Processor has Cache Line Demote instruction.
  bool HasCLDEMOTE = false;

  /// Processor has MOVDIRI instruction (direct store integer).
  bool HasMOVDIRI = false;

  /// Processor has MOVDIR64B instruction (direct store 64 bytes).
  bool HasMOVDIR64B = false;

  /// Processor has PTWRITE instruction.
  bool HasPTWRITE = false;

  /// Processor has Prefetch with intent to Write instruction.
  bool HasPREFETCHWT1 = false;

  /// True if SHLD instructions are slow.
  bool IsSHLDSlow = false;

  /// True if the PMULLD instruction is slow compared to PMULLW/PMULHW and
  /// PMULUDQ.
  bool IsPMULLDSlow = false;

  /// True if the PMADDWD instruction is slow compared to PMULLD.
  bool IsPMADDWDSlow = false;

  /// True if unaligned memory accesses of 16 bytes are slow.
  bool IsUAMem16Slow = false;

  /// True if unaligned memory accesses of 32 bytes are slow.
  bool IsUAMem32Slow = false;

  /// True if SSE operations can have unaligned memory operands.
  /// This may require setting a configuration bit in the processor.
  bool HasSSEUnalignedMem = false;

  /// True if this processor has the CMPXCHG16B instruction;
  /// this is true for most x86-64 chips, but not the first AMD chips.
  bool HasCmpxchg16b = false;

  /// True if the LEA instruction should be used for adjusting
  /// the stack pointer. This is an optimization for Intel Atom processors.
  bool UseLeaForSP = false;

  /// True if the POPCNT instruction has a false dependency on the destination
  /// register.
  bool HasPOPCNTFalseDeps = false;

  /// True if LZCNT/TZCNT instructions have a false dependency on the
  /// destination register.
  bool HasLZCNTFalseDeps = false;

  /// True if it's preferable to combine to a single shuffle using a variable
  /// mask over multiple fixed shuffles.
  bool HasFastVariableShuffle = false;

  /// True if vzeroupper instructions should be inserted after code that uses
  /// ymm or zmm registers.
  bool InsertVZEROUPPER = false;

  /// True if there is no performance penalty for writing NOPs with up to
  /// 11 bytes.
  bool HasFast11ByteNOP = false;

  /// True if there is no performance penalty for writing NOPs with up to
  /// 15 bytes.
  bool HasFast15ByteNOP = false;

  /// True if gather is reasonably fast. This is true for Skylake client and
  /// all AVX-512 CPUs.
  bool HasFastGather = false;

  /// True if hardware SQRTSS instruction is at least as fast (latency) as
  /// RSQRTSS followed by a Newton-Raphson iteration.
  bool HasFastScalarFSQRT = false;

  /// True if hardware SQRTPS/VSQRTPS instructions are at least as fast
  /// (throughput) as RSQRTPS/VRSQRTPS followed by a Newton-Raphson iteration.
  bool HasFastVectorFSQRT = false;

  /// True if 8-bit divisions are significantly faster than
  /// 32-bit divisions and should be used when possible.
  bool HasSlowDivide32 = false;

  /// True if 32-bit divisions are significantly faster than
  /// 64-bit divisions and should be used when possible.
  bool HasSlowDivide64 = false;

  /// True if LZCNT instruction is fast.
  bool HasFastLZCNT = false;

  /// True if SHLD based rotate is fast.
  bool HasFastSHLDRotate = false;

  /// True if the processor supports macrofusion.
  bool HasMacroFusion = false;

  /// True if the processor supports branch fusion.
  bool HasBranchFusion = false;

  /// True if the processor has enhanced REP MOVSB/STOSB.
  bool HasERMSB = false;

  /// True if short functions should be padded to prevent
  /// a stall when returning too early.
  bool PadShortFunctions = false;

  /// True if two memory operand instructions should use a temporary register
  /// instead.
  bool SlowTwoMemOps = false;

  /// True if the LEA instruction inputs have to be ready at address generation
  /// (AG) time.
  bool LEAUsesAG = false;

  /// True if the LEA instruction with certain arguments is slow.
  bool SlowLEA = false;

  /// True if the LEA instruction has all three source operands: base, index,
  /// and offset, or if the LEA instruction uses base and index registers where
  /// the base is EBP, RBP, or R13.
  bool Slow3OpsLEA = false;

  /// True if INC and DEC instructions are slow when writing to flags.
  bool SlowIncDec = false;

  /// Processor has AVX-512 PreFetch instructions.
  bool HasPFI = false;

  /// Processor has AVX-512 Exponential and Reciprocal instructions.
  bool HasERI = false;

  /// Processor has AVX-512 Conflict Detection instructions.
  bool HasCDI = false;

  /// Processor has AVX-512 population count instructions.
  bool HasVPOPCNTDQ = false;

  /// Processor has AVX-512 Doubleword and Quadword instructions.
  bool HasDQI = false;

  /// Processor has AVX-512 Byte and Word instructions.
  bool HasBWI = false;

  /// Processor has AVX-512 Vector Length eXtensions.
  bool HasVLX = false;

  /// Processor has PKU extensions.
  bool HasPKU = false;

  /// Processor has AVX-512 Vector Neural Network Instructions.
  bool HasVNNI = false;

  /// Processor has AVX-512 bfloat16 floating-point extensions.
  bool HasBF16 = false;

  /// Processor supports ENQCMD instructions.
  bool HasENQCMD = false;

  /// Processor has AVX-512 Bit Algorithms instructions.
  bool HasBITALG = false;

  /// Processor has AVX-512 vp2intersect instructions.
  bool HasVP2INTERSECT = false;

  /// Deprecated flag for MPX instructions.
  bool DeprecatedHasMPX = false;

  /// Processor supports CET SHSTK - Control-Flow Enforcement Technology
  /// using Shadow Stack.
  bool HasSHSTK = false;

  /// Processor supports Invalidate Process-Context Identifier.
  bool HasINVPCID = false;

  /// Processor has Software Guard Extensions.
  bool HasSGX = false;

  /// Processor supports Flush Cache Line instruction.
  bool HasCLFLUSHOPT = false;

  /// Processor supports Cache Line Write Back instruction.
  bool HasCLWB = false;

  /// Processor supports Write Back No Invalidate instruction.
  bool HasWBNOINVD = false;

  /// Processor supports RDPID instruction.
  bool HasRDPID = false;

  /// Processor supports WaitPKG instructions.
  bool HasWAITPKG = false;

  /// Processor supports PCONFIG instruction.
  bool HasPCONFIG = false;

  /// Processor has a single uop BEXTR implementation.
  bool HasFastBEXTR = false;

  /// Try harder to combine to horizontal vector ops if they are fast.
  bool HasFastHorizontalOps = false;

  /// Prefer a left/right scalar logical shift pair over a shift+and pair.
  bool HasFastScalarShiftMasks = false;

  /// Prefer a left/right vector logical shift pair over a shift+and pair.
  bool HasFastVectorShiftMasks = false;

  /// Use a retpoline thunk rather than indirect calls to block speculative
  /// execution.
  bool UseRetpolineIndirectCalls = false;

  /// Use a retpoline thunk or remove any indirect branch to block speculative
  /// execution.
  bool UseRetpolineIndirectBranches = false;

  /// Deprecated flag, query `UseRetpolineIndirectCalls` and
  /// `UseRetpolineIndirectBranches` instead.
  bool DeprecatedUseRetpoline = false;

  /// When using a retpoline thunk, call an externally provided thunk rather
  /// than emitting one inside the compiler.
  bool UseRetpolineExternalThunk = false;

  /// Prevent generation of indirect call/branch instructions from memory,
  /// and force all indirect call/branch instructions from a register to be
  /// preceded by an LFENCE. Also decompose RET instructions into a
  /// POP+LFENCE+JMP sequence.
  bool UseLVIControlFlowIntegrity = false;

  /// Insert LFENCE instructions to prevent data speculatively injected into
  /// loads from being used maliciously.
  bool UseLVILoadHardening = false;

  /// Use software floating point for code generation.
  bool UseSoftFloat = false;

  /// Use alias analysis during code generation.
  bool UseAA = false;

  /// The minimum alignment known to hold for the stack frame on
  /// entry to the function and which must be maintained by every function.
  Align stackAlignment = Align(4);

  /// Max. memset / memcpy size that is turned into rep/movs, rep/stos ops.
  ///
  // FIXME: this is a known good value for Yonah. How about others?
  unsigned MaxInlineSizeThreshold = 128;

  /// Indicates target prefers 128 bit instructions.
  bool Prefer128Bit = false;

  /// Indicates target prefers 256 bit instructions.
  bool Prefer256Bit = false;

  /// Indicates target prefers AVX512 mask registers.
  bool PreferMaskRegisters = false;

  /// Threeway branch is profitable in this subtarget.
  bool ThreewayBranchProfitable = false;

  /// Use Goldmont specific floating point div/sqrt costs.
  bool UseGLMDivSqrtCosts = false;

  /// What processor and OS we're targeting.
  Triple TargetTriple;

  /// GlobalISel related APIs.
  std::unique_ptr<CallLowering> CallLoweringInfo;
  std::unique_ptr<LegalizerInfo> Legalizer;
  std::unique_ptr<RegisterBankInfo> RegBankInfo;
  std::unique_ptr<InstructionSelector> InstSelector;

private:
  /// Override the stack alignment.
  MaybeAlign StackAlignOverride;

  /// Preferred vector width from function attribute.
  unsigned PreferVectorWidthOverride;

  /// Resolved preferred vector width from function attribute and subtarget
  /// features.
  unsigned PreferVectorWidth = UINT32_MAX;

  /// Required vector width from function attribute.
  unsigned RequiredVectorWidth;

  /// True if compiling for 64-bit, false for 16-bit or 32-bit.
  bool In64BitMode;

  /// True if compiling for 32-bit, false for 16-bit or 64-bit.
  bool In32BitMode;

  /// True if compiling for 16-bit, false for 32-bit or 64-bit.
  bool In16BitMode;

  /// Contains the overhead of gather/scatter instructions.
  int GatherOverhead = 1024;
  int ScatterOverhead = 1024;
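
  // Note (illustrative; the concrete numbers below are assumptions modeled on
  // the initialization in the companion X86Subtarget.cpp, not guaranteed by
  // this header): the default of 1024 makes gathers and scatters look
  // prohibitively expensive to the cost model, and targets with fast
  // implementations lower it, roughly:
  //
  //   if (hasAVX512() || (hasAVX2() && hasFastGather()))
  //     GatherOverhead = 2;
  //   if (hasAVX512())
  //     ScatterOverhead = 2;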

  X86SelectionDAGInfo TSInfo;
  // Ordering here is important. X86InstrInfo initializes X86RegisterInfo which
  // X86TargetLowering needs.
  X86InstrInfo InstrInfo;
  X86TargetLowering TLInfo;
  X86FrameLowering FrameLowering;

public:
  /// This constructor initializes the data members to match that
  /// of the specified triple.
  ///
  X86Subtarget(const Triple &TT, StringRef CPU, StringRef FS,
               const X86TargetMachine &TM, MaybeAlign StackAlignOverride,
               unsigned PreferVectorWidthOverride,
               unsigned RequiredVectorWidth);
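
  // Illustrative usage (an assumption, not defined in this header):
  // X86TargetMachine::getSubtargetImpl typically builds one X86Subtarget per
  // distinct CPU/feature/attribute combination and caches it, roughly:
  //
  //   std::unique_ptr<X86Subtarget> &I = SubtargetMap[Key]; // hypothetical map
  //   if (!I)
  //     I = std::make_unique<X86Subtarget>(TargetTriple, CPU, FS, *this,
  //                                        StackAlignOverride,
  //                                        PreferVectorWidthOverride,
  //                                        RequiredVectorWidth);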

  const X86TargetLowering *getTargetLowering() const override {
    return &TLInfo;
  }

  const X86InstrInfo *getInstrInfo() const override { return &InstrInfo; }

  const X86FrameLowering *getFrameLowering() const override {
    return &FrameLowering;
  }

  const X86SelectionDAGInfo *getSelectionDAGInfo() const override {
    return &TSInfo;
  }

  const X86RegisterInfo *getRegisterInfo() const override {
    return &getInstrInfo()->getRegisterInfo();
  }

  /// Returns the minimum alignment known to hold for the
  /// stack frame on entry to the function and which must be maintained by
  /// every function for this subtarget.
  Align getStackAlignment() const { return stackAlignment; }

  /// Returns the maximum memset / memcpy size
  /// that still makes it profitable to inline the call.
  unsigned getMaxInlineSizeThreshold() const { return MaxInlineSizeThreshold; }

  /// ParseSubtargetFeatures - Parses the features string, setting the
  /// specified subtarget options. The definition of this function is
  /// auto-generated by tblgen.
  void ParseSubtargetFeatures(StringRef CPU, StringRef FS);

  /// Methods used by Global ISel
  const CallLowering *getCallLowering() const override;
  InstructionSelector *getInstructionSelector() const override;
  const LegalizerInfo *getLegalizerInfo() const override;
  const RegisterBankInfo *getRegBankInfo() const override;

private:
  /// Initialize the full set of dependencies so we can use an initializer
  /// list for X86Subtarget.
  X86Subtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
  void initSubtargetFeatures(StringRef CPU, StringRef FS);

public:
  /// Is this x86_64? (disregarding specific ABI / programming model)
  bool is64Bit() const {
    return In64BitMode;
  }

  bool is32Bit() const {
    return In32BitMode;
  }

  bool is16Bit() const {
    return In16BitMode;
  }

  /// Is this x86_64 with the ILP32 programming model (x32 ABI)?
  bool isTarget64BitILP32() const {
    return In64BitMode && (TargetTriple.getEnvironment() == Triple::GNUX32 ||
                           TargetTriple.isOSNaCl());
  }

  /// Is this x86_64 with the LP64 programming model (standard AMD64, no x32)?
  bool isTarget64BitLP64() const {
    return In64BitMode && (TargetTriple.getEnvironment() != Triple::GNUX32 &&
                           !TargetTriple.isOSNaCl());
  }
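
  // Example (illustrative): a subtarget built for the triple
  // "x86_64-unknown-linux-gnux32" reports is64Bit() && isTarget64BitILP32(),
  // while one built for "x86_64-unknown-linux-gnu" reports
  // isTarget64BitLP64(). Under x32, pointers and longs are 32 bits wide even
  // though the processor runs in 64-bit mode.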

  PICStyles::Style getPICStyle() const { return PICStyle; }
  void setPICStyle(PICStyles::Style Style) { PICStyle = Style; }

  bool hasX87() const { return HasX87; }
  bool hasCmpxchg8b() const { return HasCmpxchg8b; }
  bool hasNOPL() const { return HasNOPL; }
  // SSE codegen depends on cmovs, and all SSE1+ processors support them.
  // All 64-bit processors support cmov.
  bool hasCMov() const { return HasCMov || X86SSELevel >= SSE1 || is64Bit(); }
  bool hasSSE1() const { return X86SSELevel >= SSE1; }
  bool hasSSE2() const { return X86SSELevel >= SSE2; }
  bool hasSSE3() const { return X86SSELevel >= SSE3; }
  bool hasSSSE3() const { return X86SSELevel >= SSSE3; }
  bool hasSSE41() const { return X86SSELevel >= SSE41; }
  bool hasSSE42() const { return X86SSELevel >= SSE42; }
  bool hasAVX() const { return X86SSELevel >= AVX; }
  bool hasAVX2() const { return X86SSELevel >= AVX2; }
  bool hasAVX512() const { return X86SSELevel >= AVX512F; }
  bool hasInt256() const { return hasAVX2(); }
  bool hasSSE4A() const { return HasSSE4A; }
  bool hasMMX() const { return X863DNowLevel >= MMX; }
  bool has3DNow() const { return X863DNowLevel >= ThreeDNow; }
  bool has3DNowA() const { return X863DNowLevel >= ThreeDNowA; }
  bool hasPOPCNT() const { return HasPOPCNT; }
  bool hasAES() const { return HasAES; }
  bool hasVAES() const { return HasVAES; }
  bool hasFXSR() const { return HasFXSR; }
  bool hasXSAVE() const { return HasXSAVE; }
  bool hasXSAVEOPT() const { return HasXSAVEOPT; }
  bool hasXSAVEC() const { return HasXSAVEC; }
  bool hasXSAVES() const { return HasXSAVES; }
  bool hasPCLMUL() const { return HasPCLMUL; }
  bool hasVPCLMULQDQ() const { return HasVPCLMULQDQ; }
  bool hasGFNI() const { return HasGFNI; }
  // Prefer FMA4 to FMA - it's better for commutation/memory folding and
  // has equal or better performance on all supported targets.
  bool hasFMA() const { return HasFMA; }
  bool hasFMA4() const { return HasFMA4; }
  bool hasAnyFMA() const { return hasFMA() || hasFMA4(); }
  bool hasXOP() const { return HasXOP; }
  bool hasTBM() const { return HasTBM; }
  bool hasLWP() const { return HasLWP; }
  bool hasMOVBE() const { return HasMOVBE; }
  bool hasRDRAND() const { return HasRDRAND; }
  bool hasF16C() const { return HasF16C; }
  bool hasFSGSBase() const { return HasFSGSBase; }
  bool hasLZCNT() const { return HasLZCNT; }
  bool hasBMI() const { return HasBMI; }
  bool hasBMI2() const { return HasBMI2; }
  bool hasVBMI() const { return HasVBMI; }
  bool hasVBMI2() const { return HasVBMI2; }
  bool hasIFMA() const { return HasIFMA; }
  bool hasRTM() const { return HasRTM; }
  bool hasADX() const { return HasADX; }
  bool hasSHA() const { return HasSHA; }
  bool hasPRFCHW() const { return HasPRFCHW || HasPREFETCHWT1; }
  bool hasPREFETCHWT1() const { return HasPREFETCHWT1; }
  bool hasSSEPrefetch() const {
    // We implicitly enable these when we have a write prefetch supporting a
    // cache level OR if we have prfchw, but don't already have a read
    // prefetch from 3dnow.
    return hasSSE1() || (hasPRFCHW() && !has3DNow()) || hasPREFETCHWT1();
  }
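
  // Example (illustrative): on a target with 3DNow! and PRFCHW but neither
  // SSE1 nor PREFETCHWT1, hasSSEPrefetch() returns false: the 3DNow! PREFETCH
  // instruction already provides a read prefetch, so lowering does not need
  // the SSE PREFETCHT0/T1/T2 forms.
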
  bool hasRDSEED() const { return HasRDSEED; }
  bool hasLAHFSAHF() const { return HasLAHFSAHF; }
  bool hasMWAITX() const { return HasMWAITX; }
  bool hasCLZERO() const { return HasCLZERO; }
  bool hasCLDEMOTE() const { return HasCLDEMOTE; }
  bool hasMOVDIRI() const { return HasMOVDIRI; }
  bool hasMOVDIR64B() const { return HasMOVDIR64B; }
  bool hasPTWRITE() const { return HasPTWRITE; }
  bool isSHLDSlow() const { return IsSHLDSlow; }
  bool isPMULLDSlow() const { return IsPMULLDSlow; }
  bool isPMADDWDSlow() const { return IsPMADDWDSlow; }
  bool isUnalignedMem16Slow() const { return IsUAMem16Slow; }
  bool isUnalignedMem32Slow() const { return IsUAMem32Slow; }
  int getGatherOverhead() const { return GatherOverhead; }
  int getScatterOverhead() const { return ScatterOverhead; }
  bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }
  bool hasCmpxchg16b() const { return HasCmpxchg16b && is64Bit(); }
  bool useLeaForSP() const { return UseLeaForSP; }
  bool hasPOPCNTFalseDeps() const { return HasPOPCNTFalseDeps; }
  bool hasLZCNTFalseDeps() const { return HasLZCNTFalseDeps; }
  bool hasFastVariableShuffle() const {
    return HasFastVariableShuffle;
  }
  bool insertVZEROUPPER() const { return InsertVZEROUPPER; }
  bool hasFastGather() const { return HasFastGather; }
  bool hasFastScalarFSQRT() const { return HasFastScalarFSQRT; }
  bool hasFastVectorFSQRT() const { return HasFastVectorFSQRT; }
  bool hasFastLZCNT() const { return HasFastLZCNT; }
  bool hasFastSHLDRotate() const { return HasFastSHLDRotate; }
  bool hasFastBEXTR() const { return HasFastBEXTR; }
  bool hasFastHorizontalOps() const { return HasFastHorizontalOps; }
  bool hasFastScalarShiftMasks() const { return HasFastScalarShiftMasks; }
  bool hasFastVectorShiftMasks() const { return HasFastVectorShiftMasks; }
  bool hasMacroFusion() const { return HasMacroFusion; }
  bool hasBranchFusion() const { return HasBranchFusion; }
  bool hasERMSB() const { return HasERMSB; }
  bool hasSlowDivide32() const { return HasSlowDivide32; }
  bool hasSlowDivide64() const { return HasSlowDivide64; }
  bool padShortFunctions() const { return PadShortFunctions; }
  bool slowTwoMemOps() const { return SlowTwoMemOps; }
  bool LEAusesAG() const { return LEAUsesAG; }
  bool slowLEA() const { return SlowLEA; }
  bool slow3OpsLEA() const { return Slow3OpsLEA; }
  bool slowIncDec() const { return SlowIncDec; }
  bool hasCDI() const { return HasCDI; }
  bool hasVPOPCNTDQ() const { return HasVPOPCNTDQ; }
  bool hasPFI() const { return HasPFI; }
  bool hasERI() const { return HasERI; }
  bool hasDQI() const { return HasDQI; }
  bool hasBWI() const { return HasBWI; }
  bool hasVLX() const { return HasVLX; }
  bool hasPKU() const { return HasPKU; }
  bool hasVNNI() const { return HasVNNI; }
  bool hasBF16() const { return HasBF16; }
  bool hasVP2INTERSECT() const { return HasVP2INTERSECT; }
  bool hasBITALG() const { return HasBITALG; }
  bool hasSHSTK() const { return HasSHSTK; }
  bool hasCLFLUSHOPT() const { return HasCLFLUSHOPT; }
  bool hasCLWB() const { return HasCLWB; }
  bool hasWBNOINVD() const { return HasWBNOINVD; }
  bool hasRDPID() const { return HasRDPID; }
  bool hasWAITPKG() const { return HasWAITPKG; }
  bool hasPCONFIG() const { return HasPCONFIG; }
  bool hasSGX() const { return HasSGX; }
  bool threewayBranchProfitable() const { return ThreewayBranchProfitable; }
  bool hasINVPCID() const { return HasINVPCID; }
  bool hasENQCMD() const { return HasENQCMD; }
  bool useRetpolineIndirectCalls() const { return UseRetpolineIndirectCalls; }
  bool useRetpolineIndirectBranches() const {
    return UseRetpolineIndirectBranches;
  }
  bool useRetpolineExternalThunk() const { return UseRetpolineExternalThunk; }

  // These are generic getters that OR together all of the thunk types
  // supported by the subtarget. Therefore useIndirectThunk*() will return true
  // if any respective thunk feature is enabled.
  bool useIndirectThunkCalls() const {
    return useRetpolineIndirectCalls() || useLVIControlFlowIntegrity();
  }
  bool useIndirectThunkBranches() const {
    return useRetpolineIndirectBranches() || useLVIControlFlowIntegrity();
  }
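
  // Example (illustrative): code deciding whether an indirect call must be
  // routed through a thunk should use the generic getter so that every
  // mitigation is covered, not just retpoline:
  //
  //   if (Subtarget.useIndirectThunkCalls())
  //     rewriteCallToThunk(MI); // hypothetical helper; retpoline or LVI.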

  bool preferMaskRegisters() const { return PreferMaskRegisters; }
  bool useGLMDivSqrtCosts() const { return UseGLMDivSqrtCosts; }
  bool useLVIControlFlowIntegrity() const { return UseLVIControlFlowIntegrity; }
  bool useLVILoadHardening() const { return UseLVILoadHardening; }

  unsigned getPreferVectorWidth() const { return PreferVectorWidth; }
  unsigned getRequiredVectorWidth() const { return RequiredVectorWidth; }

  // Helper functions to determine when we should allow widening to 512-bit
  // during codegen.
  // TODO: Currently we're always allowing widening on CPUs without VLX,
  // because for many cases we don't have a better option.
  bool canExtendTo512DQ() const {
    return hasAVX512() && (!hasVLX() || getPreferVectorWidth() >= 512);
  }
  bool canExtendTo512BW() const {
    return hasBWI() && canExtendTo512DQ();
  }

  // If no function requires 512-bit vectors and we prefer not to use 512-bit
  // registers, disable them in the legalizer.
  bool useAVX512Regs() const {
    return hasAVX512() && (canExtendTo512DQ() || RequiredVectorWidth > 256);
  }

  bool useBWIRegs() const {
    return hasBWI() && useAVX512Regs();
  }
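
  // Example (illustrative): on an AVX512F + VLX CPU compiled with
  // prefer-vector-width=256, getPreferVectorWidth() is 256, so
  // canExtendTo512DQ() is false and useAVX512Regs() returns true only for
  // functions whose required vector width exceeds 256 bits.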

  bool isXRaySupported() const override { return is64Bit(); }

  X86ProcFamilyEnum getProcFamily() const { return X86ProcFamily; }

  /// TODO: to be removed later and replaced with suitable properties
  bool isAtom() const { return X86ProcFamily == IntelAtom; }
  bool isSLM() const { return X86ProcFamily == IntelSLM; }
  bool useSoftFloat() const { return UseSoftFloat; }
  bool useAA() const override { return UseAA; }

  /// Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
  /// no-sse2). There isn't any reason to disable it if the target processor
  /// supports it.
  bool hasMFence() const { return hasSSE2() || is64Bit(); }

  const Triple &getTargetTriple() const { return TargetTriple; }

  bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
  bool isTargetFreeBSD() const { return TargetTriple.isOSFreeBSD(); }
  bool isTargetDragonFly() const { return TargetTriple.isOSDragonFly(); }
  bool isTargetSolaris() const { return TargetTriple.isOSSolaris(); }
  bool isTargetPS4() const { return TargetTriple.isPS4CPU(); }

  bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
  bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
  bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }

  bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
  bool isTargetKFreeBSD() const { return TargetTriple.isOSKFreeBSD(); }
  bool isTargetGlibc() const { return TargetTriple.isOSGlibc(); }
  bool isTargetAndroid() const { return TargetTriple.isAndroid(); }
  bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
  bool isTargetNaCl32() const { return isTargetNaCl() && !is64Bit(); }
  bool isTargetNaCl64() const { return isTargetNaCl() && is64Bit(); }
  bool isTargetMCU() const { return TargetTriple.isOSIAMCU(); }
  bool isTargetFuchsia() const { return TargetTriple.isOSFuchsia(); }

  bool isTargetWindowsMSVC() const {
    return TargetTriple.isWindowsMSVCEnvironment();
  }

  bool isTargetWindowsCoreCLR() const {
    return TargetTriple.isWindowsCoreCLREnvironment();
  }

  bool isTargetWindowsCygwin() const {
    return TargetTriple.isWindowsCygwinEnvironment();
  }

  bool isTargetWindowsGNU() const {
    return TargetTriple.isWindowsGNUEnvironment();
  }

  bool isTargetWindowsItanium() const {
    return TargetTriple.isWindowsItaniumEnvironment();
  }

  bool isTargetCygMing() const { return TargetTriple.isOSCygMing(); }

  bool isOSWindows() const { return TargetTriple.isOSWindows(); }

  bool isTargetWin64() const { return In64BitMode && isOSWindows(); }

  bool isTargetWin32() const { return !In64BitMode && isOSWindows(); }

  bool isPICStyleGOT() const { return PICStyle == PICStyles::Style::GOT; }
  bool isPICStyleRIPRel() const { return PICStyle == PICStyles::Style::RIPRel; }

  bool isPICStyleStubPIC() const {
    return PICStyle == PICStyles::Style::StubPIC;
  }

  bool isPositionIndependent() const { return TM.isPositionIndependent(); }

  bool isCallingConvWin64(CallingConv::ID CC) const {
    switch (CC) {
    // On Win64, all these conventions just use the default convention.
    case CallingConv::C:
    case CallingConv::Fast:
    case CallingConv::Tail:
    case CallingConv::Swift:
    case CallingConv::X86_FastCall:
    case CallingConv::X86_StdCall:
    case CallingConv::X86_ThisCall:
    case CallingConv::X86_VectorCall:
    case CallingConv::Intel_OCL_BI:
      return isTargetWin64();
    // This convention allows using the Win64 convention on other targets.
    case CallingConv::Win64:
      return true;
    // This convention allows using the SysV convention on Windows targets.
    case CallingConv::X86_64_SysV:
      return false;
    // Otherwise, who knows what this is.
    default:
      return false;
    }
  }
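
  // Example (illustrative): on an x86_64-pc-windows-msvc target, a function
  // using CallingConv::X86_64_SysV is still given the SysV integer argument
  // registers (RDI, RSI, RDX, RCX, R8, R9), while on Linux a
  // CallingConv::Win64 function is given RCX, RDX, R8 and R9; lowering
  // branches on this predicate to choose the register list.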

  /// Classify a global variable reference for the current subtarget according
  /// to how we should reference it in a non-pcrel context.
  unsigned char classifyLocalReference(const GlobalValue *GV) const;

  unsigned char classifyGlobalReference(const GlobalValue *GV,
                                        const Module &M) const;
  unsigned char classifyGlobalReference(const GlobalValue *GV) const;

  /// Classify a global function reference for the current subtarget.
  unsigned char classifyGlobalFunctionReference(const GlobalValue *GV,
                                                const Module &M) const;
  unsigned char classifyGlobalFunctionReference(const GlobalValue *GV) const;

  /// Classify a blockaddress reference for the current subtarget according to
  /// how we should reference it in a non-pcrel context.
  unsigned char classifyBlockAddressReference() const;

  /// Return true if the subtarget allows calls to immediate address.
  bool isLegalToCallImmediateAddr() const;

  /// If we are using indirect thunks, we need to expand indirectbr to avoid it
  /// lowering to an actual indirect jump.
  bool enableIndirectBrExpand() const override {
    return useIndirectThunkBranches();
  }

  /// Enable the MachineScheduler pass for all X86 subtargets.
  bool enableMachineScheduler() const override { return true; }

  bool enableEarlyIfConversion() const override;

  void getPostRAMutations(std::vector<std::unique_ptr<ScheduleDAGMutation>>
                              &Mutations) const override;

  AntiDepBreakMode getAntiDepBreakMode() const override {
    return TargetSubtargetInfo::ANTIDEP_CRITICAL;
  }

  bool enableAdvancedRASplitCost() const override { return true; }
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_X86_X86SUBTARGET_H