1//===-- X86Subtarget.cpp - X86 Subtarget Information ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the X86 specific subclass of TargetSubtargetInfo.
10//
11//===----------------------------------------------------------------------===//
12
13#include "X86Subtarget.h"
14#include "MCTargetDesc/X86BaseInfo.h"
15#include "X86.h"
16#include "X86CallLowering.h"
17#include "X86LegalizerInfo.h"
18#include "X86MacroFusion.h"
19#include "X86RegisterBankInfo.h"
20#include "X86TargetMachine.h"
21#include "llvm/ADT/Triple.h"
22#include "llvm/CodeGen/GlobalISel/CallLowering.h"
23#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
24#include "llvm/IR/Attributes.h"
25#include "llvm/IR/ConstantRange.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/GlobalValue.h"
28#include "llvm/Support/Casting.h"
29#include "llvm/Support/CodeGen.h"
30#include "llvm/Support/CommandLine.h"
31#include "llvm/Support/Debug.h"
32#include "llvm/Support/ErrorHandling.h"
33#include "llvm/Support/raw_ostream.h"
34#include "llvm/Target/TargetMachine.h"
35
36#if defined(_MSC_VER)
37#include <intrin.h>
38#endif
39
40using namespace llvm;
41
42#define DEBUG_TYPE "subtarget"
43
44#define GET_SUBTARGETINFO_TARGET_DESC
45#define GET_SUBTARGETINFO_CTOR
46#include "X86GenSubtargetInfo.inc"
47
// Temporary option to control early if-conversion for x86 while adding machine
// models. Hidden from -help; intended for developers bringing up scheduling
// models, not end users.
static cl::opt<bool>
X86EarlyIfConv("x86-early-ifcvt", cl::Hidden,
               cl::desc("Enable early if-conversion on X86"));
53
54
/// Classify a blockaddress reference for the current subtarget according to how
/// we should reference it in a non-pcrel context.
unsigned char X86Subtarget::classifyBlockAddressReference() const {
  // Block addresses are always local to the defining DSO, so classify them as
  // a local reference with no specific GlobalValue.
  return classifyLocalReference(nullptr);
}
60
/// Classify a global variable reference for the current subtarget according to
/// how we should reference it in a non-pcrel context.
unsigned char
X86Subtarget::classifyGlobalReference(const GlobalValue *GV) const {
  // Delegate to the two-argument form using the module that owns GV.
  return classifyGlobalReference(GV, *GV->getParent());
}
67
/// Classify a reference to a symbol that is known to be local to the current
/// DSO, returning the X86II::MO_* target flag describing how it should be
/// materialized. GV may be null (e.g. for constant pool and jump table
/// references).
unsigned char
X86Subtarget::classifyLocalReference(const GlobalValue *GV) const {
  // If we're not PIC, it's not very interesting.
  if (!isPositionIndependent())
    return X86II::MO_NO_FLAG;

  if (is64Bit()) {
    // 64-bit ELF PIC local references may use GOTOFF relocations.
    if (isTargetELF()) {
      switch (TM.getCodeModel()) {
      case CodeModel::Tiny:
        llvm_unreachable("Tiny codesize model not supported on X86");
      // 64-bit small code model is simple: All rip-relative.
      case CodeModel::Small:
      case CodeModel::Kernel:
        return X86II::MO_NO_FLAG;

      // The large PIC code model uses GOTOFF.
      case CodeModel::Large:
        return X86II::MO_GOTOFF;

      // Medium is a hybrid: RIP-rel for code, GOTOFF for DSO local data.
      case CodeModel::Medium:
        // Constant pool and jump table handling pass a nullptr to this
        // function so we need to use isa_and_nonnull.
        if (isa_and_nonnull<Function>(GV))
          return X86II::MO_NO_FLAG; // All code is RIP-relative
        return X86II::MO_GOTOFF;    // Local symbols use GOTOFF.
      }
      llvm_unreachable("invalid code model");
    }

    // Otherwise, this is either a RIP-relative reference or a 64-bit movabsq,
    // both of which use MO_NO_FLAG.
    return X86II::MO_NO_FLAG;
  }

  // The COFF dynamic linker just patches the executable sections.
  if (isTargetCOFF())
    return X86II::MO_NO_FLAG;

  if (isTargetDarwin()) {
    // 32 bit macho has no relocation for a-b if a is undefined, even if
    // b is in the section that is being relocated.
    // This means we have to use a load even for GVs that are known to be
    // local to the dso.
    if (GV && (GV->isDeclarationForLinker() || GV->hasCommonLinkage()))
      return X86II::MO_DARWIN_NONLAZY_PIC_BASE;

    return X86II::MO_PIC_BASE_OFFSET;
  }

  // 32-bit ELF PIC: reference the symbol relative to the GOT base register.
  return X86II::MO_GOTOFF;
}
122
/// Classify a global variable reference in module M, returning the
/// X86II::MO_* target flag describing how the symbol should be referenced
/// in a non-pcrel context. GV may be null for anonymous references.
unsigned char X86Subtarget::classifyGlobalReference(const GlobalValue *GV,
                                                    const Module &M) const {
  // The static large model never uses stubs.
  if (TM.getCodeModel() == CodeModel::Large && !isPositionIndependent())
    return X86II::MO_NO_FLAG;

  // Absolute symbols can be referenced directly.
  if (GV) {
    if (Optional<ConstantRange> CR = GV->getAbsoluteSymbolRange()) {
      // See if we can use the 8-bit immediate form. Note that some instructions
      // will sign extend the immediate operand, so to be conservative we only
      // accept the range [0,128).
      if (CR->getUnsignedMax().ult(128))
        return X86II::MO_ABS8;
      else
        return X86II::MO_NO_FLAG;
    }
  }

  if (TM.shouldAssumeDSOLocal(M, GV))
    return classifyLocalReference(GV);

  // NOTE(review): GV is dereferenced below without a null check; presumably a
  // null GV is always DSO-local and handled above — confirm against callers.
  if (isTargetCOFF()) {
    if (GV->hasDLLImportStorageClass())
      return X86II::MO_DLLIMPORT;
    return X86II::MO_COFFSTUB;
  }
  // Some JIT users use *-win32-elf triples; these shouldn't use GOT tables.
  if (isOSWindows())
    return X86II::MO_NO_FLAG;

  if (is64Bit()) {
    // ELF supports a large, truly PIC code model with non-PC relative GOT
    // references. Other object file formats do not. Use the no-flag, 64-bit
    // reference for them.
    if (TM.getCodeModel() == CodeModel::Large)
      return isTargetELF() ? X86II::MO_GOT : X86II::MO_NO_FLAG;
    return X86II::MO_GOTPCREL;
  }

  if (isTargetDarwin()) {
    // 32-bit Darwin: load through a non-lazy pointer stub; PIC additionally
    // goes through the PIC base register.
    if (!isPositionIndependent())
      return X86II::MO_DARWIN_NONLAZY;
    return X86II::MO_DARWIN_NONLAZY_PIC_BASE;
  }

  // 32-bit ELF and everything else: go through the GOT.
  return X86II::MO_GOT;
}
171
/// Classify a reference to a function for call lowering, using the module
/// that owns the function. See the two-argument overload for details.
unsigned char
X86Subtarget::classifyGlobalFunctionReference(const GlobalValue *GV) const {
  return classifyGlobalFunctionReference(GV, *GV->getParent());
}
176
/// Classify a reference to a function in module M for call lowering,
/// returning the X86II::MO_* target flag for the callee operand
/// (e.g. PLT vs. GOTPCREL vs. direct). GV may be null for external
/// runtime-library calls.
unsigned char
X86Subtarget::classifyGlobalFunctionReference(const GlobalValue *GV,
                                              const Module &M) const {
  // DSO-local functions can always be called directly.
  if (TM.shouldAssumeDSOLocal(M, GV))
    return X86II::MO_NO_FLAG;

  // Functions on COFF can be non-DSO local for two reasons:
  // - They are marked dllimport
  // - They are extern_weak, and a stub is needed
  if (isTargetCOFF()) {
    if (GV->hasDLLImportStorageClass())
      return X86II::MO_DLLIMPORT;
    return X86II::MO_COFFSTUB;
  }

  // F is null for non-function globals and for null GV (runtime libcalls).
  const Function *F = dyn_cast_or_null<Function>(GV);

  if (isTargetELF()) {
    if (is64Bit() && F && (CallingConv::X86_RegCall == F->getCallingConv()))
      // According to psABI, PLT stub clobbers XMM8-XMM15.
      // In Regcall calling convention those registers are used for passing
      // parameters. Thus we need to prevent lazy binding in Regcall.
      return X86II::MO_GOTPCREL;
    // If PLT must be avoided then the call should be via GOTPCREL.
    if (((F && F->hasFnAttribute(Attribute::NonLazyBind)) ||
         (!F && M.getRtLibUseGOT())) &&
        is64Bit())
       return X86II::MO_GOTPCREL;
    return X86II::MO_PLT;
  }

  if (is64Bit()) {
    if (F && F->hasFnAttribute(Attribute::NonLazyBind))
      // If the function is marked as non-lazy, generate an indirect call
      // which loads from the GOT directly. This avoids runtime overhead
      // at the cost of eager binding (and one extra byte of encoding).
      return X86II::MO_GOTPCREL;
    return X86II::MO_NO_FLAG;
  }

  return X86II::MO_NO_FLAG;
}
219
220/// Return true if the subtarget allows calls to immediate address.
221bool X86Subtarget::isLegalToCallImmediateAddr() const {
222  // FIXME: I386 PE/COFF supports PC relative calls using IMAGE_REL_I386_REL32
223  // but WinCOFFObjectWriter::RecordRelocation cannot emit them.  Once it does,
224  // the following check for Win32 should be removed.
225  if (In64BitMode || isTargetWin32())
226    return false;
227  return isTargetELF() || TM.getRelocationModel() == Reloc::Static;
228}
229
/// Parse the CPU name and feature string, apply X86-specific defaults
/// (SSE2/64bit in 64-bit mode, SAHF in 32-bit mode), and derive dependent
/// subtarget state such as stack alignment and gather/scatter cost overheads.
void X86Subtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
  std::string CPUName = std::string(CPU);
  if (CPUName.empty())
    CPUName = "generic";

  std::string FullFS = std::string(FS);
  if (In64BitMode) {
    // SSE2 should default to enabled in 64-bit mode, but can be turned off
    // explicitly.
    if (!FullFS.empty())
      FullFS = "+sse2," + FullFS;
    else
      FullFS = "+sse2";

    // If no CPU was specified, enable 64bit feature to satisfy later check.
    if (CPUName == "generic") {
      if (!FullFS.empty())
        FullFS = "+64bit," + FullFS;
      else
        FullFS = "+64bit";
    }
  }

  // LAHF/SAHF are always supported in non-64-bit mode.
  if (!In64BitMode) {
    if (!FullFS.empty())
      FullFS = "+sahf," + FullFS;
    else
      FullFS = "+sahf";
  }

  // Parse features string and set the CPU.
  ParseSubtargetFeatures(CPUName, FullFS);

  // All CPUs that implement SSE4.2 or SSE4A support unaligned accesses of
  // 16-bytes and under that are reasonably fast. These features were
  // introduced with Intel's Nehalem/Silvermont and AMD's Family10h
  // micro-architectures respectively.
  if (hasSSE42() || hasSSE4A())
    IsUAMem16Slow = false;

  // It's important to keep the MCSubtargetInfo feature bits in sync with
  // target data structure which is shared with MC code emitter, etc.
  if (In64BitMode)
    ToggleFeature(X86::Mode64Bit);
  else if (In32BitMode)
    ToggleFeature(X86::Mode32Bit);
  else if (In16BitMode)
    ToggleFeature(X86::Mode16Bit);
  else
    llvm_unreachable("Not 16-bit, 32-bit or 64-bit mode!");

  LLVM_DEBUG(dbgs() << "Subtarget features: SSELevel " << X86SSELevel
                    << ", 3DNowLevel " << X863DNowLevel << ", 64bit "
                    << HasX86_64 << "\n");
  if (In64BitMode && !HasX86_64)
    report_fatal_error("64-bit code requested on a subtarget that doesn't "
                       "support it!");

  // Stack alignment is 16 bytes on Darwin, Linux, kFreeBSD and Solaris (both
  // 32 and 64 bit) and for all 64-bit targets.
  if (StackAlignOverride)
    stackAlignment = *StackAlignOverride;
  else if (isTargetDarwin() || isTargetLinux() || isTargetSolaris() ||
           isTargetKFreeBSD() || In64BitMode)
    stackAlignment = Align(16);

  // Some CPUs have more overhead for gather. The specified overhead is relative
  // to the Load operation. "2" is the number provided by Intel architects. This
  // parameter is used for cost estimation of Gather Op and comparison with
  // other alternatives.
  // TODO: Remove the explicit hasAVX512()?, That would mean we would only
  // enable gather with a -march.
  if (hasAVX512() || (hasAVX2() && hasFastGather()))
    GatherOverhead = 2;
  if (hasAVX512())
    ScatterOverhead = 2;

  // Consume the vector width attribute or apply any target specific limit.
  if (PreferVectorWidthOverride)
    PreferVectorWidth = PreferVectorWidthOverride;
  else if (Prefer128Bit)
    PreferVectorWidth = 128;
  else if (Prefer256Bit)
    PreferVectorWidth = 256;
}
316
/// Parse the CPU/feature strings and return *this, so this can be called
/// from the constructor's member-initializer list before members that
/// depend on the parsed feature state (e.g. InstrInfo, FrameLowering).
X86Subtarget &X86Subtarget::initializeSubtargetDependencies(StringRef CPU,
                                                            StringRef FS) {
  initSubtargetFeatures(CPU, FS);
  return *this;
}
322
// Construct the X86 subtarget. Note the member-initializer order is
// load-bearing: the In*BitMode flags are computed from the triple first,
// then InstrInfo's initializer runs initializeSubtargetDependencies (which
// parses features and sets stackAlignment), so TLInfo and FrameLowering can
// rely on the fully-initialized feature state (getStackAlignment()).
X86Subtarget::X86Subtarget(const Triple &TT, StringRef CPU, StringRef FS,
                           const X86TargetMachine &TM,
                           MaybeAlign StackAlignOverride,
                           unsigned PreferVectorWidthOverride,
                           unsigned RequiredVectorWidth)
    : X86GenSubtargetInfo(TT, CPU, FS), PICStyle(PICStyles::Style::None),
      TM(TM), TargetTriple(TT), StackAlignOverride(StackAlignOverride),
      PreferVectorWidthOverride(PreferVectorWidthOverride),
      RequiredVectorWidth(RequiredVectorWidth),
      In64BitMode(TargetTriple.getArch() == Triple::x86_64),
      In32BitMode(TargetTriple.getArch() == Triple::x86 &&
                  TargetTriple.getEnvironment() != Triple::CODE16),
      In16BitMode(TargetTriple.getArch() == Triple::x86 &&
                  TargetTriple.getEnvironment() == Triple::CODE16),
      InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this),
      FrameLowering(*this, getStackAlignment()) {
  // Determine the PICStyle based on the target selected.
  if (!isPositionIndependent())
    setPICStyle(PICStyles::Style::None);
  else if (is64Bit())
    setPICStyle(PICStyles::Style::RIPRel);
  else if (isTargetCOFF())
    setPICStyle(PICStyles::Style::None);
  else if (isTargetDarwin())
    setPICStyle(PICStyles::Style::StubPIC);
  else if (isTargetELF())
    setPICStyle(PICStyles::Style::GOT);

  // Set up the GlobalISel support structures for this subtarget.
  CallLoweringInfo.reset(new X86CallLowering(*getTargetLowering()));
  Legalizer.reset(new X86LegalizerInfo(*this, TM));

  // RegBankInfo owns RBI; keep the raw pointer only to hand it to the
  // instruction selector.
  auto *RBI = new X86RegisterBankInfo(*getRegisterInfo());
  RegBankInfo.reset(RBI);
  InstSelector.reset(createX86InstructionSelector(TM, *this, *RBI));
}
358
/// Return the GlobalISel call lowering implementation built in the
/// constructor.
const CallLowering *X86Subtarget::getCallLowering() const {
  return CallLoweringInfo.get();
}
362
/// Return the GlobalISel instruction selector built in the constructor.
InstructionSelector *X86Subtarget::getInstructionSelector() const {
  return InstSelector.get();
}
366
/// Return the GlobalISel legalizer info built in the constructor.
const LegalizerInfo *X86Subtarget::getLegalizerInfo() const {
  return Legalizer.get();
}
370
/// Return the GlobalISel register bank info built in the constructor.
const RegisterBankInfo *X86Subtarget::getRegBankInfo() const {
  return RegBankInfo.get();
}
374
375bool X86Subtarget::enableEarlyIfConversion() const {
376  return hasCMov() && X86EarlyIfConv;
377}
378
/// Add X86-specific post-register-allocation scheduling DAG mutations;
/// currently just macro fusion of adjacent instruction pairs.
void X86Subtarget::getPostRAMutations(
    std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
  Mutations.push_back(createX86MacroFusionDAGMutation());
}
383
/// Return true if the target machine is compiling position-independent code.
bool X86Subtarget::isPositionIndependent() const {
  return TM.isPositionIndependent();
}
387