//===-- X86MCTargetDesc.cpp - X86 Target Descriptions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides X86 specific target descriptions.
//
//===----------------------------------------------------------------------===//

#include "X86MCTargetDesc.h"
#include "TargetInfo/X86TargetInfo.h"
#include "X86ATTInstPrinter.h"
#include "X86BaseInfo.h"
#include "X86IntelInstPrinter.h"
#include "X86MCAsmInfo.h"
#include "X86TargetStreamer.h"
#include "llvm/ADT/APInt.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/TargetParser/Host.h"
#include "llvm/TargetParser/Triple.h"

using namespace llvm;

#define GET_REGINFO_MC_DESC
#include "X86GenRegisterInfo.inc"

#define GET_INSTRINFO_MC_DESC
#define GET_INSTRINFO_MC_HELPERS
#define ENABLE_INSTR_PREDICATE_VERIFIER
#include "X86GenInstrInfo.inc"

#define GET_SUBTARGETINFO_MC_DESC
#include "X86GenSubtargetInfo.inc"

std::string X86_MC::ParseX86Triple(const Triple &TT) {
  std::string FS;
  // SSE2 should default to enabled in 64-bit mode, but can be turned off
  // explicitly.
  if (TT.isArch64Bit())
    FS = "+64bit-mode,-32bit-mode,-16bit-mode,+sse2";
  else if (TT.getEnvironment() != Triple::CODE16)
    FS = "-64bit-mode,+32bit-mode,-16bit-mode";
  else
    FS = "-64bit-mode,-32bit-mode,+16bit-mode";

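  // For example, "x86_64-unknown-linux-gnu" yields
  // "+64bit-mode,-32bit-mode,-16bit-mode,+sse2", and "i386-pc-linux-gnu"
  // yields "-64bit-mode,+32bit-mode,-16bit-mode".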
  return FS;
}

unsigned X86_MC::getDwarfRegFlavour(const Triple &TT, bool isEH) {
  if (TT.getArch() == Triple::x86_64)
    return DWARFFlavour::X86_64;

  if (TT.isOSDarwin())
    return isEH ? DWARFFlavour::X86_32_DarwinEH : DWARFFlavour::X86_32_Generic;
  if (TT.isOSCygMing())
    // Unsupported for now; just use the generic fallback.
    return DWARFFlavour::X86_32_Generic;
  return DWARFFlavour::X86_32_Generic;
}

bool X86_MC::hasLockPrefix(const MCInst &MI) {
  return MI.getFlags() & X86::IP_HAS_LOCK;
}

static bool isMemOperand(const MCInst &MI, unsigned Op, unsigned RegClassID) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
  const MCRegisterClass &RC = X86MCRegisterClasses[RegClassID];

  return (Base.isReg() && Base.getReg() != 0 && RC.contains(Base.getReg())) ||
         (Index.isReg() && Index.getReg() != 0 && RC.contains(Index.getReg()));
}

bool X86_MC::is16BitMemOperand(const MCInst &MI, unsigned Op,
                               const MCSubtargetInfo &STI) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);

  if (STI.hasFeature(X86::Is16Bit) && Base.isReg() && Base.getReg() == 0 &&
      Index.isReg() && Index.getReg() == 0)
    return true;
  return isMemOperand(MI, Op, X86::GR16RegClassID);
}

bool X86_MC::is32BitMemOperand(const MCInst &MI, unsigned Op) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
  if (Base.isReg() && Base.getReg() == X86::EIP) {
    assert(Index.isReg() && Index.getReg() == 0 && "Invalid eip-based address");
    return true;
  }
  if (Index.isReg() && Index.getReg() == X86::EIZ)
    return true;
  return isMemOperand(MI, Op, X86::GR32RegClassID);
}

#ifndef NDEBUG
bool X86_MC::is64BitMemOperand(const MCInst &MI, unsigned Op) {
  return isMemOperand(MI, Op, X86::GR64RegClassID);
}
#endif

bool X86_MC::needsAddressSizeOverride(const MCInst &MI,
                                      const MCSubtargetInfo &STI,
                                      int MemoryOperand, uint64_t TSFlags) {
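  // A 0x67 address-size override prefix is needed whenever the effective
  // address size of the instruction differs from the default address size of
  // the current mode.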
  uint64_t AdSize = TSFlags & X86II::AdSizeMask;
  bool Is16BitMode = STI.hasFeature(X86::Is16Bit);
  bool Is32BitMode = STI.hasFeature(X86::Is32Bit);
  bool Is64BitMode = STI.hasFeature(X86::Is64Bit);
  if ((Is16BitMode && AdSize == X86II::AdSize32) ||
      (Is32BitMode && AdSize == X86II::AdSize16) ||
      (Is64BitMode && AdSize == X86II::AdSize32))
    return true;
  uint64_t Form = TSFlags & X86II::FormMask;
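  // String instructions (MOVS, LODS, STOS, ...) address memory implicitly
  // through SI/DI, ESI/EDI, or RSI/RDI; the width of those implicit registers
  // decides whether an override is required.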
  switch (Form) {
  default:
    break;
  case X86II::RawFrmDstSrc: {
    unsigned siReg = MI.getOperand(1).getReg();
    assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||
            (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||
            (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&
           "SI and DI register sizes do not match");
    return (!Is32BitMode && siReg == X86::ESI) ||
           (Is32BitMode && siReg == X86::SI);
  }
  case X86II::RawFrmSrc: {
    unsigned siReg = MI.getOperand(0).getReg();
    return (!Is32BitMode && siReg == X86::ESI) ||
           (Is32BitMode && siReg == X86::SI);
  }
  case X86II::RawFrmDst: {
    unsigned siReg = MI.getOperand(0).getReg();
    return (!Is32BitMode && siReg == X86::EDI) ||
           (Is32BitMode && siReg == X86::DI);
  }
  }

  // Determine where the memory operand starts, if present.
  if (MemoryOperand < 0)
    return false;

  if (STI.hasFeature(X86::Is64Bit)) {
    assert(!is16BitMemOperand(MI, MemoryOperand, STI));
    return is32BitMemOperand(MI, MemoryOperand);
  }
  if (STI.hasFeature(X86::Is32Bit)) {
    assert(!is64BitMemOperand(MI, MemoryOperand));
    return is16BitMemOperand(MI, MemoryOperand, STI);
  }
  assert(STI.hasFeature(X86::Is16Bit));
  assert(!is64BitMemOperand(MI, MemoryOperand));
  return !is16BitMemOperand(MI, MemoryOperand, STI);
}

void X86_MC::initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI) {
  // FIXME: TableGen these.
  for (unsigned Reg = X86::NoRegister + 1; Reg < X86::NUM_TARGET_REGS; ++Reg) {
    unsigned SEH = MRI->getEncodingValue(Reg);
    MRI->mapLLVMRegToSEHReg(Reg, SEH);
  }

  // Mapping from CodeView to MC register id.
  static const struct {
    codeview::RegisterId CVReg;
    MCPhysReg Reg;
  } RegMap[] = {
      {codeview::RegisterId::AL, X86::AL},
      {codeview::RegisterId::CL, X86::CL},
      {codeview::RegisterId::DL, X86::DL},
      {codeview::RegisterId::BL, X86::BL},
      {codeview::RegisterId::AH, X86::AH},
      {codeview::RegisterId::CH, X86::CH},
      {codeview::RegisterId::DH, X86::DH},
      {codeview::RegisterId::BH, X86::BH},
      {codeview::RegisterId::AX, X86::AX},
      {codeview::RegisterId::CX, X86::CX},
      {codeview::RegisterId::DX, X86::DX},
      {codeview::RegisterId::BX, X86::BX},
      {codeview::RegisterId::SP, X86::SP},
      {codeview::RegisterId::BP, X86::BP},
      {codeview::RegisterId::SI, X86::SI},
      {codeview::RegisterId::DI, X86::DI},
      {codeview::RegisterId::EAX, X86::EAX},
      {codeview::RegisterId::ECX, X86::ECX},
      {codeview::RegisterId::EDX, X86::EDX},
      {codeview::RegisterId::EBX, X86::EBX},
      {codeview::RegisterId::ESP, X86::ESP},
      {codeview::RegisterId::EBP, X86::EBP},
      {codeview::RegisterId::ESI, X86::ESI},
      {codeview::RegisterId::EDI, X86::EDI},

      {codeview::RegisterId::EFLAGS, X86::EFLAGS},

      {codeview::RegisterId::ST0, X86::ST0},
      {codeview::RegisterId::ST1, X86::ST1},
      {codeview::RegisterId::ST2, X86::ST2},
      {codeview::RegisterId::ST3, X86::ST3},
      {codeview::RegisterId::ST4, X86::ST4},
      {codeview::RegisterId::ST5, X86::ST5},
      {codeview::RegisterId::ST6, X86::ST6},
      {codeview::RegisterId::ST7, X86::ST7},

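      // LLVM's pre-stackification x87 registers FP0-FP7 map to the same
      // CodeView ids as the physical ST0-ST7 stack registers.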
      {codeview::RegisterId::ST0, X86::FP0},
      {codeview::RegisterId::ST1, X86::FP1},
      {codeview::RegisterId::ST2, X86::FP2},
      {codeview::RegisterId::ST3, X86::FP3},
      {codeview::RegisterId::ST4, X86::FP4},
      {codeview::RegisterId::ST5, X86::FP5},
      {codeview::RegisterId::ST6, X86::FP6},
      {codeview::RegisterId::ST7, X86::FP7},

      {codeview::RegisterId::MM0, X86::MM0},
      {codeview::RegisterId::MM1, X86::MM1},
      {codeview::RegisterId::MM2, X86::MM2},
      {codeview::RegisterId::MM3, X86::MM3},
      {codeview::RegisterId::MM4, X86::MM4},
      {codeview::RegisterId::MM5, X86::MM5},
      {codeview::RegisterId::MM6, X86::MM6},
      {codeview::RegisterId::MM7, X86::MM7},

      {codeview::RegisterId::XMM0, X86::XMM0},
      {codeview::RegisterId::XMM1, X86::XMM1},
      {codeview::RegisterId::XMM2, X86::XMM2},
      {codeview::RegisterId::XMM3, X86::XMM3},
      {codeview::RegisterId::XMM4, X86::XMM4},
      {codeview::RegisterId::XMM5, X86::XMM5},
      {codeview::RegisterId::XMM6, X86::XMM6},
      {codeview::RegisterId::XMM7, X86::XMM7},

      {codeview::RegisterId::XMM8, X86::XMM8},
      {codeview::RegisterId::XMM9, X86::XMM9},
      {codeview::RegisterId::XMM10, X86::XMM10},
      {codeview::RegisterId::XMM11, X86::XMM11},
      {codeview::RegisterId::XMM12, X86::XMM12},
      {codeview::RegisterId::XMM13, X86::XMM13},
      {codeview::RegisterId::XMM14, X86::XMM14},
      {codeview::RegisterId::XMM15, X86::XMM15},

      {codeview::RegisterId::SIL, X86::SIL},
      {codeview::RegisterId::DIL, X86::DIL},
      {codeview::RegisterId::BPL, X86::BPL},
      {codeview::RegisterId::SPL, X86::SPL},
      {codeview::RegisterId::RAX, X86::RAX},
      {codeview::RegisterId::RBX, X86::RBX},
      {codeview::RegisterId::RCX, X86::RCX},
      {codeview::RegisterId::RDX, X86::RDX},
      {codeview::RegisterId::RSI, X86::RSI},
      {codeview::RegisterId::RDI, X86::RDI},
      {codeview::RegisterId::RBP, X86::RBP},
      {codeview::RegisterId::RSP, X86::RSP},
      {codeview::RegisterId::R8, X86::R8},
      {codeview::RegisterId::R9, X86::R9},
      {codeview::RegisterId::R10, X86::R10},
      {codeview::RegisterId::R11, X86::R11},
      {codeview::RegisterId::R12, X86::R12},
      {codeview::RegisterId::R13, X86::R13},
      {codeview::RegisterId::R14, X86::R14},
      {codeview::RegisterId::R15, X86::R15},
      {codeview::RegisterId::R8B, X86::R8B},
      {codeview::RegisterId::R9B, X86::R9B},
      {codeview::RegisterId::R10B, X86::R10B},
      {codeview::RegisterId::R11B, X86::R11B},
      {codeview::RegisterId::R12B, X86::R12B},
      {codeview::RegisterId::R13B, X86::R13B},
      {codeview::RegisterId::R14B, X86::R14B},
      {codeview::RegisterId::R15B, X86::R15B},
      {codeview::RegisterId::R8W, X86::R8W},
      {codeview::RegisterId::R9W, X86::R9W},
      {codeview::RegisterId::R10W, X86::R10W},
      {codeview::RegisterId::R11W, X86::R11W},
      {codeview::RegisterId::R12W, X86::R12W},
      {codeview::RegisterId::R13W, X86::R13W},
      {codeview::RegisterId::R14W, X86::R14W},
      {codeview::RegisterId::R15W, X86::R15W},
      {codeview::RegisterId::R8D, X86::R8D},
      {codeview::RegisterId::R9D, X86::R9D},
      {codeview::RegisterId::R10D, X86::R10D},
      {codeview::RegisterId::R11D, X86::R11D},
      {codeview::RegisterId::R12D, X86::R12D},
      {codeview::RegisterId::R13D, X86::R13D},
      {codeview::RegisterId::R14D, X86::R14D},
      {codeview::RegisterId::R15D, X86::R15D},
      {codeview::RegisterId::AMD64_YMM0, X86::YMM0},
      {codeview::RegisterId::AMD64_YMM1, X86::YMM1},
      {codeview::RegisterId::AMD64_YMM2, X86::YMM2},
      {codeview::RegisterId::AMD64_YMM3, X86::YMM3},
      {codeview::RegisterId::AMD64_YMM4, X86::YMM4},
      {codeview::RegisterId::AMD64_YMM5, X86::YMM5},
      {codeview::RegisterId::AMD64_YMM6, X86::YMM6},
      {codeview::RegisterId::AMD64_YMM7, X86::YMM7},
      {codeview::RegisterId::AMD64_YMM8, X86::YMM8},
      {codeview::RegisterId::AMD64_YMM9, X86::YMM9},
      {codeview::RegisterId::AMD64_YMM10, X86::YMM10},
      {codeview::RegisterId::AMD64_YMM11, X86::YMM11},
      {codeview::RegisterId::AMD64_YMM12, X86::YMM12},
      {codeview::RegisterId::AMD64_YMM13, X86::YMM13},
      {codeview::RegisterId::AMD64_YMM14, X86::YMM14},
      {codeview::RegisterId::AMD64_YMM15, X86::YMM15},
      {codeview::RegisterId::AMD64_YMM16, X86::YMM16},
      {codeview::RegisterId::AMD64_YMM17, X86::YMM17},
      {codeview::RegisterId::AMD64_YMM18, X86::YMM18},
      {codeview::RegisterId::AMD64_YMM19, X86::YMM19},
      {codeview::RegisterId::AMD64_YMM20, X86::YMM20},
      {codeview::RegisterId::AMD64_YMM21, X86::YMM21},
      {codeview::RegisterId::AMD64_YMM22, X86::YMM22},
      {codeview::RegisterId::AMD64_YMM23, X86::YMM23},
      {codeview::RegisterId::AMD64_YMM24, X86::YMM24},
      {codeview::RegisterId::AMD64_YMM25, X86::YMM25},
      {codeview::RegisterId::AMD64_YMM26, X86::YMM26},
      {codeview::RegisterId::AMD64_YMM27, X86::YMM27},
      {codeview::RegisterId::AMD64_YMM28, X86::YMM28},
      {codeview::RegisterId::AMD64_YMM29, X86::YMM29},
      {codeview::RegisterId::AMD64_YMM30, X86::YMM30},
      {codeview::RegisterId::AMD64_YMM31, X86::YMM31},
      {codeview::RegisterId::AMD64_ZMM0, X86::ZMM0},
      {codeview::RegisterId::AMD64_ZMM1, X86::ZMM1},
      {codeview::RegisterId::AMD64_ZMM2, X86::ZMM2},
      {codeview::RegisterId::AMD64_ZMM3, X86::ZMM3},
      {codeview::RegisterId::AMD64_ZMM4, X86::ZMM4},
      {codeview::RegisterId::AMD64_ZMM5, X86::ZMM5},
      {codeview::RegisterId::AMD64_ZMM6, X86::ZMM6},
      {codeview::RegisterId::AMD64_ZMM7, X86::ZMM7},
      {codeview::RegisterId::AMD64_ZMM8, X86::ZMM8},
      {codeview::RegisterId::AMD64_ZMM9, X86::ZMM9},
      {codeview::RegisterId::AMD64_ZMM10, X86::ZMM10},
      {codeview::RegisterId::AMD64_ZMM11, X86::ZMM11},
      {codeview::RegisterId::AMD64_ZMM12, X86::ZMM12},
      {codeview::RegisterId::AMD64_ZMM13, X86::ZMM13},
      {codeview::RegisterId::AMD64_ZMM14, X86::ZMM14},
      {codeview::RegisterId::AMD64_ZMM15, X86::ZMM15},
      {codeview::RegisterId::AMD64_ZMM16, X86::ZMM16},
      {codeview::RegisterId::AMD64_ZMM17, X86::ZMM17},
      {codeview::RegisterId::AMD64_ZMM18, X86::ZMM18},
      {codeview::RegisterId::AMD64_ZMM19, X86::ZMM19},
      {codeview::RegisterId::AMD64_ZMM20, X86::ZMM20},
      {codeview::RegisterId::AMD64_ZMM21, X86::ZMM21},
      {codeview::RegisterId::AMD64_ZMM22, X86::ZMM22},
      {codeview::RegisterId::AMD64_ZMM23, X86::ZMM23},
      {codeview::RegisterId::AMD64_ZMM24, X86::ZMM24},
      {codeview::RegisterId::AMD64_ZMM25, X86::ZMM25},
      {codeview::RegisterId::AMD64_ZMM26, X86::ZMM26},
      {codeview::RegisterId::AMD64_ZMM27, X86::ZMM27},
      {codeview::RegisterId::AMD64_ZMM28, X86::ZMM28},
      {codeview::RegisterId::AMD64_ZMM29, X86::ZMM29},
      {codeview::RegisterId::AMD64_ZMM30, X86::ZMM30},
      {codeview::RegisterId::AMD64_ZMM31, X86::ZMM31},
      {codeview::RegisterId::AMD64_K0, X86::K0},
      {codeview::RegisterId::AMD64_K1, X86::K1},
      {codeview::RegisterId::AMD64_K2, X86::K2},
      {codeview::RegisterId::AMD64_K3, X86::K3},
      {codeview::RegisterId::AMD64_K4, X86::K4},
      {codeview::RegisterId::AMD64_K5, X86::K5},
      {codeview::RegisterId::AMD64_K6, X86::K6},
      {codeview::RegisterId::AMD64_K7, X86::K7},
      {codeview::RegisterId::AMD64_XMM16, X86::XMM16},
      {codeview::RegisterId::AMD64_XMM17, X86::XMM17},
      {codeview::RegisterId::AMD64_XMM18, X86::XMM18},
      {codeview::RegisterId::AMD64_XMM19, X86::XMM19},
      {codeview::RegisterId::AMD64_XMM20, X86::XMM20},
      {codeview::RegisterId::AMD64_XMM21, X86::XMM21},
      {codeview::RegisterId::AMD64_XMM22, X86::XMM22},
      {codeview::RegisterId::AMD64_XMM23, X86::XMM23},
      {codeview::RegisterId::AMD64_XMM24, X86::XMM24},
      {codeview::RegisterId::AMD64_XMM25, X86::XMM25},
      {codeview::RegisterId::AMD64_XMM26, X86::XMM26},
      {codeview::RegisterId::AMD64_XMM27, X86::XMM27},
      {codeview::RegisterId::AMD64_XMM28, X86::XMM28},
      {codeview::RegisterId::AMD64_XMM29, X86::XMM29},
      {codeview::RegisterId::AMD64_XMM30, X86::XMM30},
      {codeview::RegisterId::AMD64_XMM31, X86::XMM31},
  };
  for (const auto &I : RegMap)
    MRI->mapLLVMRegToCVReg(I.Reg, static_cast<int>(I.CVReg));
}

MCSubtargetInfo *X86_MC::createX86MCSubtargetInfo(const Triple &TT,
                                                  StringRef CPU, StringRef FS) {
  std::string ArchFS = X86_MC::ParseX86Triple(TT);
  assert(!ArchFS.empty() && "Failed to parse X86 triple");
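  // ArchFS is prepended so that features explicitly listed in FS, being
  // applied later, take precedence over the triple-derived defaults.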
  if (!FS.empty())
    ArchFS = (Twine(ArchFS) + "," + FS).str();

  if (CPU.empty())
    CPU = "generic";

  size_t posNoEVEX512 = FS.rfind("-evex512");
  // Make sure we won't be cheated by "-avx512fp16".
  size_t posNoAVX512F =
      FS.ends_with("-avx512f") ? FS.size() - 8 : FS.rfind("-avx512f,");
  size_t posEVEX512 = FS.rfind("+evex512");
  size_t posAVX512F = FS.rfind("+avx512"); // Any AVX512XXX will enable AVX512F.

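  // If some AVX512 feature is enabled and never subsequently disabled, and the
  // user took no explicit position on EVEX512, default it to on so that
  // 512-bit vectors stay available.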
  if (posAVX512F != StringRef::npos &&
      (posNoAVX512F == StringRef::npos || posNoAVX512F < posAVX512F))
    if (posEVEX512 == StringRef::npos && posNoEVEX512 == StringRef::npos)
      ArchFS += ",+evex512";

  return createX86MCSubtargetInfoImpl(TT, CPU, /*TuneCPU*/ CPU, ArchFS);
}

static MCInstrInfo *createX86MCInstrInfo() {
  MCInstrInfo *X = new MCInstrInfo();
  InitX86MCInstrInfo(X);
  return X;
}

static MCRegisterInfo *createX86MCRegisterInfo(const Triple &TT) {
  unsigned RA = (TT.getArch() == Triple::x86_64)
                    ? X86::RIP  // Should have dwarf #16.
                    : X86::EIP; // Should have dwarf #8.

  MCRegisterInfo *X = new MCRegisterInfo();
  InitX86MCRegisterInfo(X, RA, X86_MC::getDwarfRegFlavour(TT, false),
                        X86_MC::getDwarfRegFlavour(TT, true), RA);
  X86_MC::initLLVMToSEHAndCVRegMapping(X);
  return X;
}

static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI,
                                     const Triple &TheTriple,
                                     const MCTargetOptions &Options) {
  bool is64Bit = TheTriple.getArch() == Triple::x86_64;

  MCAsmInfo *MAI;
  if (TheTriple.isOSBinFormatMachO()) {
    if (is64Bit)
      MAI = new X86_64MCAsmInfoDarwin(TheTriple);
    else
      MAI = new X86MCAsmInfoDarwin(TheTriple);
  } else if (TheTriple.isOSBinFormatELF()) {
    // Force the use of an ELF container.
    MAI = new X86ELFMCAsmInfo(TheTriple);
  } else if (TheTriple.isWindowsMSVCEnvironment() ||
             TheTriple.isWindowsCoreCLREnvironment()) {
    if (Options.getAssemblyLanguage().equals_insensitive("masm"))
      MAI = new X86MCAsmInfoMicrosoftMASM(TheTriple);
    else
      MAI = new X86MCAsmInfoMicrosoft(TheTriple);
  } else if (TheTriple.isOSCygMing() ||
             TheTriple.isWindowsItaniumEnvironment()) {
    MAI = new X86MCAsmInfoGNUCOFF(TheTriple);
  } else if (TheTriple.isUEFI()) {
    MAI = new X86MCAsmInfoGNUCOFF(TheTriple);
  } else {
    // The default is ELF.
    MAI = new X86ELFMCAsmInfo(TheTriple);
  }

  // Initialize the initial frame state.
  // stackGrowth is the number of bytes by which the stack grows when the
  // return address is pushed (negative because the stack grows down).
  int stackGrowth = is64Bit ? -8 : -4;

  // At function entry the CFA is the stack pointer minus stackGrowth, i.e.
  // just above the pushed return address.
  unsigned StackPtr = is64Bit ? X86::RSP : X86::ESP;
  MCCFIInstruction Inst = MCCFIInstruction::cfiDefCfa(
      nullptr, MRI.getDwarfRegNum(StackPtr, true), -stackGrowth);
  MAI->addInitialFrameState(Inst);

  // Add the return address to the move list.
  unsigned InstPtr = is64Bit ? X86::RIP : X86::EIP;
  MCCFIInstruction Inst2 = MCCFIInstruction::createOffset(
      nullptr, MRI.getDwarfRegNum(InstPtr, true), stackGrowth);
  MAI->addInitialFrameState(Inst2);
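
  // Together these describe the entry state on x86-64 as CFA = RSP + 8 with
  // the return address saved at CFA - 8 (ESP + 4 and CFA - 4 on 32-bit).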

  return MAI;
}

static MCInstPrinter *createX86MCInstPrinter(const Triple &T,
                                             unsigned SyntaxVariant,
                                             const MCAsmInfo &MAI,
                                             const MCInstrInfo &MII,
                                             const MCRegisterInfo &MRI) {
  if (SyntaxVariant == 0)
    return new X86ATTInstPrinter(MAI, MII, MRI);
  if (SyntaxVariant == 1)
    return new X86IntelInstPrinter(MAI, MII, MRI);
  return nullptr;
}

static MCRelocationInfo *createX86MCRelocationInfo(const Triple &TheTriple,
                                                   MCContext &Ctx) {
  // Default to the stock relocation info.
  return llvm::createMCRelocationInfo(TheTriple, Ctx);
}

namespace llvm {
namespace X86_MC {

class X86MCInstrAnalysis : public MCInstrAnalysis {
  X86MCInstrAnalysis(const X86MCInstrAnalysis &) = delete;
  X86MCInstrAnalysis &operator=(const X86MCInstrAnalysis &) = delete;
  virtual ~X86MCInstrAnalysis() = default;

public:
  X86MCInstrAnalysis(const MCInstrInfo *MCII) : MCInstrAnalysis(MCII) {}

#define GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS
#include "X86GenSubtargetInfo.inc"

  bool clearsSuperRegisters(const MCRegisterInfo &MRI, const MCInst &Inst,
                            APInt &Mask) const override;
  std::vector<std::pair<uint64_t, uint64_t>>
  findPltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
                 const Triple &TargetTriple) const override;

  bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
                      uint64_t &Target) const override;
  std::optional<uint64_t>
  evaluateMemoryOperandAddress(const MCInst &Inst, const MCSubtargetInfo *STI,
                               uint64_t Addr, uint64_t Size) const override;
  std::optional<uint64_t>
  getMemoryOperandRelocationOffset(const MCInst &Inst,
                                   uint64_t Size) const override;
};

#define GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS
#include "X86GenSubtargetInfo.inc"

bool X86MCInstrAnalysis::clearsSuperRegisters(const MCRegisterInfo &MRI,
                                              const MCInst &Inst,
                                              APInt &Mask) const {
  const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
  unsigned NumDefs = Desc.getNumDefs();
  unsigned NumImplicitDefs = Desc.implicit_defs().size();
  assert(Mask.getBitWidth() == NumDefs + NumImplicitDefs &&
         "Unexpected number of bits in the mask!");

  bool HasVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::VEX;
  bool HasEVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX;
  bool HasXOP = (Desc.TSFlags & X86II::EncodingMask) == X86II::XOP;

  const MCRegisterClass &GR32RC = MRI.getRegClass(X86::GR32RegClassID);
  const MCRegisterClass &VR128XRC = MRI.getRegClass(X86::VR128XRegClassID);
  const MCRegisterClass &VR256XRC = MRI.getRegClass(X86::VR256XRegClassID);

  auto ClearsSuperReg = [=](unsigned RegID) {
    // On X86-64, a general purpose integer register is viewed as a 64-bit
    // register internal to the processor.
    // An update to the lower 32 bits of a 64 bit integer register is
    // architecturally defined to zero extend the upper 32 bits.
    if (GR32RC.contains(RegID))
      return true;

    // Early exit if this instruction has no vex/evex/xop prefix.
    if (!HasEVEX && !HasVEX && !HasXOP)
      return false;

    // All VEX and EVEX encoded instructions are defined to zero the high bits
    // of the destination register up to VLMAX (i.e. the maximum vector register
    // width pertaining to the instruction).
    // We assume the same behavior for XOP instructions too.
    return VR128XRC.contains(RegID) || VR256XRC.contains(RegID);
  };

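  // Bit I of Mask corresponds to def I, with the explicit defs numbered first
  // and the implicit defs after them.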
  Mask.clearAllBits();
  for (unsigned I = 0, E = NumDefs; I < E; ++I) {
    const MCOperand &Op = Inst.getOperand(I);
    if (ClearsSuperReg(Op.getReg()))
      Mask.setBit(I);
  }

  for (unsigned I = 0, E = NumImplicitDefs; I < E; ++I) {
    const MCPhysReg Reg = Desc.implicit_defs()[I];
    if (ClearsSuperReg(Reg))
      Mask.setBit(NumDefs + I);
  }

  return Mask.getBoolValue();
}

static std::vector<std::pair<uint64_t, uint64_t>>
findX86PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents) {
  // Do a lightweight parsing of PLT entries.
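  // Each entry starts with either "ff a3 imm32" (jmp *imm32(%ebx), the PIC
  // form) or "ff 25 imm32" (jmp *imm32, the non-PIC form).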
  std::vector<std::pair<uint64_t, uint64_t>> Result;
  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End; ) {
    // Recognize a jmp.
    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0xa3) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address of the base of the .got.plt section plus the immediate.
      // Set the 1 << 32 bit to let ELFObjectFileBase::getPltEntries convert the
      // offset to an address. Imm may be a negative int32_t if the GOT entry is
      // in .got.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.emplace_back(PltSectionVA + Byte, Imm | (uint64_t(1) << 32));
      Byte += 6;
    } else if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(std::make_pair(PltSectionVA + Byte, Imm));
      Byte += 6;
    } else
      Byte++;
  }
  return Result;
}

static std::vector<std::pair<uint64_t, uint64_t>>
findX86_64PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents) {
  // Do a lightweight parsing of PLT entries.
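  // Each entry starts with "ff 25 imm32", i.e. jmp *imm32(%rip).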
  std::vector<std::pair<uint64_t, uint64_t>> Result;
  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End; ) {
    // Recognize a jmp.
    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address of the next instruction plus the immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(
          std::make_pair(PltSectionVA + Byte, PltSectionVA + Byte + 6 + Imm));
      Byte += 6;
    } else
      Byte++;
  }
  return Result;
}

std::vector<std::pair<uint64_t, uint64_t>>
X86MCInstrAnalysis::findPltEntries(uint64_t PltSectionVA,
                                   ArrayRef<uint8_t> PltContents,
                                   const Triple &TargetTriple) const {
  switch (TargetTriple.getArch()) {
  case Triple::x86:
    return findX86PltEntries(PltSectionVA, PltContents);
  case Triple::x86_64:
    return findX86_64PltEntries(PltSectionVA, PltContents);
  default:
    return {};
  }
}

bool X86MCInstrAnalysis::evaluateBranch(const MCInst &Inst, uint64_t Addr,
                                        uint64_t Size, uint64_t &Target) const {
  if (Inst.getNumOperands() == 0 ||
      Info->get(Inst.getOpcode()).operands()[0].OperandType !=
          MCOI::OPERAND_PCREL)
    return false;
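  // The PC-relative displacement is measured from the end of the instruction.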
  Target = Addr + Size + Inst.getOperand(0).getImm();
  return true;
}

std::optional<uint64_t> X86MCInstrAnalysis::evaluateMemoryOperandAddress(
    const MCInst &Inst, const MCSubtargetInfo *STI, uint64_t Addr,
    uint64_t Size) const {
  const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
  int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
  if (MemOpStart == -1)
    return std::nullopt;
  MemOpStart += X86II::getOperandBias(MCID);

  const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
  const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
  const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
  const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
  const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
  if (SegReg.getReg() != 0 || IndexReg.getReg() != 0 ||
      ScaleAmt.getImm() != 1 || !Disp.isImm())
    return std::nullopt;

  // RIP-relative addressing.
  if (BaseReg.getReg() == X86::RIP)
    return Addr + Size + Disp.getImm();

  return std::nullopt;
}

std::optional<uint64_t>
X86MCInstrAnalysis::getMemoryOperandRelocationOffset(const MCInst &Inst,
                                                     uint64_t Size) const {
  if (Inst.getOpcode() != X86::LEA64r)
    return std::nullopt;
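  // Only rip-relative LEA64r is handled; its disp32 occupies the final four
  // bytes of the instruction, so the relocation applies at offset Size - 4.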
  const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
  int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
  if (MemOpStart == -1)
    return std::nullopt;
  MemOpStart += X86II::getOperandBias(MCID);
  const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
  const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
  const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
  const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
  const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
  // Must be a simple rip-relative address.
  if (BaseReg.getReg() != X86::RIP || SegReg.getReg() != 0 ||
      IndexReg.getReg() != 0 || ScaleAmt.getImm() != 1 || !Disp.isImm())
    return std::nullopt;
  // rip-relative ModR/M immediate is 32 bits.
  assert(Size > 4 && "invalid instruction size for rip-relative lea");
  return Size - 4;
}

} // end of namespace X86_MC

} // end of namespace llvm

static MCInstrAnalysis *createX86MCInstrAnalysis(const MCInstrInfo *Info) {
  return new X86_MC::X86MCInstrAnalysis(Info);
}

// Force static initialization.
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeX86TargetMC() {
  for (Target *T : {&getTheX86_32Target(), &getTheX86_64Target()}) {
    // Register the MC asm info.
    RegisterMCAsmInfoFn X(*T, createX86MCAsmInfo);

    // Register the MC instruction info.
    TargetRegistry::RegisterMCInstrInfo(*T, createX86MCInstrInfo);

    // Register the MC register info.
    TargetRegistry::RegisterMCRegInfo(*T, createX86MCRegisterInfo);

    // Register the MC subtarget info.
    TargetRegistry::RegisterMCSubtargetInfo(*T,
                                            X86_MC::createX86MCSubtargetInfo);

    // Register the MC instruction analyzer.
    TargetRegistry::RegisterMCInstrAnalysis(*T, createX86MCInstrAnalysis);

    // Register the code emitter.
    TargetRegistry::RegisterMCCodeEmitter(*T, createX86MCCodeEmitter);

    // Register the obj target streamer.
    TargetRegistry::RegisterObjectTargetStreamer(*T,
                                                 createX86ObjectTargetStreamer);

    // Register the asm target streamer.
    TargetRegistry::RegisterAsmTargetStreamer(*T, createX86AsmTargetStreamer);

    // Register the null streamer.
    TargetRegistry::RegisterNullTargetStreamer(*T, createX86NullTargetStreamer);

    TargetRegistry::RegisterCOFFStreamer(*T, createX86WinCOFFStreamer);

    // Register the MCInstPrinter.
    TargetRegistry::RegisterMCInstPrinter(*T, createX86MCInstPrinter);

    // Register the MC relocation info.
    TargetRegistry::RegisterMCRelocationInfo(*T, createX86MCRelocationInfo);
  }

  // Register the asm backend.
  TargetRegistry::RegisterMCAsmBackend(getTheX86_32Target(),
                                       createX86_32AsmBackend);
  TargetRegistry::RegisterMCAsmBackend(getTheX86_64Target(),
                                       createX86_64AsmBackend);
}

MCRegister llvm::getX86SubSuperRegister(MCRegister Reg, unsigned Size,
                                        bool High) {
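  // Maps a GPR to its sub- or super-register of the requested bit width, e.g.
  // (AL, 64) -> RAX, (RAX, 16) -> AX, and (RAX, 8, /*High=*/true) -> AH.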
#define DEFAULT_NOREG                                                          \
  default:                                                                     \
    return X86::NoRegister;
#define SUB_SUPER(R1, R2, R3, R4, R)                                           \
  case X86::R1:                                                                \
  case X86::R2:                                                                \
  case X86::R3:                                                                \
  case X86::R4:                                                                \
    return X86::R;
#define A_SUB_SUPER(R)                                                         \
  case X86::AH:                                                                \
    SUB_SUPER(AL, AX, EAX, RAX, R)
#define D_SUB_SUPER(R)                                                         \
  case X86::DH:                                                                \
    SUB_SUPER(DL, DX, EDX, RDX, R)
#define C_SUB_SUPER(R)                                                         \
  case X86::CH:                                                                \
    SUB_SUPER(CL, CX, ECX, RCX, R)
#define B_SUB_SUPER(R)                                                         \
  case X86::BH:                                                                \
    SUB_SUPER(BL, BX, EBX, RBX, R)
#define SI_SUB_SUPER(R) SUB_SUPER(SIL, SI, ESI, RSI, R)
#define DI_SUB_SUPER(R) SUB_SUPER(DIL, DI, EDI, RDI, R)
#define BP_SUB_SUPER(R) SUB_SUPER(BPL, BP, EBP, RBP, R)
#define SP_SUB_SUPER(R) SUB_SUPER(SPL, SP, ESP, RSP, R)
#define NO_SUB_SUPER(NO, REG)                                                  \
  SUB_SUPER(R##NO##B, R##NO##W, R##NO##D, R##NO, REG)
#define NO_SUB_SUPER_B(NO) NO_SUB_SUPER(NO, R##NO##B)
#define NO_SUB_SUPER_W(NO) NO_SUB_SUPER(NO, R##NO##W)
#define NO_SUB_SUPER_D(NO) NO_SUB_SUPER(NO, R##NO##D)
#define NO_SUB_SUPER_Q(NO) NO_SUB_SUPER(NO, R##NO)
  switch (Size) {
  default:
    llvm_unreachable("illegal register size");
  case 8:
    if (High) {
      switch (Reg.id()) {
        DEFAULT_NOREG
        A_SUB_SUPER(AH)
        D_SUB_SUPER(DH)
        C_SUB_SUPER(CH)
        B_SUB_SUPER(BH)
      }
    } else {
      switch (Reg.id()) {
        DEFAULT_NOREG
        A_SUB_SUPER(AL)
        D_SUB_SUPER(DL)
        C_SUB_SUPER(CL)
        B_SUB_SUPER(BL)
        SI_SUB_SUPER(SIL)
        DI_SUB_SUPER(DIL)
        BP_SUB_SUPER(BPL)
        SP_SUB_SUPER(SPL)
        NO_SUB_SUPER_B(8)
        NO_SUB_SUPER_B(9)
        NO_SUB_SUPER_B(10)
        NO_SUB_SUPER_B(11)
        NO_SUB_SUPER_B(12)
        NO_SUB_SUPER_B(13)
        NO_SUB_SUPER_B(14)
        NO_SUB_SUPER_B(15)
        NO_SUB_SUPER_B(16)
        NO_SUB_SUPER_B(17)
        NO_SUB_SUPER_B(18)
        NO_SUB_SUPER_B(19)
        NO_SUB_SUPER_B(20)
        NO_SUB_SUPER_B(21)
        NO_SUB_SUPER_B(22)
        NO_SUB_SUPER_B(23)
        NO_SUB_SUPER_B(24)
        NO_SUB_SUPER_B(25)
        NO_SUB_SUPER_B(26)
        NO_SUB_SUPER_B(27)
        NO_SUB_SUPER_B(28)
        NO_SUB_SUPER_B(29)
        NO_SUB_SUPER_B(30)
        NO_SUB_SUPER_B(31)
      }
    }
  case 16:
    switch (Reg.id()) {
      DEFAULT_NOREG
      A_SUB_SUPER(AX)
      D_SUB_SUPER(DX)
      C_SUB_SUPER(CX)
      B_SUB_SUPER(BX)
      SI_SUB_SUPER(SI)
      DI_SUB_SUPER(DI)
      BP_SUB_SUPER(BP)
      SP_SUB_SUPER(SP)
      NO_SUB_SUPER_W(8)
      NO_SUB_SUPER_W(9)
      NO_SUB_SUPER_W(10)
      NO_SUB_SUPER_W(11)
      NO_SUB_SUPER_W(12)
      NO_SUB_SUPER_W(13)
      NO_SUB_SUPER_W(14)
      NO_SUB_SUPER_W(15)
      NO_SUB_SUPER_W(16)
      NO_SUB_SUPER_W(17)
      NO_SUB_SUPER_W(18)
      NO_SUB_SUPER_W(19)
      NO_SUB_SUPER_W(20)
      NO_SUB_SUPER_W(21)
      NO_SUB_SUPER_W(22)
      NO_SUB_SUPER_W(23)
      NO_SUB_SUPER_W(24)
      NO_SUB_SUPER_W(25)
      NO_SUB_SUPER_W(26)
      NO_SUB_SUPER_W(27)
      NO_SUB_SUPER_W(28)
      NO_SUB_SUPER_W(29)
      NO_SUB_SUPER_W(30)
      NO_SUB_SUPER_W(31)
    }
  case 32:
    switch (Reg.id()) {
      DEFAULT_NOREG
      A_SUB_SUPER(EAX)
      D_SUB_SUPER(EDX)
      C_SUB_SUPER(ECX)
      B_SUB_SUPER(EBX)
      SI_SUB_SUPER(ESI)
      DI_SUB_SUPER(EDI)
      BP_SUB_SUPER(EBP)
      SP_SUB_SUPER(ESP)
      NO_SUB_SUPER_D(8)
      NO_SUB_SUPER_D(9)
      NO_SUB_SUPER_D(10)
      NO_SUB_SUPER_D(11)
      NO_SUB_SUPER_D(12)
      NO_SUB_SUPER_D(13)
      NO_SUB_SUPER_D(14)
      NO_SUB_SUPER_D(15)
      NO_SUB_SUPER_D(16)
      NO_SUB_SUPER_D(17)
      NO_SUB_SUPER_D(18)
      NO_SUB_SUPER_D(19)
      NO_SUB_SUPER_D(20)
      NO_SUB_SUPER_D(21)
      NO_SUB_SUPER_D(22)
      NO_SUB_SUPER_D(23)
      NO_SUB_SUPER_D(24)
      NO_SUB_SUPER_D(25)
      NO_SUB_SUPER_D(26)
      NO_SUB_SUPER_D(27)
      NO_SUB_SUPER_D(28)
      NO_SUB_SUPER_D(29)
      NO_SUB_SUPER_D(30)
      NO_SUB_SUPER_D(31)
    }
  case 64:
    switch (Reg.id()) {
      DEFAULT_NOREG
      A_SUB_SUPER(RAX)
      D_SUB_SUPER(RDX)
      C_SUB_SUPER(RCX)
      B_SUB_SUPER(RBX)
      SI_SUB_SUPER(RSI)
      DI_SUB_SUPER(RDI)
      BP_SUB_SUPER(RBP)
      SP_SUB_SUPER(RSP)
      NO_SUB_SUPER_Q(8)
      NO_SUB_SUPER_Q(9)
      NO_SUB_SUPER_Q(10)
      NO_SUB_SUPER_Q(11)
      NO_SUB_SUPER_Q(12)
      NO_SUB_SUPER_Q(13)
      NO_SUB_SUPER_Q(14)
      NO_SUB_SUPER_Q(15)
      NO_SUB_SUPER_Q(16)
      NO_SUB_SUPER_Q(17)
      NO_SUB_SUPER_Q(18)
      NO_SUB_SUPER_Q(19)
      NO_SUB_SUPER_Q(20)
      NO_SUB_SUPER_Q(21)
      NO_SUB_SUPER_Q(22)
      NO_SUB_SUPER_Q(23)
      NO_SUB_SUPER_Q(24)
      NO_SUB_SUPER_Q(25)
      NO_SUB_SUPER_Q(26)
      NO_SUB_SUPER_Q(27)
      NO_SUB_SUPER_Q(28)
      NO_SUB_SUPER_Q(29)
      NO_SUB_SUPER_Q(30)
      NO_SUB_SUPER_Q(31)
    }
  }
}