X86RegisterBankInfo.cpp revision 360784
//===- X86RegisterBankInfo.cpp -----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the RegisterBankInfo class for X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "X86RegisterBankInfo.h"
#include "X86InstrInfo.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"

#define GET_TARGET_REGBANK_IMPL
#include "X86GenRegisterBank.inc"

using namespace llvm;
// This file will be TableGen'ed at some point.
#define GET_TARGET_REGBANK_INFO_IMPL
#include "X86GenRegisterBankInfo.def"

X86RegisterBankInfo::X86RegisterBankInfo(const TargetRegisterInfo &TRI)
    : X86GenRegisterBankInfo() {

  // Validate RegBank initialization.
  const RegisterBank &RBGPR = getRegBank(X86::GPRRegBankID);
  (void)RBGPR;
  assert(&X86::GPRRegBank == &RBGPR && "Incorrect RegBanks initialization.");

  // The GPR register bank is fully defined by all the registers in
  // GR64 + its subclasses.
  assert(RBGPR.covers(*TRI.getRegClass(X86::GR64RegClassID)) &&
         "Subclass not added?");
  assert(RBGPR.getSize() == 64 && "GPRs should hold up to 64 bits");
}

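/// Return the register bank that covers \p RC: general-purpose register
/// classes map onto the GPR bank, while scalar FP and vector register classes
/// map onto the VECR bank.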
const RegisterBank &
X86RegisterBankInfo::getRegBankFromRegClass(const TargetRegisterClass &RC,
                                            LLT) const {

  if (X86::GR8RegClass.hasSubClassEq(&RC) ||
      X86::GR16RegClass.hasSubClassEq(&RC) ||
      X86::GR32RegClass.hasSubClassEq(&RC) ||
      X86::GR64RegClass.hasSubClassEq(&RC) ||
      X86::LOW32_ADDR_ACCESSRegClass.hasSubClassEq(&RC) ||
      X86::LOW32_ADDR_ACCESS_RBPRegClass.hasSubClassEq(&RC))
    return getRegBank(X86::GPRRegBankID);

  if (X86::FR32XRegClass.hasSubClassEq(&RC) ||
      X86::FR64XRegClass.hasSubClassEq(&RC) ||
      X86::VR128XRegClass.hasSubClassEq(&RC) ||
      X86::VR256XRegClass.hasSubClassEq(&RC) ||
      X86::VR512RegClass.hasSubClassEq(&RC))
    return getRegBank(X86::VECRRegBankID);

  llvm_unreachable("Unsupported register kind.");
}

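/// Pick the PartialMappingIdx describing a value of type \p Ty. Scalars up to
/// 64 bits and pointers use the GPR entries when \p isFP is false; 32/64-bit
/// scalars use the FP entries when \p isFP is true; 128/256/512-bit values
/// use the vector entries.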
X86GenRegisterBankInfo::PartialMappingIdx
X86GenRegisterBankInfo::getPartialMappingIdx(const LLT &Ty, bool isFP) {
  if ((Ty.isScalar() && !isFP) || Ty.isPointer()) {
    switch (Ty.getSizeInBits()) {
    case 1:
    case 8:
      return PMI_GPR8;
    case 16:
      return PMI_GPR16;
    case 32:
      return PMI_GPR32;
    case 64:
      return PMI_GPR64;
    case 128:
      return PMI_VEC128;
    default:
      llvm_unreachable("Unsupported register size.");
    }
  } else if (Ty.isScalar()) {
    switch (Ty.getSizeInBits()) {
    case 32:
      return PMI_FP32;
    case 64:
      return PMI_FP64;
    case 128:
      return PMI_VEC128;
    default:
      llvm_unreachable("Unsupported register size.");
    }
  } else {
    switch (Ty.getSizeInBits()) {
    case 128:
      return PMI_VEC128;
    case 256:
      return PMI_VEC256;
    case 512:
      return PMI_VEC512;
    default:
      llvm_unreachable("Unsupported register size.");
    }
  }

  return PMI_None;
}

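/// Compute one PartialMappingIdx per operand of \p MI: non-register operands
/// get PMI_None, and register operands are classified by their LLT and the
/// requested \p isFP bank.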
void X86RegisterBankInfo::getInstrPartialMappingIdxs(
    const MachineInstr &MI, const MachineRegisterInfo &MRI, const bool isFP,
    SmallVectorImpl<PartialMappingIdx> &OpRegBankIdx) {

  unsigned NumOperands = MI.getNumOperands();
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    auto &MO = MI.getOperand(Idx);
    if (!MO.isReg())
      OpRegBankIdx[Idx] = PMI_None;
    else
      OpRegBankIdx[Idx] = getPartialMappingIdx(MRI.getType(MO.getReg()), isFP);
  }
}

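/// Turn the per-operand PartialMappingIdx values into ValueMappings for
/// \p MI's register operands. Returns false if any mapping is invalid.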
bool X86RegisterBankInfo::getInstrValueMapping(
    const MachineInstr &MI,
    const SmallVectorImpl<PartialMappingIdx> &OpRegBankIdx,
    SmallVectorImpl<const ValueMapping *> &OpdsMapping) {

  unsigned NumOperands = MI.getNumOperands();
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    if (!MI.getOperand(Idx).isReg())
      continue;

    auto Mapping = getValueMapping(OpRegBankIdx[Idx], 1);
    if (!Mapping->isValid())
      return false;

    OpdsMapping[Idx] = Mapping;
  }
  return true;
}

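/// Build a mapping for a three-operand instruction whose destination and both
/// sources share a single type, e.g. G_ADD or G_FADD.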
const RegisterBankInfo::InstructionMapping &
X86RegisterBankInfo::getSameOperandsMapping(const MachineInstr &MI,
                                            bool isFP) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned NumOperands = MI.getNumOperands();
  LLT Ty = MRI.getType(MI.getOperand(0).getReg());

  if (NumOperands != 3 || (Ty != MRI.getType(MI.getOperand(1).getReg())) ||
      (Ty != MRI.getType(MI.getOperand(2).getReg())))
    llvm_unreachable("Unsupported operand mapping.");

  auto Mapping = getValueMapping(getPartialMappingIdx(Ty, isFP), 3);
  return getInstructionMapping(DefaultMappingID, 1, Mapping, NumOperands);
}

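/// RegisterBankInfo hook: return the mapping of \p MI's operands onto
/// register banks, as used by the RegBankSelect pass.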
const RegisterBankInfo::InstructionMapping &
X86RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned Opc = MI.getOpcode();

  // Try the default logic for non-generic instructions that are either copies
  // or already have some operands assigned to banks.
  if (!isPreISelGenericOpcode(Opc) || Opc == TargetOpcode::G_PHI) {
    const InstructionMapping &Mapping = getInstrMappingImpl(MI);
    if (Mapping.isValid())
      return Mapping;
  }

  switch (Opc) {
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_MUL:
    return getSameOperandsMapping(MI, false);
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
    return getSameOperandsMapping(MI, true);
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR: {
    unsigned NumOperands = MI.getNumOperands();
    LLT Ty = MRI.getType(MI.getOperand(0).getReg());

    auto Mapping = getValueMapping(getPartialMappingIdx(Ty, false), 3);
    return getInstructionMapping(DefaultMappingID, 1, Mapping, NumOperands);
  }
  default:
    break;
  }

  unsigned NumOperands = MI.getNumOperands();
  SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);

  switch (Opc) {
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FCONSTANT:
    // Instructions with only floating-point operands (all scalars in VECR).
    getInstrPartialMappingIdxs(MI, MRI, /* isFP */ true, OpRegBankIdx);
    break;
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_FPTOSI: {
    // Some of the floating-point instructions have mixed GPR and FP operands:
    // fine-tune the computed mapping.
    auto &Op0 = MI.getOperand(0);
    auto &Op1 = MI.getOperand(1);
    const LLT Ty0 = MRI.getType(Op0.getReg());
    const LLT Ty1 = MRI.getType(Op1.getReg());

    bool FirstArgIsFP = Opc == TargetOpcode::G_SITOFP;
    bool SecondArgIsFP = Opc == TargetOpcode::G_FPTOSI;
    OpRegBankIdx[0] = getPartialMappingIdx(Ty0, /* isFP */ FirstArgIsFP);
    OpRegBankIdx[1] = getPartialMappingIdx(Ty1, /* isFP */ SecondArgIsFP);
    break;
  }
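  // G_FCMP is (result, predicate, LHS, RHS): the boolean result is mapped
  // onto an 8-bit GPR, the predicate is an immediate rather than a register,
  // and both compare operands stay on the FP/vector bank.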
  case TargetOpcode::G_FCMP: {
    LLT Ty1 = MRI.getType(MI.getOperand(2).getReg());
    LLT Ty2 = MRI.getType(MI.getOperand(3).getReg());
    (void)Ty2;
    assert(Ty1.getSizeInBits() == Ty2.getSizeInBits() &&
           "Mismatched operand sizes for G_FCMP");

    unsigned Size = Ty1.getSizeInBits();
    (void)Size;
    assert((Size == 32 || Size == 64) && "Unsupported size for G_FCMP");

    auto FpRegBank = getPartialMappingIdx(Ty1, /* isFP */ true);
    OpRegBankIdx = {PMI_GPR8,
                    /* Predicate */ PMI_None, FpRegBank, FpRegBank};
    break;
  }
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_ANYEXT: {
    auto &Op0 = MI.getOperand(0);
    auto &Op1 = MI.getOperand(1);
    const LLT Ty0 = MRI.getType(Op0.getReg());
    const LLT Ty1 = MRI.getType(Op1.getReg());

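    // A 128-bit value truncated to 32/64 bits (or a 32/64-bit value extended
    // to 128 bits) is really an FP value moving between a vector register and
    // a scalar, so keep those operands on the VECR bank.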
    bool isFPTrunc = (Ty0.getSizeInBits() == 32 || Ty0.getSizeInBits() == 64) &&
                     Ty1.getSizeInBits() == 128 && Opc == TargetOpcode::G_TRUNC;
    bool isFPAnyExt =
        Ty0.getSizeInBits() == 128 &&
        (Ty1.getSizeInBits() == 32 || Ty1.getSizeInBits() == 64) &&
        Opc == TargetOpcode::G_ANYEXT;

    getInstrPartialMappingIdxs(MI, MRI, /* isFP */ isFPTrunc || isFPAnyExt,
                               OpRegBankIdx);
    break;
  }
  default:
    // Track the bank of each register, use NotFP mapping (all scalars in GPRs).
    getInstrPartialMappingIdxs(MI, MRI, /* isFP */ false, OpRegBankIdx);
    break;
  }

  // Finally construct the computed mapping.
  SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
  if (!getInstrValueMapping(MI, OpRegBankIdx, OpdsMapping))
    return getInvalidInstructionMapping();

  return getInstructionMapping(DefaultMappingID, /* Cost */ 1,
                               getOperandsMapping(OpdsMapping), NumOperands);
}

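/// Nothing target-specific is needed when applying a mapping here, so simply
/// forward to applyDefaultMapping.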
void X86RegisterBankInfo::applyMappingImpl(
    const OperandsMapper &OpdMapper) const {
  return applyDefaultMapping(OpdMapper);
}

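/// Offer an alternative mapping for 32/64-bit G_LOAD, G_STORE and
/// G_IMPLICIT_DEF that keeps the value on the VECR (FP) bank instead of the
/// default GPR mapping.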
RegisterBankInfo::InstructionMappings
X86RegisterBankInfo::getInstrAlternativeMappings(const MachineInstr &MI) const {

  const MachineFunction &MF = *MI.getParent()->getParent();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  switch (MI.getOpcode()) {
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_IMPLICIT_DEF: {
    // We are going to try to map 32/64-bit values to PMI_FP32/PMI_FP64.
    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
    if (Size != 32 && Size != 64)
      break;

    unsigned NumOperands = MI.getNumOperands();

    // Track the bank of each register, use FP mapping (all scalars in VEC).
    SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
    getInstrPartialMappingIdxs(MI, MRI, /* isFP */ true, OpRegBankIdx);

    // Finally construct the computed mapping.
    SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
    if (!getInstrValueMapping(MI, OpRegBankIdx, OpdsMapping))
      break;

    const RegisterBankInfo::InstructionMapping &Mapping = getInstructionMapping(
        /*ID*/ 1, /*Cost*/ 1, getOperandsMapping(OpdsMapping), NumOperands);
    InstructionMappings AltMappings;
    AltMappings.push_back(&Mapping);
    return AltMappings;
  }
  default:
    break;
  }
  return RegisterBankInfo::getInstrAlternativeMappings(MI);
}