//===- AArch64RegisterInfo.cpp - AArch64 Register Information -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "AArch64RegisterInfo.h"
#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64StackOffset.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "AArch64GenRegisterInfo.inc"

AArch64RegisterInfo::AArch64RegisterInfo(const Triple &TT)
    : AArch64GenRegisterInfo(AArch64::LR), TT(TT) {
  AArch64_MC::initLLVMToCVRegMapping(this);
}

/// Return whether the register needs a CFI entry. Not all unwinders may know
/// about SVE registers, so we assume the lowest common denominator, i.e. the
/// callee-saves required by the base ABI. For the SVE registers z8-z15, only
/// the lower 64 bits (d8-d15) need to be saved. The lower 64-bit subreg is
/// returned in \p RegToUseForCFI.
bool AArch64RegisterInfo::regNeedsCFI(unsigned Reg,
                                      unsigned &RegToUseForCFI) const {
  if (AArch64::PPRRegClass.contains(Reg))
    return false;

  if (AArch64::ZPRRegClass.contains(Reg)) {
    RegToUseForCFI = getSubReg(Reg, AArch64::dsub);
    for (int I = 0; CSR_AArch64_AAPCS_SaveList[I]; ++I) {
      if (CSR_AArch64_AAPCS_SaveList[I] == RegToUseForCFI)
        return true;
    }
    return false;
  }

  RegToUseForCFI = Reg;
  return true;
}

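/// Returns true if the function takes or returns any scalable vector (SVE)
/// values, in which case the SVE variant of the AAPCS callee-saved list
/// applies.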
bool AArch64RegisterInfo::hasSVEArgsOrReturn(const MachineFunction *MF) {
  const Function &F = MF->getFunction();
  return isa<ScalableVectorType>(F.getReturnType()) ||
         any_of(F.args(), [](const Argument &Arg) {
           return isa<ScalableVectorType>(Arg.getType());
         });
}

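/// Return the callee-saved register list for this function, selected by
/// calling convention, target OS (Darwin and Windows have their own
/// variants), and attributes such as swifterror.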
const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");

  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    // The GHC calling convention has no callee-saved regs, as all registers
    // are used for passing STG regs around.
    return CSR_AArch64_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return CSR_AArch64_AllRegs_SaveList;

  // Darwin has its own variant of CSR_AArch64_AAPCS_SaveList, so most CSR save
  // lists that depend on it also need a Darwin variant.
  if (MF->getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return getDarwinCalleeSavedRegs(MF);

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().isTargetWindows())
    return CSR_Win_AArch64_AAPCS_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_SVE_VectorCall)
    return CSR_AArch64_SVE_AAPCS_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_AArch64_RT_MostRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::Win64)
    // This handles Win64 functions on OSes other than Windows; the Windows
    // target itself is handled separately above.
    return CSR_AArch64_AAPCS_X18_SaveList;
  if (hasSVEArgsOrReturn(MF))
    return CSR_AArch64_SVE_AAPCS_SaveList;
  return CSR_AArch64_AAPCS_SaveList;
}

const MCPhysReg *
AArch64RegisterInfo::getDarwinCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  assert(MF->getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCalleeSavedRegs");

  if (MF->getFunction().getCallingConv() == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR()
               ? CSR_Darwin_AArch64_CXX_TLS_PE_SaveList
               : CSR_Darwin_AArch64_CXX_TLS_SaveList;
  if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_SaveList;
  return CSR_Darwin_AArch64_AAPCS_SaveList;
}

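/// For CXX_FAST_TLS functions with split CSR saving, return the subset of
/// callee-saved registers that is handled via register copies instead of
/// normal prologue/epilogue spills; return null when split CSR is unused.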
const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<AArch64FunctionInfo>()->isSplitCSR())
    return CSR_Darwin_AArch64_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

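/// Rebuild the function's callee-saved register list, appending any X
/// registers the subtarget marks as custom callee-saved (e.g. via
/// -fcall-saved-x# style options).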
void AArch64RegisterInfo::UpdateCustomCalleeSavedRegs(
    MachineFunction &MF) const {
  const MCPhysReg *CSRs = getCalleeSavedRegs(&MF);
  SmallVector<MCPhysReg, 32> UpdatedCSRs;
  for (const MCPhysReg *I = CSRs; *I; ++I)
    UpdatedCSRs.push_back(*I);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      UpdatedCSRs.push_back(AArch64::GPR64commonRegClass.getRegister(i));
    }
  }
  // Register lists are zero-terminated.
  UpdatedCSRs.push_back(0);
  MF.getRegInfo().setCalleeSavedRegs(UpdatedCSRs);
}

const TargetRegisterClass *
AArch64RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                           unsigned Idx) const {
  // Edge cases for GPR/FPR register classes.
  if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR32RegClass;
  else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
    return &AArch64::FPR64RegClass;

  // Forward to TableGen's default version.
  return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const uint32_t *
AArch64RegisterInfo::getDarwinCallPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  assert(MF.getSubtarget<AArch64Subtarget>().isTargetDarwin() &&
         "Invalid subtarget for getDarwinCallPreservedMask");

  if (CC == CallingConv::CXX_FAST_TLS)
    return CSR_Darwin_AArch64_CXX_TLS_RegMask;
  if (CC == CallingConv::AArch64_VectorCall)
    return CSR_Darwin_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    report_fatal_error(
        "Calling convention SVE_VectorCall is unsupported on Darwin.");
  if (CC == CallingConv::CFGuard_Check)
    report_fatal_error(
        "Calling convention CFGuard_Check is unsupported on Darwin.");
  if (MF.getSubtarget<AArch64Subtarget>()
          .getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return CSR_Darwin_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::PreserveMost)
    return CSR_Darwin_AArch64_RT_MostRegs_RegMask;
  return CSR_Darwin_AArch64_AAPCS_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  bool SCS = MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;

  // All the following calling conventions are handled differently on Darwin.
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin()) {
    if (SCS)
      report_fatal_error("ShadowCallStack attribute not supported on Darwin.");
    return getDarwinCallPreservedMask(MF, CC);
  }

  if (CC == CallingConv::AArch64_VectorCall)
    return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
  if (CC == CallingConv::AArch64_SVE_VectorCall)
    return SCS ? CSR_AArch64_SVE_AAPCS_SCS_RegMask
               : CSR_AArch64_SVE_AAPCS_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AArch64_CFGuard_Check_RegMask;
  if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
          ->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
               : CSR_AArch64_AAPCS_SwiftError_RegMask;
  if (CC == CallingConv::PreserveMost)
    return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
               : CSR_AArch64_RT_MostRegs_RegMask;
  return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
}

const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const {
  if (TT.isOSDarwin())
    return CSR_Darwin_AArch64_TLS_RegMask;

  assert(TT.isOSBinFormatELF() && "Invalid target");
  return CSR_AArch64_TLS_ELF_RegMask;
}

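/// Set the mask bits for any X registers the subtarget marks as custom
/// callee-saved, so they are treated as preserved across the call. In a
/// register mask, bit (Reg % 32) of word (Reg / 32) is set when Reg is
/// preserved.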
void AArch64RegisterInfo::UpdateCustomCallPreservedMask(MachineFunction &MF,
                                                 const uint32_t **Mask) const {
  uint32_t *UpdatedMask = MF.allocateRegMask();
  unsigned RegMaskSize = MachineOperand::getRegMaskSize(getNumRegs());
  memcpy(UpdatedMask, *Mask, sizeof(UpdatedMask[0]) * RegMaskSize);

  for (size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegCustomCalleeSaved(i)) {
      for (MCSubRegIterator SubReg(AArch64::GPR64commonRegClass.getRegister(i),
                                   this, true);
           SubReg.isValid(); ++SubReg) {
        // See TargetRegisterInfo::getCallPreservedMask for how to interpret the
        // register mask.
        UpdatedMask[*SubReg / 32] |= 1u << (*SubReg % 32);
      }
    }
  }
  *Mask = UpdatedMask;
}

const uint32_t *AArch64RegisterInfo::getNoPreservedMask() const {
  return CSR_AArch64_NoRegs_RegMask;
}

const uint32_t *
AArch64RegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i64 argument (which must also be the register used to return a
  // single i64 return value).
  //
  // If the calling convention does not use the same register for both, the
  // function should return NULL (does not currently apply).
  assert(CC != CallingConv::GHC && "should not be GHC calling convention.");
  if (MF.getSubtarget<AArch64Subtarget>().isTargetDarwin())
    return CSR_Darwin_AArch64_AAPCS_ThisReturn_RegMask;
  return CSR_AArch64_AAPCS_ThisReturn_RegMask;
}

const uint32_t *AArch64RegisterInfo::getWindowsStackProbePreservedMask() const {
  return CSR_AArch64_StackProbe_Windows_RegMask;
}

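/// Compute the registers that can never be allocated: SP and XZR (via their
/// 32-bit aliases), the frame pointer and base pointer when in use, any X
/// registers reserved by the subtarget, and X16 when speculative load
/// hardening needs it as the taint register. Marking a W register also
/// reserves its X super-register.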
BitVector
AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, AArch64::WSP);
  markSuperRegs(Reserved, AArch64::WZR);

  if (TFI->hasFP(MF) || TT.isOSDarwin())
    markSuperRegs(Reserved, AArch64::W29);

  for (size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
    if (MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(i))
      markSuperRegs(Reserved, AArch64::GPR32commonRegClass.getRegister(i));
  }

  if (hasBasePointer(MF))
    markSuperRegs(Reserved, AArch64::W19);

  // SLH uses register W16/X16 as the taint register.
  if (MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
    markSuperRegs(Reserved, AArch64::W16);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF,
                                        MCRegister Reg) const {
  return getReservedRegs(MF)[Reg];
}

bool AArch64RegisterInfo::isAnyArgRegReserved(const MachineFunction &MF) const {
  return std::any_of(std::begin(*AArch64::GPR64argRegClass.MC),
                     std::end(*AArch64::GPR64argRegClass.MC),
                     [this, &MF](MCPhysReg r) { return isReservedReg(MF, r); });
}

void AArch64RegisterInfo::emitReservedArgRegCallError(
    const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  F.getContext().diagnose(DiagnosticInfoUnsupported{F, "AArch64 doesn't support"
    " function calls if any of the argument registers is reserved."});
}

bool AArch64RegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                           MCRegister PhysReg) const {
  return !isReservedReg(MF, PhysReg);
}

bool AArch64RegisterInfo::isConstantPhysReg(MCRegister PhysReg) const {
  return PhysReg == AArch64::WZR || PhysReg == AArch64::XZR;
}

const TargetRegisterClass *
AArch64RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &AArch64::GPR64spRegClass;
}

const TargetRegisterClass *
AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &AArch64::CCRRegClass)
    return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
  return RC;
}

unsigned AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }

bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // In the presence of variable-sized objects or funclets, if the fixed stack
  // size is large enough that referencing from the FP won't result in things
  // being in range relatively often, we can use a base pointer to allow access
  // from the other direction like the SP normally works.
  //
  // Furthermore, if variable-sized objects are present and the stack needs to
  // be dynamically re-aligned, the base pointer is the only reliable way to
  // reference the locals.
  if (MFI.hasVarSizedObjects() || MF.hasEHFunclets()) {
    if (needsStackRealignment(MF))
      return true;

    if (MF.getSubtarget<AArch64Subtarget>().hasSVE()) {
      const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
      // Frames that have variable-sized objects and scalable SVE objects
      // should always use a base pointer.
      if (!AFI->hasCalculatedStackSizeSVE() || AFI->getStackSizeSVE())
        return true;
    }

    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If it's wrong, we'll materialize the constant and still get to the
    // object; it's just suboptimal. Negative offsets use the unscaled
    // load/store instructions, which have a 9-bit signed immediate.
    return MFI.getLocalFrameSize() >= 256;
  }

  return false;
}

Register
AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
}

bool AArch64RegisterInfo::requiresRegisterScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
  // This function indicates whether the emergency spill slot should be placed
  // close to the beginning of the stack frame (closer to FP) or the end
  // (closer to SP).
  //
  // The beginning works most reliably if we have a frame pointer.
  // In the presence of any non-constant space between FP and locals
  // (e.g. in case of stack realignment or a scalable SVE area), it is
  // better to use SP or BP.
  const AArch64FrameLowering &TFI = *getFrameLowering(MF);
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  assert((!MF.getSubtarget<AArch64Subtarget>().hasSVE() ||
          AFI->hasCalculatedStackSizeSVE()) &&
         "Expected SVE area to be calculated by this point");
  return TFI.hasFP(MF) && !needsStackRealignment(MF) && !AFI->getStackSizeSVE();
}

bool AArch64RegisterInfo::requiresFrameIndexScavenging(
    const MachineFunction &MF) const {
  return true;
}

bool
AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken();
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool AArch64RegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                            int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
    assert(i < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  // Without a virtual base register, if the function has variable-sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all GPR callee-saved registers get pushed.
  // FP, LR, X19-X28, D8-D15. 64 bits each.
  int64_t FPOffset = Offset - 16 * 20;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  //        and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, AArch64::FP, FPOffset))
    return false;

  // If we can reference via the stack pointer or base pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (isFrameOffsetLegal(MI, AArch64::SP, Offset))
    return false;

  // If even offset 0 is illegal, we don't want a virtual base register.
  if (!isFrameOffsetLegal(MI, AArch64::SP, 0))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base register.
  return true;
}

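/// Check whether the given frame offset can be encoded in \p MI's immediate
/// addressing range.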
bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  assert(MI && "Unable to get the legal offset for nil instruction.");
  StackOffset SaveOffset(Offset, MVT::i8);
  return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal;
}

/// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
/// at the beginning of the basic block.
void AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                       Register BaseReg,
                                                       int FrameIdx,
                                                       int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();
  const MachineFunction &MF = *MBB->getParent();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this, MF));
  unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);

  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset)
      .addImm(Shifter);
}

void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  // AArch64 doesn't need the general 64-bit offsets.
  StackOffset Off(Offset, MVT::i8);

  unsigned i = 0;

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  const MachineFunction *MF = MI.getParent()->getParent();
  const AArch64InstrInfo *TII =
      MF->getSubtarget<AArch64Subtarget>().getInstrInfo();
  bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII);
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

// Create a scratch register for the frame index elimination in an instruction.
// This function has special handling of stack tagging loop pseudos, in which
// case it can also change the instruction opcode (but not the operands).
static Register
createScratchRegisterForInstruction(MachineInstr &MI,
                                    const AArch64InstrInfo *TII) {
  // ST*Gloop have a reserved scratch register in operand 1. Use it, and also
  // replace the instruction with the writeback variant because it will now
  // satisfy the operand constraints for it.
  if (MI.getOpcode() == AArch64::STGloop) {
    MI.setDesc(TII->get(AArch64::STGloop_wback));
    return MI.getOperand(1).getReg();
  } else if (MI.getOpcode() == AArch64::STZGloop) {
    MI.setDesc(TII->get(AArch64::STZGloop_wback));
    return MI.getOperand(1).getReg();
  } else {
    return MI.getMF()->getRegInfo().createVirtualRegister(
        &AArch64::GPR64RegClass);
  }
}

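/// Rewrite the frame-index operand of an instruction into a [register +
/// offset] reference, falling back to materializing the address into a
/// scratch register when the offset cannot be folded into the instruction
/// directly.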
void AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                              int SPAdj, unsigned FIOperandNum,
                                              RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64InstrInfo *TII =
      MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  bool Tagged =
      MI.getOperand(FIOperandNum).getTargetFlags() & AArch64II::MO_TAGGED;
  Register FrameReg;

  // Special handling of dbg_value, stackmap and patchpoint instructions.
  if (MI.isDebugValue() || MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT) {
    StackOffset Offset =
        TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
                                        /*PreferFP=*/true,
                                        /*ForSimm=*/false);
    Offset += StackOffset(MI.getOperand(FIOperandNum + 1).getImm(), MVT::i8);
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getBytes());
    return;
  }

  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    int Offset = TFI->getNonLocalFrameIndexReference(MF, FrameIndex);
    FI.ChangeToImmediate(Offset);
    return;
  }

  StackOffset Offset;
  if (MI.getOpcode() == AArch64::TAGPstack) {
    // TAGPstack must use the virtual frame register in its 3rd operand.
    const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
    FrameReg = MI.getOperand(3).getReg();
    Offset = {MFI.getObjectOffset(FrameIndex) +
                  AFI->getTaggedBasePointerOffset(),
              MVT::i8};
  } else if (Tagged) {
    StackOffset SPOffset = {
        MFI.getObjectOffset(FrameIndex) + (int64_t)MFI.getStackSize(), MVT::i8};
    if (MFI.hasVarSizedObjects() ||
        isAArch64FrameOffsetLegal(MI, SPOffset, nullptr, nullptr, nullptr) !=
            (AArch64FrameOffsetCanUpdate | AArch64FrameOffsetIsLegal)) {
      // Can't update to SP + offset in place. Precalculate the tagged pointer
      // in a scratch register.
      Offset = TFI->resolveFrameIndexReference(
          MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
      Register ScratchReg =
          MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
      emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset,
                      TII);
      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(AArch64::LDG), ScratchReg)
          .addReg(ScratchReg)
          .addReg(ScratchReg)
          .addImm(0);
      MI.getOperand(FIOperandNum)
          .ChangeToRegister(ScratchReg, false, false, true);
      return;
    }
    FrameReg = AArch64::SP;
    Offset = {MFI.getObjectOffset(FrameIndex) + (int64_t)MFI.getStackSize(),
              MVT::i8};
  } else {
    Offset = TFI->resolveFrameIndexReference(
        MF, FrameIndex, FrameReg, /*PreferFP=*/false, /*ForSimm=*/true);
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return;

  assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
         "Emergency spill slot is out of reach");

  // If we get here, the immediate doesn't fit into the instruction.  We folded
  // as much as possible above.  Handle the rest, providing a register that is
  // SP+LargeImm.
  Register ScratchReg = createScratchRegisterForInstruction(MI, TII);
  emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
  MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);
}

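/// Estimate the number of allocatable registers per class for register
/// pressure heuristics, accounting for reserved GPRs (SP/XZR, FP, the base
/// pointer, and any subtarget-reserved X registers).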
unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                                  MachineFunction &MF) const {
  const AArch64FrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case AArch64::GPR32RegClassID:
  case AArch64::GPR32spRegClassID:
  case AArch64::GPR32allRegClassID:
  case AArch64::GPR64spRegClassID:
  case AArch64::GPR64allRegClassID:
  case AArch64::GPR64RegClassID:
  case AArch64::GPR32commonRegClassID:
  case AArch64::GPR64commonRegClassID:
    return 32 - 1                                   // XZR/SP
              - (TFI->hasFP(MF) || TT.isOSDarwin()) // FP
              - MF.getSubtarget<AArch64Subtarget>().getNumXRegisterReserved()
              - hasBasePointer(MF);  // X19
  case AArch64::FPR8RegClassID:
  case AArch64::FPR16RegClassID:
  case AArch64::FPR32RegClassID:
  case AArch64::FPR64RegClassID:
  case AArch64::FPR128RegClassID:
    return 32;

  case AArch64::DDRegClassID:
  case AArch64::DDDRegClassID:
  case AArch64::DDDDRegClassID:
  case AArch64::QQRegClassID:
  case AArch64::QQQRegClassID:
  case AArch64::QQQQRegClassID:
    return 32;

  case AArch64::FPR128_loRegClassID:
  case AArch64::FPR64_loRegClassID:
  case AArch64::FPR16_loRegClassID:
    return 16;
  }
}

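/// Pick the register used to address local stack objects: SP when there are
/// no funclets or variable-sized objects, the base pointer under stack
/// realignment, and otherwise the frame register (FP if present, else SP).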
unsigned AArch64RegisterInfo::getLocalAddressRegister(
  const MachineFunction &MF) const {
  const auto &MFI = MF.getFrameInfo();
  if (!MF.hasEHFunclets() && !MFI.hasVarSizedObjects())
    return AArch64::SP;
  if (needsStackRealignment(MF))
    return getBaseRegister();
  return getFrameRegister(MF);
}