//===-- SystemZRegisterInfo.cpp - SystemZ register information ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SystemZRegisterInfo.h"
#include "SystemZInstrInfo.h"
#include "SystemZSubtarget.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/DebugInfoMetadata.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "SystemZGenRegisterInfo.inc"

// Given that MO is a GRX32 operand, return GR32 or GRH32 if MO is known to
// belong to one of them (from its register class, subregister index, or
// assigned physical register); otherwise return GRX32.
static const TargetRegisterClass *getRC32(MachineOperand &MO,
                                          const VirtRegMap *VRM,
                                          const MachineRegisterInfo *MRI) {
  const TargetRegisterClass *RC = MRI->getRegClass(MO.getReg());

  if (SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
      MO.getSubReg() == SystemZ::subreg_ll32 ||
      MO.getSubReg() == SystemZ::subreg_l32)
    return &SystemZ::GR32BitRegClass;
  if (SystemZ::GRH32BitRegClass.hasSubClassEq(RC) ||
      MO.getSubReg() == SystemZ::subreg_lh32 ||
      MO.getSubReg() == SystemZ::subreg_h32)
    return &SystemZ::GRH32BitRegClass;

  if (VRM && VRM->hasPhys(MO.getReg())) {
    Register PhysReg = VRM->getPhys(MO.getReg());
    if (SystemZ::GR32BitRegClass.contains(PhysReg))
      return &SystemZ::GR32BitRegClass;
    assert(SystemZ::GRH32BitRegClass.contains(PhysReg) &&
           "Phys reg not in GR32 or GRH32?");
    return &SystemZ::GRH32BitRegClass;
  }

  assert(RC == &SystemZ::GRX32BitRegClass);
  return RC;
}

// Pass the registers of RC as hints while making sure that if any of these
// registers are copy hints (and therefore already in Hints), hint them
// first.
static void addHints(ArrayRef<MCPhysReg> Order,
                     SmallVectorImpl<MCPhysReg> &Hints,
                     const TargetRegisterClass *RC,
                     const MachineRegisterInfo *MRI) {
  SmallSet<unsigned, 4> CopyHints;
  CopyHints.insert(Hints.begin(), Hints.end());
  Hints.clear();
  for (MCPhysReg Reg : Order)
    if (CopyHints.count(Reg) &&
        RC->contains(Reg) && !MRI->isReserved(Reg))
      Hints.push_back(Reg);
  for (MCPhysReg Reg : Order)
    if (!CopyHints.count(Reg) &&
        RC->contains(Reg) && !MRI->isReserved(Reg))
      Hints.push_back(Reg);
}

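// Add SystemZ-specific register allocation hints for VirtReg on top of the
// target-independent copy hints: registers tied by two-operand instructions,
// and GR32/GRH32 preferences derived from LOCRMux/SELRMux and CHIMux/CFIMux
// users of GRX32 virtual registers.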
bool SystemZRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (VRM != nullptr) {
    // Add any two-address hints after any copy hints.
    SmallSet<unsigned, 4> TwoAddrHints;
    for (auto &Use : MRI->reg_nodbg_instructions(VirtReg))
      if (SystemZ::getTwoOperandOpcode(Use.getOpcode()) != -1) {
        const MachineOperand *VRRegMO = nullptr;
        const MachineOperand *OtherMO = nullptr;
        const MachineOperand *CommuMO = nullptr;
        if (VirtReg == Use.getOperand(0).getReg()) {
          VRRegMO = &Use.getOperand(0);
          OtherMO = &Use.getOperand(1);
          if (Use.isCommutable())
            CommuMO = &Use.getOperand(2);
        } else if (VirtReg == Use.getOperand(1).getReg()) {
          VRRegMO = &Use.getOperand(1);
          OtherMO = &Use.getOperand(0);
        } else if (VirtReg == Use.getOperand(2).getReg() &&
                   Use.isCommutable()) {
          VRRegMO = &Use.getOperand(2);
          OtherMO = &Use.getOperand(0);
        } else
          continue;

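        // Get the physical register that MO is (or will be) assigned to,
        // adjust for any subregister indices on either operand, and record it
        // as a two-address hint unless it is reserved or already a copy hint.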
        auto tryAddHint = [&](const MachineOperand *MO) -> void {
          Register Reg = MO->getReg();
          Register PhysReg =
              Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
          if (PhysReg) {
            if (MO->getSubReg())
              PhysReg = getSubReg(PhysReg, MO->getSubReg());
            if (VRRegMO->getSubReg())
              PhysReg = getMatchingSuperReg(PhysReg, VRRegMO->getSubReg(),
                                            MRI->getRegClass(VirtReg));
            if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
              TwoAddrHints.insert(PhysReg);
          }
        };
        tryAddHint(OtherMO);
        if (CommuMO)
          tryAddHint(CommuMO);
      }
    for (MCPhysReg OrderReg : Order)
      if (TwoAddrHints.count(OrderReg))
        Hints.push_back(OrderReg);
  }

  if (MRI->getRegClass(VirtReg) == &SystemZ::GRX32BitRegClass) {
    SmallVector<Register, 8> Worklist;
    SmallSet<Register, 4> DoneRegs;
    Worklist.push_back(VirtReg);
    while (Worklist.size()) {
      Register Reg = Worklist.pop_back_val();
      if (!DoneRegs.insert(Reg).second)
        continue;

      for (auto &Use : MRI->reg_instructions(Reg)) {
        // For LOCRMux, see if the other operand is already a high or low
        // register, and in that case give the corresponding hints for
        // VirtReg. LOCR instructions need both operands in either high or
        // low parts. Same handling for SELRMux.
        if (Use.getOpcode() == SystemZ::LOCRMux ||
            Use.getOpcode() == SystemZ::SELRMux) {
          MachineOperand &TrueMO = Use.getOperand(1);
          MachineOperand &FalseMO = Use.getOperand(2);
          const TargetRegisterClass *RC =
            TRI->getCommonSubClass(getRC32(FalseMO, VRM, MRI),
                                   getRC32(TrueMO, VRM, MRI));
          if (Use.getOpcode() == SystemZ::SELRMux)
            RC = TRI->getCommonSubClass(RC,
                                        getRC32(Use.getOperand(0), VRM, MRI));
          if (RC && RC != &SystemZ::GRX32BitRegClass) {
            addHints(Order, Hints, RC, MRI);
            // Return true to make these hints the only regs available to
            // RA. This may mean extra spilling but since the alternative is
            // a jump sequence expansion of the LOCRMux, it is preferred.
            return true;
          }

          // Add the other operand of the LOCRMux to the worklist.
          Register OtherReg =
              (TrueMO.getReg() == Reg ? FalseMO.getReg() : TrueMO.getReg());
          if (MRI->getRegClass(OtherReg) == &SystemZ::GRX32BitRegClass)
            Worklist.push_back(OtherReg);
        } // end LOCRMux
        else if (Use.getOpcode() == SystemZ::CHIMux ||
                 Use.getOpcode() == SystemZ::CFIMux) {
          if (Use.getOperand(1).getImm() == 0) {
            bool OnlyLMuxes = true;
            for (MachineInstr &DefMI : MRI->def_instructions(VirtReg))
              if (DefMI.getOpcode() != SystemZ::LMux)
                OnlyLMuxes = false;
            if (OnlyLMuxes) {
              addHints(Order, Hints, &SystemZ::GR32BitRegClass, MRI);
              // Return false to make these hints preferred but not obligatory.
              return false;
            }
          }
        } // end CHIMux / CFIMux
      }
    }
  }

  return BaseImplRetVal;
}

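// Return the list of callee-saved registers for the XPLINK64 ABI, using the
// vector-register variant when vector support is available.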
const MCPhysReg *
SystemZXPLINK64Registers::getCalleeSavedRegs(const MachineFunction *MF) const {
  const SystemZSubtarget &Subtarget = MF->getSubtarget<SystemZSubtarget>();
  return Subtarget.hasVector() ? CSR_SystemZ_XPLINK64_Vector_SaveList
                               : CSR_SystemZ_XPLINK64_SaveList;
}

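// Return the list of callee-saved registers for the ELF ABI, with special
// cases for the GHC, AnyReg and swifterror conventions.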
const MCPhysReg *
SystemZELFRegisters::getCalleeSavedRegs(const MachineFunction *MF) const {
  const SystemZSubtarget &Subtarget = MF->getSubtarget<SystemZSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    return CSR_SystemZ_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return Subtarget.hasVector() ? CSR_SystemZ_AllRegs_Vector_SaveList
                                 : CSR_SystemZ_AllRegs_SaveList;
  if (MF->getSubtarget().getTargetLowering()->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_SystemZ_SwiftError_SaveList;
  return CSR_SystemZ_ELF_SaveList;
}

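// Return the call-preserved register mask for the XPLINK64 ABI, using the
// vector-register variant when vector support is available.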
const uint32_t *
SystemZXPLINK64Registers::getCallPreservedMask(const MachineFunction &MF,
                                               CallingConv::ID CC) const {
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  return Subtarget.hasVector() ? CSR_SystemZ_XPLINK64_Vector_RegMask
                               : CSR_SystemZ_XPLINK64_RegMask;
}

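// Return the call-preserved register mask for the ELF ABI, with the same
// special cases as getCalleeSavedRegs above.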
const uint32_t *
SystemZELFRegisters::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  if (CC == CallingConv::GHC)
    return CSR_SystemZ_NoRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return Subtarget.hasVector() ? CSR_SystemZ_AllRegs_Vector_RegMask
                                 : CSR_SystemZ_AllRegs_RegMask;
  if (MF.getSubtarget().getTargetLowering()->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_SystemZ_SwiftError_RegMask;
  return CSR_SystemZ_ELF_RegMask;
}

SystemZRegisterInfo::SystemZRegisterInfo(unsigned int RA)
    : SystemZGenRegisterInfo(RA) {}

const MCPhysReg *
SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {

  const SystemZSubtarget *Subtarget = &MF->getSubtarget<SystemZSubtarget>();
  SystemZCallingConventionRegisters *Regs = Subtarget->getSpecialRegisters();

  return Regs->getCalleeSavedRegs(MF);
}

const uint32_t *
SystemZRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {

  const SystemZSubtarget *Subtarget = &MF.getSubtarget<SystemZSubtarget>();
  SystemZCallingConventionRegisters *Regs = Subtarget->getSpecialRegisters();
  return Regs->getCallPreservedMask(MF, CC);
}

BitVector
SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  const SystemZSubtarget *Subtarget = &MF.getSubtarget<SystemZSubtarget>();
  SystemZCallingConventionRegisters *Regs = Subtarget->getSpecialRegisters();
  if (TFI->hasFP(MF))
    // The frame pointer. Reserve all aliases.
    for (MCRegAliasIterator AI(Regs->getFramePointerRegister(), this, true);
         AI.isValid(); ++AI)
      Reserved.set(*AI);

  // Reserve all aliases for the stack pointer.
  for (MCRegAliasIterator AI(Regs->getStackPointerRegister(), this, true);
       AI.isValid(); ++AI)
    Reserved.set(*AI);

  // A0 and A1 hold the thread pointer.
  Reserved.set(SystemZ::A0);
  Reserved.set(SystemZ::A1);

  // FPC is the floating-point control register.
  Reserved.set(SystemZ::FPC);

  return Reserved;
}

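// Rewrite a frame-index operand into a base register plus immediate offset.
// If the offset does not fit the instruction's addressing form, an anchor
// address is materialized in a scratch register first.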
bool
SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  assert(SPAdj == 0 && "Outgoing arguments should be part of the frame");

  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();
  auto *TII = MF.getSubtarget<SystemZSubtarget>().getInstrInfo();
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  DebugLoc DL = MI->getDebugLoc();

  // Decompose the frame index into a base and offset.
  int FrameIndex = MI->getOperand(FIOperandNum).getIndex();
  Register BasePtr;
  int64_t Offset =
      (TFI->getFrameIndexReference(MF, FrameIndex, BasePtr).getFixed() +
       MI->getOperand(FIOperandNum + 1).getImm());

  // Special handling of dbg_value instructions.
  if (MI->isDebugValue()) {
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, /*isDef*/ false);
    if (MI->isNonListDebugValue()) {
      MI->getDebugOffset().ChangeToImmediate(Offset);
    } else {
      unsigned OpIdx = MI->getDebugOperandIndex(&MI->getOperand(FIOperandNum));
      SmallVector<uint64_t, 3> Ops;
      DIExpression::appendOffset(
          Ops, TFI->getFrameIndexReference(MF, FrameIndex, BasePtr).getFixed());
      MI->getDebugExpressionOp().setMetadata(
          DIExpression::appendOpsToArg(MI->getDebugExpression(), Ops, OpIdx));
    }
    return false;
  }

  // See if the offset is in range, or if an equivalent instruction that
  // accepts the offset exists.
  unsigned Opcode = MI->getOpcode();
  unsigned OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset, &*MI);
  if (OpcodeForOffset) {
    if (OpcodeForOffset == SystemZ::LE &&
        MF.getSubtarget<SystemZSubtarget>().hasVector()) {
      // If LE is ok for offset, use LDE instead on z13.
      OpcodeForOffset = SystemZ::LDE32;
    }
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
  } else {
    // Create an anchor point that is in range. Start at 0xffff so that we
    // can use LLILH to load the immediate.
    int64_t OldOffset = Offset;
    int64_t Mask = 0xffff;
    do {
      Offset = OldOffset & Mask;
      OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
      Mask >>= 1;
      assert(Mask && "One offset must be OK");
    } while (!OpcodeForOffset);

    Register ScratchReg =
        MF.getRegInfo().createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    int64_t HighOffset = OldOffset - Offset;

    if (MI->getDesc().TSFlags & SystemZII::HasIndex &&
        MI->getOperand(FIOperandNum + 2).getReg() == 0) {
      // Load the offset into the scratch register and use it as an index.
      // The scratch register then dies here.
      TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
      MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
      MI->getOperand(FIOperandNum + 2).ChangeToRegister(ScratchReg,
                                                        false, false, true);
    } else {
      // Load the anchor address into a scratch register.
      unsigned LAOpcode = TII->getOpcodeForOffset(SystemZ::LA, HighOffset);
      if (LAOpcode)
        BuildMI(MBB, MI, DL, TII->get(LAOpcode), ScratchReg)
          .addReg(BasePtr).addImm(HighOffset).addReg(0);
      else {
        // Load the high offset into the scratch register and use it as
        // an index.
        TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
        BuildMI(MBB, MI, DL, TII->get(SystemZ::LA), ScratchReg)
          .addReg(BasePtr, RegState::Kill).addImm(0).addReg(ScratchReg);
      }

      // Use the scratch register as the base.  It then dies here.
      MI->getOperand(FIOperandNum).ChangeToRegister(ScratchReg,
                                                    false, false, true);
    }
  }
  MI->setDesc(TII->get(OpcodeForOffset));
  MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
  return false;
}

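// Decide whether the coalescer may merge the registers of this COPY. Copies
// in and out of GR128 subregisters are only coalesced when the resulting
// live range stays short and local, since 128-bit register pairs are scarce.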
bool SystemZRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  assert(MI->isCopy() && "Only expecting COPY instructions");

  // Coalesce anything which is not a COPY involving a subreg to/from GR128.
  if (!(NewRC->hasSuperClassEq(&SystemZ::GR128BitRegClass) &&
        (getRegSizeInBits(*SrcRC) <= 64 || getRegSizeInBits(*DstRC) <= 64) &&
        !MI->getOperand(1).isUndef()))
    return true;

  // Allow coalescing of a GR128 subreg COPY only if the subreg live range is
  // local to one MBB and does not have too many interfering physreg clobbers.
  // Otherwise regalloc may run out of registers.
  unsigned SubregOpIdx = getRegSizeInBits(*SrcRC) == 128 ? 0 : 1;
  LiveInterval &LI = LIS.getInterval(MI->getOperand(SubregOpIdx).getReg());

  // Check that the subreg is local to MBB.
  MachineBasicBlock *MBB = MI->getParent();
  MachineInstr *FirstMI = LIS.getInstructionFromIndex(LI.beginIndex());
  MachineInstr *LastMI = LIS.getInstructionFromIndex(LI.endIndex());
  if (!FirstMI || FirstMI->getParent() != MBB ||
      !LastMI || LastMI->getParent() != MBB)
    return false;

  // Check if coalescing seems safe by finding the set of clobbered physreg
  // pairs in the region.
  BitVector PhysClobbered(getNumRegs());
  for (MachineBasicBlock::iterator MII = FirstMI,
                                   MEE = std::next(LastMI->getIterator());
       MII != MEE; ++MII)
    for (const MachineOperand &MO : MII->operands())
      if (MO.isReg() && MO.getReg().isPhysical()) {
        for (MCPhysReg SI : superregs_inclusive(MO.getReg()))
          if (NewRC->contains(SI)) {
            PhysClobbered.set(SI);
            break;
          }
      }

  // Demand an arbitrary margin of free regs.
  unsigned const DemandedFreeGR128 = 3;
  if (PhysClobbered.count() > (NewRC->getNumRegs() - DemandedFreeGR128))
    return false;

  return true;
}

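// Return the register used to address the frame: the frame pointer if the
// function needs one, otherwise the stack pointer.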
Register
SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  const SystemZSubtarget *Subtarget = &MF.getSubtarget<SystemZSubtarget>();
  SystemZCallingConventionRegisters *Regs = Subtarget->getSpecialRegisters();

  return TFI->hasFP(MF) ? Regs->getFramePointerRegister()
                        : Regs->getStackPointerRegister();
}

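// The condition-code register cannot be copied directly; cross-class copies
// of CCR values go through a GR32 register instead.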
const TargetRegisterClass *
SystemZRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &SystemZ::CCRRegClass)
    return &SystemZ::GR32BitRegClass;
  return RC;
}