//===-- SystemZRegisterInfo.cpp - SystemZ register information ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SystemZRegisterInfo.h"
#include "SystemZInstrInfo.h"
#include "SystemZSubtarget.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/VirtRegMap.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "SystemZGenRegisterInfo.inc"

SystemZRegisterInfo::SystemZRegisterInfo()
    : SystemZGenRegisterInfo(SystemZ::R14D) {}

// Given that MO is a GRX32 operand, return GR32 or GRH32 if MO can be shown
// to belong to one of those subclasses; otherwise return GRX32.
static const TargetRegisterClass *getRC32(MachineOperand &MO,
                                          const VirtRegMap *VRM,
                                          const MachineRegisterInfo *MRI) {
  const TargetRegisterClass *RC = MRI->getRegClass(MO.getReg());

  if (SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
      MO.getSubReg() == SystemZ::subreg_l32 ||
      MO.getSubReg() == SystemZ::subreg_hl32)
    return &SystemZ::GR32BitRegClass;
  if (SystemZ::GRH32BitRegClass.hasSubClassEq(RC) ||
      MO.getSubReg() == SystemZ::subreg_h32 ||
      MO.getSubReg() == SystemZ::subreg_hh32)
    return &SystemZ::GRH32BitRegClass;

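  // If the virtual register has already been assigned to a physical register,
  // use that assignment to choose between the low and high 32-bit classes.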
  if (VRM && VRM->hasPhys(MO.getReg())) {
    Register PhysReg = VRM->getPhys(MO.getReg());
    if (SystemZ::GR32BitRegClass.contains(PhysReg))
      return &SystemZ::GR32BitRegClass;
    assert(SystemZ::GRH32BitRegClass.contains(PhysReg) &&
           "Phys reg not in GR32 or GRH32?");
    return &SystemZ::GRH32BitRegClass;
  }

  assert(RC == &SystemZ::GRX32BitRegClass);
  return RC;
}

// Pass the registers of RC as hints while making sure that if any of these
// registers are copy hints (and therefore already in Hints), hint them
// first.
static void addHints(ArrayRef<MCPhysReg> Order,
                     SmallVectorImpl<MCPhysReg> &Hints,
                     const TargetRegisterClass *RC,
                     const MachineRegisterInfo *MRI) {
  SmallSet<unsigned, 4> CopyHints;
  CopyHints.insert(Hints.begin(), Hints.end());
  Hints.clear();
  for (MCPhysReg Reg : Order)
    if (CopyHints.count(Reg) &&
        RC->contains(Reg) && !MRI->isReserved(Reg))
      Hints.push_back(Reg);
  for (MCPhysReg Reg : Order)
    if (!CopyHints.count(Reg) &&
        RC->contains(Reg) && !MRI->isReserved(Reg))
      Hints.push_back(Reg);
}

bool
SystemZRegisterInfo::getRegAllocationHints(unsigned VirtReg,
                                           ArrayRef<MCPhysReg> Order,
                                           SmallVectorImpl<MCPhysReg> &Hints,
                                           const MachineFunction &MF,
                                           const VirtRegMap *VRM,
                                           const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

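  // Collect the target-independent hints first; these include any copy hints,
  // which should stay at the front of the list.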
  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (VRM != nullptr) {
    // Add any two address hints after any copy hints.
    SmallSet<unsigned, 4> TwoAddrHints;
    for (auto &Use : MRI->reg_nodbg_instructions(VirtReg))
      if (SystemZ::getTwoOperandOpcode(Use.getOpcode()) != -1) {
        const MachineOperand *VRRegMO = nullptr;
        const MachineOperand *OtherMO = nullptr;
        const MachineOperand *CommuMO = nullptr;
        if (VirtReg == Use.getOperand(0).getReg()) {
          VRRegMO = &Use.getOperand(0);
          OtherMO = &Use.getOperand(1);
          if (Use.isCommutable())
            CommuMO = &Use.getOperand(2);
        } else if (VirtReg == Use.getOperand(1).getReg()) {
          VRRegMO = &Use.getOperand(1);
          OtherMO = &Use.getOperand(0);
        } else if (VirtReg == Use.getOperand(2).getReg() &&
                   Use.isCommutable()) {
          VRRegMO = &Use.getOperand(2);
          OtherMO = &Use.getOperand(0);
        } else
          continue;

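        // Hint the physical register that MO is bound to (or will be assigned
        // to), adjusted for any subregister indices on MO and on the VirtReg
        // operand itself.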
        auto tryAddHint = [&](const MachineOperand *MO) -> void {
          Register Reg = MO->getReg();
          Register PhysReg =
            Register::isPhysicalRegister(Reg) ? Reg : VRM->getPhys(Reg);
          if (PhysReg) {
            if (MO->getSubReg())
              PhysReg = getSubReg(PhysReg, MO->getSubReg());
            if (VRRegMO->getSubReg())
              PhysReg = getMatchingSuperReg(PhysReg, VRRegMO->getSubReg(),
                                            MRI->getRegClass(VirtReg));
            if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
              TwoAddrHints.insert(PhysReg);
          }
        };
        tryAddHint(OtherMO);
        if (CommuMO)
          tryAddHint(CommuMO);
      }
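    // Add the collected two address hints in allocation order, after the
    // copy hints.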
    for (MCPhysReg OrderReg : Order)
      if (TwoAddrHints.count(OrderReg))
        Hints.push_back(OrderReg);
  }

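  // For a GRX32 virtual register, try to deduce from its uses whether it is
  // better restricted to the low (GR32) or high (GRH32) part of a 64-bit GPR.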
  if (MRI->getRegClass(VirtReg) == &SystemZ::GRX32BitRegClass) {
    SmallVector<unsigned, 8> Worklist;
    SmallSet<unsigned, 4> DoneRegs;
    Worklist.push_back(VirtReg);
    while (!Worklist.empty()) {
      unsigned Reg = Worklist.pop_back_val();
      if (!DoneRegs.insert(Reg).second)
        continue;

      for (auto &Use : MRI->reg_instructions(Reg)) {
        // For LOCRMux, see if the other operand is already a high or low
        // register, and in that case give the corresponding hints for
        // VirtReg. LOCR instructions need both operands in either high or
        // low parts. Same handling for SELRMux.
        if (Use.getOpcode() == SystemZ::LOCRMux ||
            Use.getOpcode() == SystemZ::SELRMux) {
          MachineOperand &TrueMO = Use.getOperand(1);
          MachineOperand &FalseMO = Use.getOperand(2);
          const TargetRegisterClass *RC =
            TRI->getCommonSubClass(getRC32(FalseMO, VRM, MRI),
                                   getRC32(TrueMO, VRM, MRI));
          if (Use.getOpcode() == SystemZ::SELRMux)
            RC = TRI->getCommonSubClass(RC,
                                        getRC32(Use.getOperand(0), VRM, MRI));
          if (RC && RC != &SystemZ::GRX32BitRegClass) {
            addHints(Order, Hints, RC, MRI);
            // Return true to make these hints the only regs available to
            // RA. This may mean extra spilling but since the alternative is
            // a jump sequence expansion of the LOCRMux, it is preferred.
            return true;
          }

          // Add the other operand of the LOCRMux to the worklist.
          Register OtherReg =
              (TrueMO.getReg() == Reg ? FalseMO.getReg() : TrueMO.getReg());
          if (MRI->getRegClass(OtherReg) == &SystemZ::GRX32BitRegClass)
            Worklist.push_back(OtherReg);
        } // end LOCRMux
        else if (Use.getOpcode() == SystemZ::CHIMux ||
                 Use.getOpcode() == SystemZ::CFIMux) {
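          // A compare against zero of a value defined only by LMux loads is
          // hinted to the low (GR32) registers, presumably so that the load
          // and compare can later be combined into a load-and-test.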
          if (Use.getOperand(1).getImm() == 0) {
            bool OnlyLMuxes = true;
            for (MachineInstr &DefMI : MRI->def_instructions(VirtReg))
              if (DefMI.getOpcode() != SystemZ::LMux)
                OnlyLMuxes = false;
            if (OnlyLMuxes) {
              addHints(Order, Hints, &SystemZ::GR32BitRegClass, MRI);
              // Return false to make these hints preferred but not obligatory.
              return false;
            }
          }
        } // end CHIMux / CFIMux
      }
    }
  }

  return BaseImplRetVal;
}

const MCPhysReg *
SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const SystemZSubtarget &Subtarget = MF->getSubtarget<SystemZSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    return CSR_SystemZ_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
    return Subtarget.hasVector() ? CSR_SystemZ_AllRegs_Vector_SaveList
                                 : CSR_SystemZ_AllRegs_SaveList;
  if (MF->getSubtarget().getTargetLowering()->supportSwiftError() &&
      MF->getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_SystemZ_SwiftError_SaveList;
  return CSR_SystemZ_SaveList;
}

const uint32_t *
SystemZRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  if (CC == CallingConv::GHC)
    return CSR_SystemZ_NoRegs_RegMask;
  if (CC == CallingConv::AnyReg)
    return Subtarget.hasVector() ? CSR_SystemZ_AllRegs_Vector_RegMask
                                 : CSR_SystemZ_AllRegs_RegMask;
  if (MF.getSubtarget().getTargetLowering()->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(
          Attribute::SwiftError))
    return CSR_SystemZ_SwiftError_RegMask;
  return CSR_SystemZ_RegMask;
}

BitVector
SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const SystemZFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF)) {
    // R11D is the frame pointer.  Reserve all aliases.
    Reserved.set(SystemZ::R11D);
    Reserved.set(SystemZ::R11L);
    Reserved.set(SystemZ::R11H);
    Reserved.set(SystemZ::R10Q);
  }

  // R15D is the stack pointer.  Reserve all aliases.
  Reserved.set(SystemZ::R15D);
  Reserved.set(SystemZ::R15L);
  Reserved.set(SystemZ::R15H);
  Reserved.set(SystemZ::R14Q);

  // A0 and A1 hold the thread pointer.
  Reserved.set(SystemZ::A0);
  Reserved.set(SystemZ::A1);

  // FPC is the floating-point control register.
  Reserved.set(SystemZ::FPC);

  return Reserved;
}

void
SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  assert(SPAdj == 0 && "Outgoing arguments should be part of the frame");

  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();
  auto *TII =
      static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  DebugLoc DL = MI->getDebugLoc();

  // Decompose the frame index into a base and offset.
  int FrameIndex = MI->getOperand(FIOperandNum).getIndex();
  unsigned BasePtr;
  int64_t Offset = (TFI->getFrameIndexReference(MF, FrameIndex, BasePtr) +
                    MI->getOperand(FIOperandNum + 1).getImm());

  // Special handling of dbg_value instructions.
  if (MI->isDebugValue()) {
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, /*isDef*/ false);
    MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  // See if the offset is in range, or if an equivalent instruction that
  // accepts the offset exists.
  unsigned Opcode = MI->getOpcode();
  unsigned OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
  if (OpcodeForOffset) {
    if (OpcodeForOffset == SystemZ::LE &&
        MF.getSubtarget<SystemZSubtarget>().hasVector()) {
      // If LE is ok for offset, use LDE instead on z13.
      OpcodeForOffset = SystemZ::LDE32;
    }
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
  } else {
    // Create an anchor point that is in range.  Start at 0xffff so that we
    // can use LLILH to load the immediate.
    int64_t OldOffset = Offset;
    int64_t Mask = 0xffff;
    do {
      Offset = OldOffset & Mask;
      OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
      Mask >>= 1;
      assert(Mask && "One offset must be OK");
    } while (!OpcodeForOffset);

    Register ScratchReg =
        MF.getRegInfo().createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    int64_t HighOffset = OldOffset - Offset;

    if (MI->getDesc().TSFlags & SystemZII::HasIndex &&
        MI->getOperand(FIOperandNum + 2).getReg() == 0) {
      // Load the offset into the scratch register and use it as an index.
      // The scratch register then dies here.
      TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
      MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
      MI->getOperand(FIOperandNum + 2).ChangeToRegister(ScratchReg,
                                                        false, false, true);
    } else {
      // Load the anchor address into a scratch register.
      unsigned LAOpcode = TII->getOpcodeForOffset(SystemZ::LA, HighOffset);
      if (LAOpcode)
        BuildMI(MBB, MI, DL, TII->get(LAOpcode), ScratchReg)
          .addReg(BasePtr).addImm(HighOffset).addReg(0);
      else {
        // Load the high offset into the scratch register and use it as
        // an index.
        TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
        BuildMI(MBB, MI, DL, TII->get(SystemZ::AGR), ScratchReg)
          .addReg(ScratchReg, RegState::Kill).addReg(BasePtr);
      }

      // Use the scratch register as the base.  It then dies here.
      MI->getOperand(FIOperandNum).ChangeToRegister(ScratchReg,
                                                    false, false, true);
    }
  }
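  // Rewrite the instruction to use the opcode and offset that are now known
  // to be in range.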
  MI->setDesc(TII->get(OpcodeForOffset));
  MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}

bool SystemZRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  assert(MI->isCopy() && "Only expecting COPY instructions");

  // Coalesce anything which is not a COPY involving a subreg to/from GR128.
  if (!(NewRC->hasSuperClassEq(&SystemZ::GR128BitRegClass) &&
        (getRegSizeInBits(*SrcRC) <= 64 || getRegSizeInBits(*DstRC) <= 64)))
    return true;

  // Allow coalescing of a GR128 subreg COPY only if the live ranges are small
  // and local to one MBB with not too many interfering registers. Otherwise
  // regalloc may run out of registers.

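  // Identify the wide (GR128) and narrow operands of the COPY and fetch their
  // live intervals.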
  unsigned WideOpNo = (getRegSizeInBits(*SrcRC) == 128 ? 1 : 0);
  Register GR128Reg = MI->getOperand(WideOpNo).getReg();
  Register GRNarReg = MI->getOperand((WideOpNo == 1) ? 0 : 1).getReg();
  LiveInterval &IntGR128 = LIS.getInterval(GR128Reg);
  LiveInterval &IntGRNar = LIS.getInterval(GRNarReg);

  // Check that the two virtual registers are local to MBB.
  MachineBasicBlock *MBB = MI->getParent();
  MachineInstr *FirstMI_GR128 =
    LIS.getInstructionFromIndex(IntGR128.beginIndex());
  MachineInstr *FirstMI_GRNar =
    LIS.getInstructionFromIndex(IntGRNar.beginIndex());
  MachineInstr *LastMI_GR128 = LIS.getInstructionFromIndex(IntGR128.endIndex());
  MachineInstr *LastMI_GRNar = LIS.getInstructionFromIndex(IntGRNar.endIndex());
  if ((!FirstMI_GR128 || FirstMI_GR128->getParent() != MBB) ||
      (!FirstMI_GRNar || FirstMI_GRNar->getParent() != MBB) ||
      (!LastMI_GR128 || LastMI_GR128->getParent() != MBB) ||
      (!LastMI_GRNar || LastMI_GRNar->getParent() != MBB))
    return false;

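  // Scan from the first instruction of the COPY source's interval to the last
  // instruction of the COPY destination's interval.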
  MachineBasicBlock::iterator MII = nullptr, MEE = nullptr;
  if (WideOpNo == 1) {
    MII = FirstMI_GR128;
    MEE = LastMI_GRNar;
  } else {
    MII = FirstMI_GRNar;
    MEE = LastMI_GR128;
  }

  // Check if coalescing seems safe by finding the set of clobbered physreg
  // pairs in the region.
  BitVector PhysClobbered(getNumRegs());
  MEE++;
  for (; MII != MEE; ++MII) {
    for (const MachineOperand &MO : MII->operands())
      if (MO.isReg() && Register::isPhysicalRegister(MO.getReg())) {
        for (MCSuperRegIterator SI(MO.getReg(), this, true/*IncludeSelf*/);
             SI.isValid(); ++SI)
          if (NewRC->contains(*SI)) {
            PhysClobbered.set(*SI);
            break;
          }
      }
  }

  // Demand an arbitrary margin of free regs.
  unsigned const DemandedFreeGR128 = 3;
  if (PhysClobbered.count() > (NewRC->getNumRegs() - DemandedFreeGR128))
    return false;

  return true;
}

Register
SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? SystemZ::R11D : SystemZ::R15D;
}

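// A CC value cannot be copied directly; it must be transferred via a GR32
// register.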
const TargetRegisterClass *
SystemZRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &SystemZ::CCRRegClass)
    return &SystemZ::GR32BitRegClass;
  return RC;
}