//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

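// Out-of-line virtual method to provide a home for the class vtable.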
void CallLowering::anchor() {}

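// Translate an IR call into the target-independent CallLoweringInfo record:
// gather per-argument ABI flags, resolve the callee to either a global or a
// register, and record calling-convention and tail-call properties before
// delegating to the target's lowerCall implementation.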
bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::function<unsigned()> GetCalleeReg) const {
  CallLoweringInfo Info;
  const DataLayout &DL = MIRBuilder.getDataLayout();

  // The first step is to marshal all the function's parameters into the
  // correct physregs and memory locations. Gather the sequence of argument
  // types that we'll pass to the assigner function.
  unsigned i = 0;
  unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
  for (auto &Arg : CB.args()) {
    ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
    Info.OrigArgs.push_back(OrigArg);
    ++i;
  }

  // Try looking through a bitcast from one function type to another.
  // Commonly happens with calls to objc_msgSend().
  const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
  if (const Function *F = dyn_cast<Function>(CalleeV))
    Info.Callee = MachineOperand::CreateGA(F, 0);
  else
    Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  Info.OrigRet = ArgInfo{ResRegs, CB.getType(), ISD::ArgFlagsTy{}};
  if (!Info.OrigRet.Ty->isVoidTy())
    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);

  MachineFunction &MF = MIRBuilder.getMF();
  Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
  Info.CallConv = CB.getCallingConv();
  Info.SwiftErrorVReg = SwiftErrorVReg;
  Info.IsMustTailCall = CB.isMustTailCall();
  Info.IsTailCall =
      CB.isTailCall() && isInTailCallPosition(CB, MF.getTarget()) &&
      (MF.getFunction()
           .getFnAttribute("disable-tail-calls")
           .getValueAsString() != "true");
  Info.IsVarArg = CB.getFunctionType()->isVarArg();
  return lowerCall(MIRBuilder, Info);
}

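// Derive the ABI flags (extension, sret, byval, etc.) for one argument from
// its IR attributes. OpIdx is an AttributeList index, i.e. the argument
// number offset by AttributeList::FirstArgIndex (index 0 names the return
// value).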
template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  auto &Flags = Arg.Flags[0];
  const AttributeList &Attrs = FuncInfo.getAttributes();
  if (Attrs.hasAttribute(OpIdx, Attribute::ZExt))
    Flags.setZExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::SExt))
    Flags.setSExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::InReg))
    Flags.setInReg();
  if (Attrs.hasAttribute(OpIdx, Attribute::StructRet))
    Flags.setSRet();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftSelf))
    Flags.setSwiftSelf();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftError))
    Flags.setSwiftError();
  if (Attrs.hasAttribute(OpIdx, Attribute::ByVal))
    Flags.setByVal();
  if (Attrs.hasAttribute(OpIdx, Attribute::Preallocated))
    Flags.setPreallocated();
  if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca))
    Flags.setInAlloca();

  if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
    Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();

    auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
    Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));

    // For ByVal, the alignment should be passed from the frontend. The
    // backend will guess if this info is missing, but there are cases it
    // cannot get right. Note that getParamAlign takes a parameter number,
    // so convert the attribute index back before querying it.
    Align FrameAlign;
    if (auto ParamAlign =
            FuncInfo.getParamAlign(OpIdx - AttributeList::FirstArgIndex))
      FrameAlign = *ParamAlign;
    else
      FrameAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
    Flags.setByValAlign(FrameAlign);
  }
  if (Attrs.hasAttribute(OpIdx, Attribute::Nest))
    Flags.setNest();
  Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
}

template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallBase &FuncInfo) const;

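// Build a single vreg of type PackedTy by inserting each source register at
// its computed bit offset. For example, packing two s64 halves into an s128
// value emits (illustrative MIR):
//   %0:_(s128) = G_IMPLICIT_DEF
//   %1:_(s128) = G_INSERT %0, %lo(s64), 0
//   %2:_(s128) = G_INSERT %1, %hi(s64), 64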
Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
                                MachineIRBuilder &MIRBuilder) const {
  assert(SrcRegs.size() > 1 && "Nothing to pack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  LLT PackedLLT = getLLTForType(*PackedTy, DL);

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == SrcRegs.size() && "Regs / types mismatch");

  Register Dst = MRI->createGenericVirtualRegister(PackedLLT);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < SrcRegs.size(); ++i) {
    Register NewDst = MRI->createGenericVirtualRegister(PackedLLT);
    MIRBuilder.buildInsert(NewDst, Dst, SrcRegs[i], Offsets[i]);
    Dst = NewDst;
  }

  return Dst;
}

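// The inverse of packRegs: extract each destination register from SrcReg at
// its computed bit offset, e.g. (illustrative MIR):
//   %dst0:_(s64) = G_EXTRACT %src(s128), 0
//   %dst1:_(s64) = G_EXTRACT %src(s128), 64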
void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
                              Type *PackedTy,
                              MachineIRBuilder &MIRBuilder) const {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const DataLayout &DL = MIRBuilder.getDataLayout();

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
}

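// Convenience overload that builds a CCState from the current function's
// calling convention before running the full assignment logic below.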
bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
  return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler);
}

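// The core assignment loop works in two phases. First, each argument is run
// through the calling-convention assignment function; types the convention
// can't take directly are split into multiple register-sized parts. Second,
// the resulting CCValAssign locations are walked and the handler emits the
// actual copies, extensions, loads, or stores for each part.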
bool CallLowering::handleAssignments(CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    EVT CurVT = EVT::getEVT(Args[i].Ty);
    if (!CurVT.isSimple() ||
        Handler.assignArg(i, CurVT.getSimpleVT(), CurVT.getSimpleVT(),
                          CCValAssign::Full, Args[i], Args[i].Flags[0],
                          CCInfo)) {
      MVT NewVT = TLI->getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), CurVT);

      // If we need to split the type over multiple regs, check it's a scenario
      // we currently support.
      unsigned NumParts = TLI->getNumRegistersForCallingConv(
          F.getContext(), F.getCallingConv(), CurVT);
      if (NumParts > 1) {
        // For now only handle exact splits.
        if (NewVT.getSizeInBits() * NumParts != CurVT.getSizeInBits())
          return false;
      }


      // For incoming arguments (physregs to vregs), we could have values in
      // physregs (or memlocs) which we want to extract and copy to vregs.
      // During this, we might have to deal with the LLT being split across
      // multiple regs, so we have to record this information for later.
      //
      // If we have outgoing args, then we have the opposite case. We have a
      // vreg with an LLT which we want to assign to a physical location, and
      // we might have to record that the value has to be split later.
      if (Handler.isIncomingArgumentHandler()) {
        if (NumParts == 1) {
          // Try to use the register type if we couldn't assign the VT.
          if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i],
                                Args[i].Flags[0], CCInfo))
            return false;
        } else {
          // We're handling an incoming arg which is split over multiple regs.
          // E.g. passing an s128 on AArch64.
          ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
          Args[i].OrigRegs.push_back(Args[i].Regs[0]);
          Args[i].Regs.clear();
          Args[i].Flags.clear();
          LLT NewLLT = getLLTForMVT(NewVT);
          // For each split register, create and assign a vreg that will store
          // the incoming component of the larger value. These will later be
          // merged to form the final vreg.
          for (unsigned Part = 0; Part < NumParts; ++Part) {
            Register Reg =
                MIRBuilder.getMRI()->createGenericVirtualRegister(NewLLT);
            ISD::ArgFlagsTy Flags = OrigFlags;
            if (Part == 0) {
              Flags.setSplit();
            } else {
              Flags.setOrigAlign(Align(1));
              if (Part == NumParts - 1)
                Flags.setSplitEnd();
            }
            Args[i].Regs.push_back(Reg);
            Args[i].Flags.push_back(Flags);
            if (Handler.assignArg(i + Part, NewVT, NewVT, CCValAssign::Full,
                                  Args[i], Args[i].Flags[Part], CCInfo)) {
              // Still couldn't assign this smaller part type for some reason.
              return false;
            }
          }
        }
      } else {
        // Handling an outgoing arg that might need to be split.
        if (NumParts < 2)
          return false; // Don't know how to deal with this type combination.

        // This type is passed via multiple registers in the calling convention.
        // We need to extract the individual parts.
        Register LargeReg = Args[i].Regs[0];
        LLT SmallTy = LLT::scalar(NewVT.getSizeInBits());
        auto Unmerge = MIRBuilder.buildUnmerge(SmallTy, LargeReg);
        assert(Unmerge->getNumOperands() == NumParts + 1);
        ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
        // We're going to replace the regs and flags with the split ones.
        Args[i].Regs.clear();
        Args[i].Flags.clear();
        for (unsigned PartIdx = 0; PartIdx < NumParts; ++PartIdx) {
          ISD::ArgFlagsTy Flags = OrigFlags;
          if (PartIdx == 0) {
            Flags.setSplit();
          } else {
            Flags.setOrigAlign(Align(1));
            if (PartIdx == NumParts - 1)
              Flags.setSplitEnd();
          }
          Args[i].Regs.push_back(Unmerge.getReg(PartIdx));
          Args[i].Flags.push_back(Flags);
          if (Handler.assignArg(i + PartIdx, NewVT, NewVT, CCValAssign::Full,
                                Args[i], Args[i].Flags[PartIdx], CCInfo))
            return false;
        }
      }
    }
  }

  for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");

    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      unsigned NumArgRegs =
          Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
      if (!NumArgRegs)
        return false;
      j += NumArgRegs;
      continue;
    }

    // FIXME: Pack registers if we have more than one.
    Register ArgReg = Args[i].Regs[0];

    EVT OrigVT = EVT::getEVT(Args[i].Ty);
    EVT VAVT = VA.getValVT();
    const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);

    if (VA.isRegLoc()) {
      if (Handler.isIncomingArgumentHandler() && VAVT != OrigVT) {
        if (VAVT.getSizeInBits() < OrigVT.getSizeInBits()) {
          // Expected to be multiple regs for a single incoming arg.
          unsigned NumArgRegs = Args[i].Regs.size();
          if (NumArgRegs < 2)
            return false;

          assert((j + (NumArgRegs - 1)) < ArgLocs.size() &&
                 "Too many regs for number of args");
          for (unsigned Part = 0; Part < NumArgRegs; ++Part) {
            // There should be Regs.size() ArgLocs per argument.
            VA = ArgLocs[j + Part];
            Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
          }
          j += NumArgRegs - 1;
          // Merge the split registers into the expected larger result vreg
          // of the original call.
          MIRBuilder.buildMerge(Args[i].OrigRegs[0], Args[i].Regs);
          continue;
        }
        const LLT VATy(VAVT.getSimpleVT());
        Register NewReg =
            MIRBuilder.getMRI()->createGenericVirtualRegister(VATy);
        Handler.assignValueToReg(NewReg, VA.getLocReg(), VA);
        // If it's a vector type, we either need to truncate the elements
        // or do an unmerge to get the lower block of elements.
        if (VATy.isVector() &&
            VATy.getNumElements() > OrigVT.getVectorNumElements()) {
          // Just handle the case where the VA type is 2 * original type.
          if (VATy.getNumElements() != OrigVT.getVectorNumElements() * 2) {
            LLVM_DEBUG(dbgs()
                       << "Incoming promoted vector arg has too many elts\n");
            return false;
          }
          auto Unmerge = MIRBuilder.buildUnmerge({OrigTy, OrigTy}, {NewReg});
          MIRBuilder.buildCopy(ArgReg, Unmerge.getReg(0));
        } else {
          MIRBuilder.buildTrunc(ArgReg, {NewReg});
        }
      } else if (!Handler.isIncomingArgumentHandler()) {
        assert((j + (Args[i].Regs.size() - 1)) < ArgLocs.size() &&
               "Too many regs for number of args");
        // This is an outgoing argument that might have been split.
        for (unsigned Part = 0; Part < Args[i].Regs.size(); ++Part) {
          // There should be Regs.size() ArgLocs per argument.
          VA = ArgLocs[j + Part];
          Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
        }
        j += Args[i].Regs.size() - 1;
      } else {
        Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
      }
    } else if (VA.isMemLoc()) {
      // We don't currently support loading/storing a type that needs to be
      // split to the stack. Should be easy, just not implemented yet.
      if (Args[i].Regs.size() > 1) {
        LLVM_DEBUG(
            dbgs() << "Load/store a split arg to/from the stack not "
                      "implemented yet\n");
        return false;
      }

      EVT ValVT = VA.getValVT();
      unsigned MemSize = ValVT == MVT::iPTR ? DL.getPointerSize()
                                            : ValVT.getStoreSize();

      unsigned Offset = VA.getLocMemOffset();
      MachinePointerInfo MPO;
      Register StackAddr = Handler.getStackAddress(MemSize, Offset, MPO);
      Handler.assignValueToAddress(Args[i], StackAddr, MemSize, MPO, VA);
    } else {
      // FIXME: Support byvals and other weirdness
      return false;
    }
  }
  return true;
}

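// Run every argument through the given calling-convention assignment
// function, choosing the fixed or vararg variant per argument, and report
// whether all of them could be assigned.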
bool CallLowering::analyzeArgInfo(CCState &CCState,
                                  SmallVectorImpl<ArgInfo> &Args,
                                  CCAssignFn &AssignFnFixed,
                                  CCAssignFn &AssignFnVarArg) const {
  for (unsigned i = 0, e = Args.size(); i < e; ++i) {
    MVT VT = MVT::getVT(Args[i].Ty);
    CCAssignFn &Fn = Args[i].IsFixed ? AssignFnFixed : AssignFnVarArg;
    if (Fn(i, VT, VT, CCValAssign::Full, Args[i].Flags[0], CCState)) {
      // Bail out on anything we can't handle.
      LLVM_DEBUG(dbgs() << "Cannot analyze " << EVT(VT).getEVTString()
                        << " (arg number = " << i << ")\n");
      return false;
    }
  }
  return true;
}

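// Check whether the callee's calling convention assigns the given values to
// the same locations as the caller's. Typically used when deciding whether a
// tail call is safe: if the locations differ, the caller would have to move
// the result, which a true tail call cannot do.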
bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
                                     MachineFunction &MF,
                                     SmallVectorImpl<ArgInfo> &InArgs,
                                     CCAssignFn &CalleeAssignFnFixed,
                                     CCAssignFn &CalleeAssignFnVarArg,
                                     CCAssignFn &CallerAssignFnFixed,
                                     CCAssignFn &CallerAssignFnVarArg) const {
  const Function &F = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = F.getCallingConv();

  if (CallerCC == CalleeCC)
    return true;

  SmallVector<CCValAssign, 16> ArgLocs1;
  CCState CCInfo1(CalleeCC, false, MF, ArgLocs1, F.getContext());
  if (!analyzeArgInfo(CCInfo1, InArgs, CalleeAssignFnFixed,
                      CalleeAssignFnVarArg))
    return false;

  SmallVector<CCValAssign, 16> ArgLocs2;
  CCState CCInfo2(CallerCC, false, MF, ArgLocs2, F.getContext());
  if (!analyzeArgInfo(CCInfo2, InArgs, CallerAssignFnFixed,
                      CallerAssignFnVarArg))
    return false;

  // We need the argument locations to match up exactly. If one convention
  // produces more locations than the other, the results are incompatible.
  if (ArgLocs1.size() != ArgLocs2.size())
    return false;

  // Make sure that each location is passed in exactly the same way.
  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
    const CCValAssign &Loc1 = ArgLocs1[i];
    const CCValAssign &Loc2 = ArgLocs2[i];

    // Both locations must be of the same kind: if one is a register and the
    // other isn't, they're incompatible.
    if (Loc1.isRegLoc() != Loc2.isRegLoc())
      return false;

    if (Loc1.isRegLoc()) {
      // If they don't have the same register location, we're done.
      if (Loc1.getLocReg() != Loc2.getLocReg())
        return false;

      // They matched, so we can move to the next ArgLoc.
      continue;
    }

    // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
    if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
      return false;
  }

  return true;
}

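// Extend ValReg to the location type recorded in VA, capping the width at
// MaxSizeBits when it is nonzero. For example, a CCValAssign::SExt of an s8
// value into an s32 location emits (illustrative MIR):
//   %ext:_(s32) = G_SEXT %val(s8)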
Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    CCValAssign &VA,
                                                    unsigned MaxSizeBits) {
  LLT LocTy{VA.getLocVT()};
  LLT ValTy = MRI.getType(ValReg);
  if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
    return ValReg;

  if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
    if (MaxSizeBits <= ValTy.getSizeInBits())
      return ValReg;
    LocTy = LLT::scalar(MaxSizeBits);
  }

  switch (VA.getLocInfo()) {
  default: break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB.getReg(0);
  }
  case CCValAssign::SExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}

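// Out-of-line virtual method to provide a home for the class vtable.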
void CallLowering::ValueHandler::anchor() {}