//===- Hexagon.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// Hexagon ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class HexagonABIInfo : public DefaultABIInfo {
public:
  HexagonABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty, unsigned *RegsLeft) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
  Address EmitVAArgFromMemory(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty) const;
  Address EmitVAArgForHexagon(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty) const;
  Address EmitVAArgForHexagonLinux(CodeGenFunction &CGF, Address VAListAddr,
                                   QualType Ty) const;
};

class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<HexagonABIInfo>(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
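    // On Hexagon, r29 is the stack pointer; 29 is its DWARF register number.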
    return 29;
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;
  }
};

} // namespace

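// The Hexagon calling convention passes the first six 32-bit argument slots
// in registers r0-r5; a 64-bit value occupies an aligned (even/odd) register
// pair. RegsLeft below tracks how many of those slots remain as arguments are
// classified in order.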
void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
  unsigned RegsLeft = 6;
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, &RegsLeft);
}

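// As an illustration of how the register budget is consumed: for
// f(int a, long long b, int c), 'a' takes r0 (RegsLeft 6 -> 5), 'b' needs an
// even-aligned register pair, so r1 is skipped and r2:r3 is used (RegsLeft
// 5 -> 2), and 'c' takes r4 (RegsLeft 2 -> 1).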
static bool HexagonAdjustRegsLeft(uint64_t Size, unsigned *RegsLeft) {
  assert(Size <= 64 && "Not expecting to pass arguments larger than 64 bits"
                       " through registers");

  if (*RegsLeft == 0)
    return false;

  if (Size <= 32) {
    (*RegsLeft)--;
    return true;
  }

  if (2 <= (*RegsLeft & (~1U))) {
    *RegsLeft = (*RegsLeft & (~1U)) - 2;
    return true;
  }

  // The next available register was r5, but the candidate is larger than 32
  // bits, so it has to go on the stack. However, we still consume r5.
  if (*RegsLeft == 1)
    *RegsLeft = 0;

  return false;
}

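// Argument classification, in outline: scalars of up to 64 bits are passed
// directly (promotable integers are extended), wider _BitInt values and
// aggregates larger than 8 bytes are passed indirectly (byval), and small
// aggregates are coerced to an integer of the next power-of-two width.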
ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
                                                unsigned *RegsLeft) const {
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 64)
      HexagonAdjustRegsLeft(Size, RegsLeft);

    if (Size > 64 && Ty->isBitIntType())
      return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

    return isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                             : ABIArgInfo::getDirect();
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);
  unsigned Align = getContext().getTypeAlign(Ty);

  if (Size > 64)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

  if (HexagonAdjustRegsLeft(Size, RegsLeft))
    Align = Size <= 32 ? 32 : 64;
  if (Size <= Align) {
    // Pass in the smallest viable integer type.
    Size = llvm::bit_ceil(Size);
    return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
  }
  return DefaultABIInfo::classifyArgumentType(Ty);
}

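// Return values follow similar rules: HVX vectors whose size matches one or
// two vector registers are returned directly in vector registers, scalars and
// aggregates of up to 8 bytes are returned directly in registers, and
// everything larger is returned through memory.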
ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const TargetInfo &T = CGT.getTarget();
  uint64_t Size = getContext().getTypeSize(RetTy);

  if (RetTy->getAs<VectorType>()) {
    // HVX vectors are returned in vector registers or register pairs.
    if (T.hasFeature("hvx")) {
      assert(T.hasFeature("hvx-length64b") || T.hasFeature("hvx-length128b"));
      uint64_t VecSize = T.hasFeature("hvx-length64b") ? 64*8 : 128*8;
      if (Size == VecSize || Size == 2*VecSize)
        return ABIArgInfo::getDirectInReg();
    }
    // Large vector types should be returned via memory.
    if (Size > 64)
      return getNaturalAlignIndirect(RetTy);
  }

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    if (Size > 64 && RetTy->isBitIntType())
      return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);

    return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                                : ABIArgInfo::getDirect();
  }

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Aggregates <= 8 bytes are returned in registers; other aggregates are
  // returned indirectly.
  if (Size <= 64) {
    // Return in the smallest viable integer type.
    Size = llvm::bit_ceil(Size);
    return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
  }
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
}

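// For the register-save-area va_list scheme (see EmitVAArgForHexagonLinux
// below), the builtin va_list is, in effect, a record of three pointers:
//
//   struct __va_list_tag {
//     void *__current_saved_reg_area_pointer; // next unread register slot
//     void *__saved_reg_area_end_pointer;     // end of the register save area
//     void *__overflow_area_pointer;          // next argument on the stack
//   };
//
// The StructGEP indices 0, 1, and 2 used below address these fields.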
Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF,
                                            Address VAListAddr,
                                            QualType Ty) const {
  // Load the overflow area pointer.
  Address __overflow_area_pointer_p =
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
  llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
      __overflow_area_pointer_p, "__overflow_area_pointer");

  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 4) {
    // Alignment should be a power of 2.
    assert((Align & (Align - 1)) == 0 && "Alignment is not power of 2!");

    // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);

    // Add offset to the current pointer to access the argument.
    __overflow_area_pointer =
        CGF.Builder.CreateGEP(CGF.Int8Ty, __overflow_area_pointer, Offset);
    llvm::Value *AsInt =
        CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);

    // Create a mask to be ANDed with (overflow_arg_area + align - 1).
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -(int)Align);
    __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
        CGF.Builder.CreateAnd(AsInt, Mask), __overflow_area_pointer->getType(),
        "__overflow_area_pointer.align");
  }

  // Form a typed address for the argument from its in-memory type.
  llvm::Type *PTy = CGF.ConvertTypeForMem(Ty);
  Address AddrTyped =
      Address(__overflow_area_pointer, PTy, CharUnits::fromQuantity(Align));

  // Round up to the minimum stack alignment for varargs, which is 4 bytes.
  uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);

  __overflow_area_pointer = CGF.Builder.CreateGEP(
      CGF.Int8Ty, __overflow_area_pointer,
      llvm::ConstantInt::get(CGF.Int32Ty, Offset),
      "__overflow_area_pointer.next");
  CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p);

  return AddrTyped;
}

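// Non-musl Hexagon targets use a plain 'char *' va_list: round the current
// pointer up to the argument's alignment when it exceeds 4 bytes, read the
// argument from there, and advance the pointer past the argument rounded up
// to a 4-byte slot.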
Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF,
                                            Address VAListAddr,
                                            QualType Ty) const {
  // FIXME: Need to handle alignment
  llvm::Type *BP = CGF.Int8PtrTy;
  CGBuilderTy &Builder = CGF.Builder;
  Address VAListAddrAsBPP = VAListAddr.withElementType(BP);
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  // Handle address alignment for type alignment > 32 bits
  uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
  if (TyAlign > 4) {
    assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!");
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
    AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
  }
  Address AddrTyped =
      Address(Addr, CGF.ConvertType(Ty), CharUnits::fromQuantity(TyAlign));

  uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr = Builder.CreateGEP(
      CGF.Int8Ty, Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

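// For the musl (Linux) ABI, arguments that arrived in r0-r5 are spilled by a
// variadic callee into a saved register area, so va_arg first checks whether
// the next argument still fits below __saved_reg_area_end_pointer; if so it
// is read from there, otherwise from the overflow (stack) area. The two paths
// are emitted as separate basic blocks and joined with a PHI.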
Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
                                                 Address VAListAddr,
                                                 QualType Ty) const {
  int ArgSize = CGF.getContext().getTypeSize(Ty) / 8;

  if (ArgSize > 8)
    return EmitVAArgFromMemory(CGF, VAListAddr, Ty);

  // Here we have to check whether the argument is in the register area or in
  // the overflow area. If the saved register area pointer plus the argument
  // size rounded up to the alignment is greater than the saved register area
  // end pointer, the argument is in the overflow area.
  unsigned RegsLeft = 6;
  Ty = CGF.getContext().getCanonicalType(Ty);
  (void)classifyArgumentType(Ty, &RegsLeft);

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  // Get the rounded size of the argument. GCC does not allow varargs smaller
  // than 4 bytes; we follow the same logic here.
  ArgSize = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
  int ArgAlign = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;

  // The argument may be in the saved register area.
  CGF.EmitBlock(MaybeRegBlock);

  // Load the current saved register area pointer.
  Address __current_saved_reg_area_pointer_p = CGF.Builder.CreateStructGEP(
      VAListAddr, 0, "__current_saved_reg_area_pointer_p");
  llvm::Value *__current_saved_reg_area_pointer = CGF.Builder.CreateLoad(
      __current_saved_reg_area_pointer_p, "__current_saved_reg_area_pointer");

  // Load the saved register area end pointer.
  Address __saved_reg_area_end_pointer_p = CGF.Builder.CreateStructGEP(
      VAListAddr, 1, "__saved_reg_area_end_pointer_p");
  llvm::Value *__saved_reg_area_end_pointer = CGF.Builder.CreateLoad(
      __saved_reg_area_end_pointer_p, "__saved_reg_area_end_pointer");

  // If the size of the argument is greater than 4 bytes, check whether the
  // stack location is aligned to 8 bytes.
  if (ArgAlign > 4) {

    llvm::Value *__current_saved_reg_area_pointer_int =
        CGF.Builder.CreatePtrToInt(__current_saved_reg_area_pointer,
                                   CGF.Int32Ty);

    __current_saved_reg_area_pointer_int = CGF.Builder.CreateAdd(
        __current_saved_reg_area_pointer_int,
        llvm::ConstantInt::get(CGF.Int32Ty, (ArgAlign - 1)),
        "align_current_saved_reg_area_pointer");

    __current_saved_reg_area_pointer_int =
        CGF.Builder.CreateAnd(__current_saved_reg_area_pointer_int,
                              llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
                              "align_current_saved_reg_area_pointer");

    __current_saved_reg_area_pointer =
        CGF.Builder.CreateIntToPtr(__current_saved_reg_area_pointer_int,
                                   __current_saved_reg_area_pointer->getType(),
                                   "align_current_saved_reg_area_pointer");
  }

  llvm::Value *__new_saved_reg_area_pointer =
      CGF.Builder.CreateGEP(CGF.Int8Ty, __current_saved_reg_area_pointer,
                            llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
                            "__new_saved_reg_area_pointer");

  llvm::Value *UsingStack = CGF.Builder.CreateICmpSGT(
      __new_saved_reg_area_pointer, __saved_reg_area_end_pointer);

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, InRegBlock);

  // Implement the block where the argument is read from the saved register
  // area.
  CGF.EmitBlock(InRegBlock);

  llvm::Type *PTy = CGF.ConvertType(Ty);
  llvm::Value *__saved_reg_area_p = CGF.Builder.CreateBitCast(
      __current_saved_reg_area_pointer, llvm::PointerType::getUnqual(PTy));

  CGF.Builder.CreateStore(__new_saved_reg_area_pointer,
                          __current_saved_reg_area_pointer_p);

  CGF.EmitBranch(ContBlock);

  // Implement the block where the argument is read from the overflow area.
  CGF.EmitBlock(OnStackBlock);

  // Load the overflow area pointer.
  Address __overflow_area_pointer_p =
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
  llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
      __overflow_area_pointer_p, "__overflow_area_pointer");

  // Align the overflow area pointer to the alignment of the argument.
  if (ArgAlign > 4) {
    llvm::Value *__overflow_area_pointer_int =
        CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);

    __overflow_area_pointer_int =
        CGF.Builder.CreateAdd(__overflow_area_pointer_int,
                              llvm::ConstantInt::get(CGF.Int32Ty, ArgAlign - 1),
                              "align_overflow_area_pointer");

    __overflow_area_pointer_int =
        CGF.Builder.CreateAnd(__overflow_area_pointer_int,
                              llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
                              "align_overflow_area_pointer");

    __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
        __overflow_area_pointer_int, __overflow_area_pointer->getType(),
        "align_overflow_area_pointer");
  }

  // Get the pointer to the next argument in the overflow area and store it
  // back to the overflow area pointer.
  llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP(
      CGF.Int8Ty, __overflow_area_pointer,
      llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
      "__overflow_area_pointer.next");

  CGF.Builder.CreateStore(__new_overflow_area_pointer,
                          __overflow_area_pointer_p);

  CGF.Builder.CreateStore(__new_overflow_area_pointer,
                          __current_saved_reg_area_pointer_p);

  // Bitcast the overflow area pointer to the type of the argument.
  llvm::Type *OverflowPTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *__overflow_area_p = CGF.Builder.CreateBitCast(
      __overflow_area_pointer, llvm::PointerType::getUnqual(OverflowPTy));

  CGF.EmitBranch(ContBlock);

  // Implement the continuation block: pick the pointer from which to load
  // the variable argument.
  CGF.EmitBlock(ContBlock);

  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
  llvm::Type *MemPTy = llvm::PointerType::getUnqual(MemTy);
  llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr");
  ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock);
  ArgAddr->addIncoming(__overflow_area_p, OnStackBlock);

  return Address(ArgAddr, MemTy, CharUnits::fromQuantity(ArgAlign));
}

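// Musl-based Hexagon targets use the register-save-area va_list scheme above;
// other targets use the simple pointer-bump scheme.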
Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {

  if (getTarget().getTriple().isMusl())
    return EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty);

  return EmitVAArgForHexagon(CGF, VAListAddr, Ty);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createHexagonTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<HexagonTargetCodeGenInfo>(CGM.getTypes());
}