1//===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements AArch64 TargetInfo objects.
10//
11//===----------------------------------------------------------------------===//
12
13#include "AArch64.h"
14#include "clang/Basic/LangOptions.h"
15#include "clang/Basic/TargetBuiltins.h"
16#include "clang/Basic/TargetInfo.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/StringExtras.h"
19#include "llvm/ADT/StringSwitch.h"
20#include "llvm/Support/AArch64TargetParser.h"
21
22using namespace clang;
23using namespace clang::targets;
24
// Table of all AArch64 target builtins, filled in from the .def files in the
// order getTargetBuiltins() exposes them: NEON first, then SVE, then the
// remaining AArch64 builtins.
const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
// Plain builtins: no required header, available in all languages, no feature.
#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsNEON.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsSVE.def"

// BuiltinsAArch64.def also uses LANGBUILTIN (restricted to specific language
// modes) and TARGET_HEADER_BUILTIN (requires a header and a target feature).
#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
#include "clang/Basic/BuiltinsAArch64.def"
};
42
// Construct the generic AArch64 target description: type widths/formats,
// ABI defaults, and OS-specific overrides for the base AArch64 target.
// Endian- and OS-specific subclasses refine this further.
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  // OpenBSD uses 'long long' for int64_t/intmax_t; everyone else here uses
  // 'long' (and most targets also make wchar_t unsigned).
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
  HasLegalHalfType = true;
  HasFloat16 = true;

  // ILP32 (e.g. arm64_32) shrinks long and pointers to 32 bits.
  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  // 128-bit vector alignment and 128-bit (LDXP/STXP-class) atomics.
  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  // AAPCS64 long double is IEEE binary128 (Darwin/Windows override this).
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available.  Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit.  It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  // Profiling counter symbol differs by OS/EABI convention.
  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}
109
110StringRef AArch64TargetInfo::getABI() const { return ABI; }
111
112bool AArch64TargetInfo::setABI(const std::string &Name) {
113  if (Name != "aapcs" && Name != "darwinpcs")
114    return false;
115
116  ABI = Name;
117  return true;
118}
119
120bool AArch64TargetInfo::validateBranchProtection(StringRef Spec,
121                                                 BranchProtectionInfo &BPI,
122                                                 StringRef &Err) const {
123  llvm::AArch64::ParsedBranchProtection PBP;
124  if (!llvm::AArch64::parseBranchProtection(Spec, PBP, Err))
125    return false;
126
127  BPI.SignReturnAddr =
128      llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
129          .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
130          .Case("all", LangOptions::SignReturnAddressScopeKind::All)
131          .Default(LangOptions::SignReturnAddressScopeKind::None);
132
133  if (PBP.Key == "a_key")
134    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
135  else
136    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
137
138  BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
139  return true;
140}
141
142bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
143  return Name == "generic" ||
144         llvm::AArch64::parseCPUArch(Name) != llvm::AArch64::ArchKind::INVALID;
145}
146
// Accept any CPU that passes the name check; no per-CPU state is recorded
// here (feature selection happens in handleTargetFeatures).
bool AArch64TargetInfo::setCPU(const std::string &Name) {
  return isValidCPUName(Name);
}
150
// Populate Values with every CPU name the AArch64 target parser accepts
// (used for -mcpu= diagnostics/completion).
void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}
155
156void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
157                                                MacroBuilder &Builder) const {
158  Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
159  Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
160  Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
161}
162
// Armv8.2-A adds no unconditional feature macros of its own; it only pulls
// in everything mandated by Armv8.1-A.
void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the ARMv8.1 defines
  getTargetDefinesARMV81A(Opts, Builder);
}
168
169void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
170                                                MacroBuilder &Builder) const {
171  Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
172  Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
173  // Also include the Armv8.2 defines
174  getTargetDefinesARMV82A(Opts, Builder);
175}
176
// Armv8.4-A adds no unconditional feature macros here; it only chains down
// to the Armv8.3-A set.
void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.3 defines
  getTargetDefinesARMV83A(Opts, Builder);
}
182
// Armv8.5-A mandates FRINT (floating-point round-to-integral instructions),
// on top of everything from Armv8.4-A.
void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
  // Also include the Armv8.4 defines
  getTargetDefinesARMV84A(Opts, Builder);
}
189
// Armv8.6-A: currently only chains to the Armv8.5-A set; see FIXME for the
// extensions the architecture makes mandatory but which are not yet defined
// unconditionally here.
void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.5 defines
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  // Handle them here.
  getTargetDefinesARMV85A(Opts, Builder);
}
199
// Armv8.7-A adds no unconditional feature macros here; it only chains down
// to the Armv8.6-A set.
void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.6 defines
  getTargetDefinesARMV86A(Opts, Builder);
}
205
// Emit all AArch64 predefined macros: target identification, data-model and
// code-model macros, the ACLE baseline set, feature-conditional ACLE macros
// driven by the flags computed in handleTargetFeatures, return-address
// signing / BTI macros, and finally the per-architecture-revision sets.
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                         MacroBuilder &Builder) const {
  // Target identification.
  Builder.defineMacro("__aarch64__");
  // For bare-metal.
  if (getTriple().getOS() == llvm::Triple::UnknownOS &&
      getTriple().isOSBinFormatELF())
    Builder.defineMacro("__ELF__");

  // Target properties.
  if (!getTriple().isOSWindows() && getTriple().isArch64Bit()) {
    Builder.defineMacro("_LP64");
    Builder.defineMacro("__LP64__");
  }

  // Expand the code model into e.g. __AARCH64_CMODEL_SMALL__ ("default"
  // maps to "small").
  std::string CodeModel = getTargetOpts().CodeModel;
  if (CodeModel == "default")
    CodeModel = "small";
  for (char &c : CodeModel)
    c = toupper(c);
  Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");

  // ACLE predefines. Many can only have one possible value on v8 AArch64.
  Builder.defineMacro("__ARM_ACLE", "200");
  Builder.defineMacro("__ARM_ARCH", "8");
  Builder.defineMacro("__ARM_ARCH_PROFILE", "'A'");

  Builder.defineMacro("__ARM_64BIT_STATE", "1");
  Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
  Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");

  Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
  Builder.defineMacro("__ARM_FEATURE_FMA", "1");
  // 0xF: exclusive accesses at byte, halfword, word and doubleword size.
  Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
  Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
  Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
  Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
  Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");

  Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");

  // 0xe implies support for half, single and double precision operations.
  Builder.defineMacro("__ARM_FP", "0xE");

  // PCS specifies this for SysV variants, which is all we support. Other ABIs
  // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
  Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
  Builder.defineMacro("__ARM_FP16_ARGS", "1");

  if (Opts.UnsafeFPMath)
    Builder.defineMacro("__ARM_FP_FAST", "1");

  // wchar_t width in bytes; falls back to 4 when no explicit size was given.
  Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
                      Twine(Opts.WCharSize ? Opts.WCharSize : 4));

  Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");

  if (FPU & NeonMode) {
    Builder.defineMacro("__ARM_NEON", "1");
    // 64-bit NEON supports half, single and double precision operations.
    Builder.defineMacro("__ARM_NEON_FP", "0xE");
  }

  // SVE/SVE2 feature macros; the SVE2 sub-extensions additionally require
  // base SVE2.
  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE", "1");

  if (HasSVE2)
    Builder.defineMacro("__ARM_FEATURE_SVE2", "1");

  if (HasSVE2 && HasSVE2AES)
    Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");

  if (HasSVE2 && HasSVE2BitPerm)
    Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");

  if (HasSVE2 && HasSVE2SHA3)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");

  if (HasSVE2 && HasSVE2SM4)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");

  if (HasCRC)
    Builder.defineMacro("__ARM_FEATURE_CRC32", "1");

  // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
  // macros for AES, SHA2, SHA3 and SM4
  if (HasAES && HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");

  if (HasAES)
    Builder.defineMacro("__ARM_FEATURE_AES", "1");

  if (HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_SHA2", "1");

  if (HasSHA3) {
    Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
    Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
  }

  if (HasSM4) {
    Builder.defineMacro("__ARM_FEATURE_SM3", "1");
    Builder.defineMacro("__ARM_FEATURE_SM4", "1");
  }

  if (HasUnaligned)
    Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");

  // FP16 vector arithmetic needs both NEON and the fullfp16 extension;
  // scalar arithmetic needs only the latter.
  if ((FPU & NeonMode) && HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
  if (HasFullFP16)
   Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");

  if (HasDotProd)
    Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");

  if (HasMTE)
    Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");

  if (HasTME)
    Builder.defineMacro("__ARM_FEATURE_TME", "1");

  if (HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");

  if (HasLSE)
    Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");

  if (HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_BF16", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
    Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
  }

  if ((FPU & SveMode) && HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
  }

  if ((FPU & SveMode) && HasMatmulFP64)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");

  if ((FPU & SveMode) && HasMatmulFP32)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");

  if ((FPU & SveMode) && HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");

  if ((FPU & NeonMode) && HasFP16FML)
    Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");

  if (Opts.hasSignReturnAddress()) {
    // Bitmask:
    // 0: Protection using the A key
    // 1: Protection using the B key
    // 2: Protection including leaf functions
    unsigned Value = 0;

    if (Opts.isSignReturnAddressWithAKey())
      Value |= (1 << 0);
    else
      Value |= (1 << 1);

    if (Opts.isSignReturnAddressScopeAll())
      Value |= (1 << 2);

    Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
  }

  if (Opts.BranchTargetEnforcement)
    Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");

  if (HasLS64)
    Builder.defineMacro("__ARM_FEATURE_LS64", "1");

  if (HasRandGen)
    Builder.defineMacro("__ARM_FEATURE_RNG", "1");

  // Per-architecture-revision macro sets; each case transitively includes
  // the earlier revisions.
  switch (ArchKind) {
  default:
    break;
  case llvm::AArch64::ArchKind::ARMV8_1A:
    getTargetDefinesARMV81A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_2A:
    getTargetDefinesARMV82A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_3A:
    getTargetDefinesARMV83A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_4A:
    getTargetDefinesARMV84A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_5A:
    getTargetDefinesARMV85A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_6A:
    getTargetDefinesARMV86A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_7A:
    getTargetDefinesARMV87A(Opts, Builder);
    break;
  }

  // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");

  // -msve-vector-bits=N fixes the SVE vector length and enables the
  // fixed-length vector operators.
  if (Opts.ArmSveVectorBits) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.ArmSveVectorBits));
    Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS");
  }
}
421
422ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
423  return llvm::makeArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
424                                             Builtin::FirstTSBuiltin);
425}
426
427bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
428  return Feature == "aarch64" || Feature == "arm64" || Feature == "arm" ||
429         (Feature == "neon" && (FPU & NeonMode)) ||
430         ((Feature == "sve" || Feature == "sve2" || Feature == "sve2-bitperm" ||
431           Feature == "sve2-aes" || Feature == "sve2-sha3" ||
432           Feature == "sve2-sm4" || Feature == "f64mm" || Feature == "f32mm" ||
433           Feature == "i8mm" || Feature == "bf16") &&
434          (FPU & SveMode));
435}
436
437bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
438                                             DiagnosticsEngine &Diags) {
439  FPU = FPUMode;
440  HasCRC = false;
441  HasCrypto = false;
442  HasAES = false;
443  HasSHA2 = false;
444  HasSHA3 = false;
445  HasSM4 = false;
446  HasUnaligned = true;
447  HasFullFP16 = false;
448  HasDotProd = false;
449  HasFP16FML = false;
450  HasMTE = false;
451  HasTME = false;
452  HasLS64 = false;
453  HasRandGen = false;
454  HasMatMul = false;
455  HasBFloat16 = false;
456  HasSVE2 = false;
457  HasSVE2AES = false;
458  HasSVE2SHA3 = false;
459  HasSVE2SM4 = false;
460  HasSVE2BitPerm = false;
461  HasMatmulFP64 = false;
462  HasMatmulFP32 = false;
463  HasLSE = false;
464
465  ArchKind = llvm::AArch64::ArchKind::ARMV8A;
466
467  for (const auto &Feature : Features) {
468    if (Feature == "+neon")
469      FPU |= NeonMode;
470    if (Feature == "+sve") {
471      FPU |= SveMode;
472      HasFullFP16 = 1;
473    }
474    if (Feature == "+sve2") {
475      FPU |= SveMode;
476      HasFullFP16 = 1;
477      HasSVE2 = 1;
478    }
479    if (Feature == "+sve2-aes") {
480      FPU |= SveMode;
481      HasFullFP16 = 1;
482      HasSVE2 = 1;
483      HasSVE2AES = 1;
484    }
485    if (Feature == "+sve2-sha3") {
486      FPU |= SveMode;
487      HasFullFP16 = 1;
488      HasSVE2 = 1;
489      HasSVE2SHA3 = 1;
490    }
491    if (Feature == "+sve2-sm4") {
492      FPU |= SveMode;
493      HasFullFP16 = 1;
494      HasSVE2 = 1;
495      HasSVE2SM4 = 1;
496    }
497    if (Feature == "+sve2-bitperm") {
498      FPU |= SveMode;
499      HasFullFP16 = 1;
500      HasSVE2 = 1;
501      HasSVE2BitPerm = 1;
502    }
503    if (Feature == "+f32mm") {
504      FPU |= SveMode;
505      HasMatmulFP32 = true;
506    }
507    if (Feature == "+f64mm") {
508      FPU |= SveMode;
509      HasMatmulFP64 = true;
510    }
511    if (Feature == "+crc")
512      HasCRC = true;
513    if (Feature == "+crypto")
514      HasCrypto = true;
515    if (Feature == "+aes")
516      HasAES = true;
517    if (Feature == "+sha2")
518      HasSHA2 = true;
519    if (Feature == "+sha3") {
520      HasSHA2 = true;
521      HasSHA3 = true;
522    }
523    if (Feature == "+sm4")
524      HasSM4 = true;
525    if (Feature == "+strict-align")
526      HasUnaligned = false;
527    if (Feature == "+v8.1a")
528      ArchKind = llvm::AArch64::ArchKind::ARMV8_1A;
529    if (Feature == "+v8.2a")
530      ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
531    if (Feature == "+v8.3a")
532      ArchKind = llvm::AArch64::ArchKind::ARMV8_3A;
533    if (Feature == "+v8.4a")
534      ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
535    if (Feature == "+v8.5a")
536      ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
537    if (Feature == "+v8.6a")
538      ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
539    if (Feature == "+v8.7a")
540      ArchKind = llvm::AArch64::ArchKind::ARMV8_7A;
541    if (Feature == "+v8r")
542      ArchKind = llvm::AArch64::ArchKind::ARMV8R;
543    if (Feature == "+fullfp16")
544      HasFullFP16 = true;
545    if (Feature == "+dotprod")
546      HasDotProd = true;
547    if (Feature == "+fp16fml")
548      HasFP16FML = true;
549    if (Feature == "+mte")
550      HasMTE = true;
551    if (Feature == "+tme")
552      HasTME = true;
553    if (Feature == "+pauth")
554      HasPAuth = true;
555    if (Feature == "+i8mm")
556      HasMatMul = true;
557    if (Feature == "+bf16")
558      HasBFloat16 = true;
559    if (Feature == "+lse")
560      HasLSE = true;
561    if (Feature == "+ls64")
562      HasLS64 = true;
563    if (Feature == "+rand")
564      HasRandGen = true;
565    if (Feature == "+flagm")
566      HasFlagM = true;
567  }
568
569  setDataLayout();
570
571  return true;
572}
573
574TargetInfo::CallingConvCheckResult
575AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
576  switch (CC) {
577  case CC_C:
578  case CC_Swift:
579  case CC_PreserveMost:
580  case CC_PreserveAll:
581  case CC_OpenCLKernel:
582  case CC_AArch64VectorCall:
583  case CC_Win64:
584    return CCCR_OK;
585  default:
586    return CCCR_Warning;
587  }
588}
589
// __builtin_clz(0) is well-defined on AArch64 (CLZ of zero is defined), so
// report that a zero argument is not undefined behaviour.
bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
591
// Generic AArch64 uses the AAPCS64 structure-based va_list (Darwin and
// Windows subclasses override this with a char* va_list).
TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}
595
// Register names accepted in GCC-style inline assembly and
// __asm__("...") register variables.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",  "z8",  "z9",  "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",  "p8",  "p9",  "p10",
    "p11", "p12", "p13", "p14", "p15"
};
631
// Expose the register-name table above to the inline-assembly machinery.
ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::makeArrayRef(GCCRegNames);
}
635
// Alternate spellings accepted for registers in inline assembly; each entry
// maps one or more alias names onto the canonical name from GCCRegNames.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
674
// Expose the register-alias table above to the inline-assembly machinery.
ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::makeArrayRef(GCCRegAliases);
}
678
// Validate a single GCC inline-assembly constraint letter for AArch64.
// On success, Info is updated to record whether the operand may live in a
// register or memory; Name is advanced past multi-character constraints
// (the "Up[la]" SVE predicate forms).
bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  // Pure immediate constraints: no register or memory is involved.
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
      // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  }
  // Unreachable: every case above returns; kept to satisfy compilers that
  // cannot prove the switch is exhaustive over all char values.
  return false;
}
729
730bool AArch64TargetInfo::validateConstraintModifier(
731    StringRef Constraint, char Modifier, unsigned Size,
732    std::string &SuggestedModifier) const {
733  // Strip off constraint modifiers.
734  while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
735    Constraint = Constraint.substr(1);
736
737  switch (Constraint[0]) {
738  default:
739    return true;
740  case 'z':
741  case 'r': {
742    switch (Modifier) {
743    case 'x':
744    case 'w':
745      // For now assume that the person knows what they're
746      // doing with the modifier.
747      return true;
748    default:
749      // By default an 'r' constraint will be in the 'x'
750      // registers.
751      if (Size == 64)
752        return true;
753
754      SuggestedModifier = "w";
755      return false;
756    }
757  }
758  }
759}
760
// No registers are implicitly clobbered by inline assembly on AArch64.
const char *AArch64TargetInfo::getClobbers() const { return ""; }
762
763int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
764  if (RegNo == 0)
765    return 0;
766  if (RegNo == 1)
767    return 1;
768  return -1;
769}
770
// __int128 is available on all AArch64 targets.
bool AArch64TargetInfo::hasInt128Type() const { return true; }
772
// Little-endian AArch64: no changes over the generic base beyond the data
// layout chosen in setDataLayout() and the __AARCH64EL__ predefine.
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
776
777void AArch64leTargetInfo::setDataLayout() {
778  if (getTriple().isOSBinFormatMachO()) {
779    if(getTriple().isArch32Bit())
780      resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
781    else
782      resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
783  } else
784    resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
785}
786
// Little-endian marker plus everything from the generic AArch64 target.
void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
792
// Big-endian AArch64: differs from the base only in data layout and the
// big-endian predefines below.
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
796
// Big-endian markers plus everything from the generic AArch64 target.
void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
804
// Big-endian layout string; MachO has no big-endian AArch64 variant.
void AArch64beTargetInfo::setDataLayout() {
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}
809
// Windows on ARM64: override the AAPCS64 type model with the Windows LLP64
// conventions.
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  // Windows long double is plain IEEE double, not binary128.
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
827
828void WindowsARM64TargetInfo::setDataLayout() {
829  resetDataLayout(Triple.isOSBinFormatMachO()
830                      ? "e-m:o-i64:64-i128:128-n32:64-S128"
831                      : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
832                  Triple.isOSBinFormatMachO() ? "_" : "");
833}
834
// Windows uses a simple char* va_list rather than the AAPCS64 structure.
TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
839
840TargetInfo::CallingConvCheckResult
841WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
842  switch (CC) {
843  case CC_X86StdCall:
844  case CC_X86ThisCall:
845  case CC_X86FastCall:
846  case CC_X86VectorCall:
847    return CCCR_Ignore;
848  case CC_C:
849  case CC_OpenCLKernel:
850  case CC_PreserveMost:
851  case CC_PreserveAll:
852  case CC_Swift:
853  case CC_Win64:
854    return CCCR_OK;
855  default:
856    return CCCR_Warning;
857  }
858}
859
// MSVC-environment Windows/ARM64: same as the generic Windows target but
// with the Microsoft C++ ABI.
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
865
// Windows defines plus the MSVC architecture macro.
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  Builder.defineMacro("_M_ARM64", "1");
}
871
// Use Microsoft's x64-style argument-passing classification regardless of
// the clang-abi-compat setting.
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}
876
877unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
878  unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);
879
880  // MSVC does size based alignment for arm64 based on alignment section in
881  // below document, replicate that to keep alignment consistent with object
882  // files compiled by MSVC.
883  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
884  if (TypeSize >= 512) {              // TypeSize >= 64 bytes
885    Align = std::max(Align, 128u);    // align type at least 16 bytes
886  } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
887    Align = std::max(Align, 64u);     // align type at least 8 butes
888  } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
889    Align = std::max(Align, 32u);     // align type at least 4 bytes
890  }
891  return Align;
892}
893
// MinGW (GNU-environment) Windows/ARM64: Windows type model but the generic
// AArch64 (Itanium-family) C++ ABI.
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
899
// Darwin (macOS/iOS/watchOS) AArch64: Darwin-specific type model and C++
// ABI selection, including the 32-bit arm64_32/watchOS variant.
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  // Darwin long double is plain IEEE double, not binary128.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  // arm64_32 (watchOS) has its own bitfield rules and C++ ABI.
  if (getTriple().isArch32Bit()) {
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}
923
// Darwin-specific predefines (__arm64__, architecture markers, arm64e), then
// the common Darwin platform/version macros.
void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64_SIMD__");
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__LITTLE_ENDIAN__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  if (Triple.isArm64e())
    Builder.defineMacro("__arm64e__", "1");

  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}
943
// Darwin uses a simple char* va_list rather than the AAPCS64 structure.
TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}
948
// 64-bit RenderScript is aarch64: rewrite the triple's architecture to
// "aarch64" (keeping vendor/OS/environment) and mark the target.
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  IsRenderScriptTarget = true;
}
958
// RenderScript marker plus the little-endian AArch64 defines.
void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}
964