//===--- NVPTX.cpp - Implement NVPTX target feature support ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements NVPTX TargetInfo objects.
//
//===----------------------------------------------------------------------===//

#include "NVPTX.h"
#include "Targets.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/ADT/StringSwitch.h"

using namespace clang;
using namespace clang::targets;

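// The X-macros below expand each entry of BuiltinsNVPTX.def into one
// Builtin::Info record. As a rough sketch (the builtin name and type string
// here are hypothetical, not taken from the .def file), an entry such as
//   TARGET_BUILTIN(__nvvm_example, "ii", "", "ptx60")
// would expand to
//   {"__nvvm_example", "ii", "", nullptr, ALL_LANGUAGES, "ptx60"},
// so the trailing Features string can gate the builtin on the PTX version
// selected in the constructor below.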
const Builtin::Info NVPTXTargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER)                                    \
  {#ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE},
#include "clang/Basic/BuiltinsNVPTX.def"
};

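// NVPTX has no fixed, user-visible physical register set for inline asm; the
// single placeholder name below appears to exist only so the generic GCC-style
// register/constraint machinery has something to validate against.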
const char *const NVPTXTargetInfo::GCCRegNames[] = {"r0"};

NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
                                 const TargetOptions &Opts,
                                 unsigned TargetPointerWidth)
    : TargetInfo(Triple) {
  assert((TargetPointerWidth == 32 || TargetPointerWidth == 64) &&
         "NVPTX only supports 32- and 64-bit modes.");

  PTXVersion = 32;
  for (const StringRef Feature : Opts.FeaturesAsWritten) {
    if (!Feature.startswith("+ptx"))
      continue;
    PTXVersion = llvm::StringSwitch<unsigned>(Feature)
                     .Case("+ptx64", 64)
                     .Case("+ptx63", 63)
                     .Case("+ptx61", 61)
                     .Case("+ptx60", 60)
                     .Case("+ptx50", 50)
                     .Case("+ptx43", 43)
                     .Case("+ptx42", 42)
                     .Case("+ptx41", 41)
                     .Case("+ptx40", 40)
                     .Case("+ptx32", 32)
                     .Default(32);
  }
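  // For example, compiling with "-target-feature +ptx61" selects PTX ISA 6.1;
  // any "+ptx*" feature string not listed above falls back to the 3.2 default.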

  TLSSupported = false;
  VLASupported = false;
  AddrSpaceMap = &NVPTXAddrSpaceMap;
  UseAddrSpaceMapMangling = true;

  // Define available target features
  // These must be defined in sorted order!
  NoAsmVariants = true;
  GPU = CudaArch::SM_20;

  if (TargetPointerWidth == 32)
    resetDataLayout("e-p:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64");
  else if (Opts.NVPTXUseShortPointers)
    resetDataLayout(
        "e-p3:32:32-p4:32:32-p5:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64");
  else
    resetDataLayout("e-i64:64-i128:128-v16:16-v32:32-n16:32:64");
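  // A rough sketch of the layouts above: in 32-bit mode every pointer is 32
  // bits wide. In 64-bit mode pointers default to 64 bits, but with
  // NVPTXUseShortPointers the p3/p4/p5 entries shrink pointers in the shared,
  // constant, and local address spaces to 32 bits, while generic and global
  // pointers remain 64-bit.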

  // If possible, get a TargetInfo for our host triple, so we can match its
  // types.
  llvm::Triple HostTriple(Opts.HostTriple);
  if (!HostTriple.isNVPTX())
    HostTarget.reset(AllocateTarget(llvm::Triple(Opts.HostTriple), Opts));

  // If no host target, make some guesses about the data layout and return.
  if (!HostTarget) {
    LongWidth = LongAlign = TargetPointerWidth;
    PointerWidth = PointerAlign = TargetPointerWidth;
    switch (TargetPointerWidth) {
    case 32:
      SizeType = TargetInfo::UnsignedInt;
      PtrDiffType = TargetInfo::SignedInt;
      IntPtrType = TargetInfo::SignedInt;
      break;
    case 64:
      SizeType = TargetInfo::UnsignedLong;
      PtrDiffType = TargetInfo::SignedLong;
      IntPtrType = TargetInfo::SignedLong;
      break;
    default:
      llvm_unreachable("TargetPointerWidth must be 32 or 64");
    }
    return;
  }

  // Copy properties from host target.
  PointerWidth = HostTarget->getPointerWidth(/* AddrSpace = */ 0);
  PointerAlign = HostTarget->getPointerAlign(/* AddrSpace = */ 0);
  BoolWidth = HostTarget->getBoolWidth();
  BoolAlign = HostTarget->getBoolAlign();
  IntWidth = HostTarget->getIntWidth();
  IntAlign = HostTarget->getIntAlign();
  HalfWidth = HostTarget->getHalfWidth();
  HalfAlign = HostTarget->getHalfAlign();
  FloatWidth = HostTarget->getFloatWidth();
  FloatAlign = HostTarget->getFloatAlign();
  DoubleWidth = HostTarget->getDoubleWidth();
  DoubleAlign = HostTarget->getDoubleAlign();
  LongWidth = HostTarget->getLongWidth();
  LongAlign = HostTarget->getLongAlign();
  LongLongWidth = HostTarget->getLongLongWidth();
  LongLongAlign = HostTarget->getLongLongAlign();
  MinGlobalAlign = HostTarget->getMinGlobalAlign(/* TypeSize = */ 0);
  NewAlign = HostTarget->getNewAlign();
  DefaultAlignForAttributeAligned =
      HostTarget->getDefaultAlignForAttributeAligned();
  SizeType = HostTarget->getSizeType();
  IntMaxType = HostTarget->getIntMaxType();
  PtrDiffType = HostTarget->getPtrDiffType(/* AddrSpace = */ 0);
  IntPtrType = HostTarget->getIntPtrType();
  WCharType = HostTarget->getWCharType();
  WIntType = HostTarget->getWIntType();
  Char16Type = HostTarget->getChar16Type();
  Char32Type = HostTarget->getChar32Type();
  Int64Type = HostTarget->getInt64Type();
  SigAtomicType = HostTarget->getSigAtomicType();
  ProcessIDType = HostTarget->getProcessIDType();

  UseBitFieldTypeAlignment = HostTarget->useBitFieldTypeAlignment();
  UseZeroLengthBitfieldAlignment = HostTarget->useZeroLengthBitfieldAlignment();
  UseExplicitBitFieldAlignment = HostTarget->useExplicitBitFieldAlignment();
  ZeroLengthBitfieldBoundary = HostTarget->getZeroLengthBitfieldBoundary();

  // This is a bit of a lie, but it controls __GCC_ATOMIC_XXX_LOCK_FREE, and
  // we need those macros to be identical on host and device, because (among
  // other things) they affect which standard library classes are defined, and
  // we need all classes to be defined on both the host and device.
  MaxAtomicInlineWidth = HostTarget->getMaxAtomicInlineWidth();

  // Properties intentionally not copied from host:
  // - LargeArrayMinWidth, LargeArrayAlign: Not visible across the
  //   host/device boundary.
  // - SuitableAlign: Not visible across the host/device boundary, and may
  //   correctly be different on host/device, e.g. if host has wider vector
  //   types than device.
  // - LongDoubleWidth, LongDoubleAlign: nvptx's long double type is the same
  //   as its double type, but that's not necessarily true on the host.
  //   TODO: nvcc emits a warning when using long double on device; we should
  //   do the same.
}

ArrayRef<const char *> NVPTXTargetInfo::getGCCRegNames() const {
  return llvm::makeArrayRef(GCCRegNames);
}

bool NVPTXTargetInfo::hasFeature(StringRef Feature) const {
  return llvm::StringSwitch<bool>(Feature)
      .Cases("ptx", "nvptx", true)
      .Default(false);
}

void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
                                       MacroBuilder &Builder) const {
  Builder.defineMacro("__PTX__");
  Builder.defineMacro("__NVPTX__");
  if (Opts.CUDAIsDevice) {
    // Set __CUDA_ARCH__ for the GPU specified.
    std::string CUDAArchCode = [this] {
      switch (GPU) {
      case CudaArch::GFX600:
      case CudaArch::GFX601:
      case CudaArch::GFX700:
      case CudaArch::GFX701:
      case CudaArch::GFX702:
      case CudaArch::GFX703:
      case CudaArch::GFX704:
      case CudaArch::GFX801:
      case CudaArch::GFX802:
      case CudaArch::GFX803:
      case CudaArch::GFX810:
      case CudaArch::GFX900:
      case CudaArch::GFX902:
      case CudaArch::GFX904:
      case CudaArch::GFX906:
      case CudaArch::GFX908:
      case CudaArch::GFX909:
      case CudaArch::GFX1010:
      case CudaArch::GFX1011:
      case CudaArch::GFX1012:
      case CudaArch::LAST:
        break;
      case CudaArch::UNKNOWN:
        assert(false && "No GPU arch when compiling CUDA device code.");
        return "";
      case CudaArch::SM_20:
        return "200";
      case CudaArch::SM_21:
        return "210";
      case CudaArch::SM_30:
        return "300";
      case CudaArch::SM_32:
        return "320";
      case CudaArch::SM_35:
        return "350";
      case CudaArch::SM_37:
        return "370";
      case CudaArch::SM_50:
        return "500";
      case CudaArch::SM_52:
        return "520";
      case CudaArch::SM_53:
        return "530";
      case CudaArch::SM_60:
        return "600";
      case CudaArch::SM_61:
        return "610";
      case CudaArch::SM_62:
        return "620";
      case CudaArch::SM_70:
        return "700";
      case CudaArch::SM_72:
        return "720";
      case CudaArch::SM_75:
        return "750";
      }
      llvm_unreachable("unhandled CudaArch");
    }();
    Builder.defineMacro("__CUDA_ARCH__", CUDAArchCode);
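    // CUDA headers and user code typically branch on this macro, e.g.
    //   #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
    //   ... code that requires sm_70 or newer ...
    //   #endif
    // Host-side compilation leaves __CUDA_ARCH__ undefined.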
  }
}

ArrayRef<Builtin::Info> NVPTXTargetInfo::getTargetBuiltins() const {
  return llvm::makeArrayRef(BuiltinInfo, clang::NVPTX::LastTSBuiltin -
                                             Builtin::FirstTSBuiltin);
}