/*===---- __clang_cuda_runtime_wrapper.h - CUDA runtime support -------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

/*
 * WARNING: This header is intended to be directly -include'd by
 * the compiler and is not supposed to be included by users.
 *
 * CUDA headers are implemented in a way that currently makes it
 * impossible for user code to #include them directly when compiling with
 * Clang. They present different views of CUDA-supplied functions
 * depending on where in NVCC's compilation pipeline the headers are
 * included. Neither of these modes provides function definitions with
 * correct attributes, so we use the preprocessor to force the headers
 * into a form that Clang can use.
 *
 * Similarly to NVCC, which -include's cuda_runtime.h, Clang -include's
 * this file during every CUDA compilation.
 */
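/*
 * As a rough illustration (exact driver behavior may differ between Clang
 * versions), a compilation such as
 *
 *   clang++ -x cuda foo.cu --cuda-gpu-arch=sm_35 -c
 *
 * is processed as if the driver had passed
 *
 *   -include __clang_cuda_runtime_wrapper.h
 *
 * so everything below becomes visible before any user code or user #include.
 */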

#ifndef __CLANG_CUDA_RUNTIME_WRAPPER_H__
#define __CLANG_CUDA_RUNTIME_WRAPPER_H__

#if defined(__CUDA__) && defined(__clang__)

// Include some forward declares that must come before cmath.
#include <__clang_cuda_math_forward_declares.h>

// Include some standard headers to avoid CUDA headers including them
// while some required macros (like __THROW) are in a weird state.
#include <cmath>
#include <cstdlib>
#include <stdlib.h>

// Preserve common macros that will be changed below by us or by CUDA
// headers.
#pragma push_macro("__THROW")
#pragma push_macro("__CUDA_ARCH__")

// WARNING: Preprocessor hacks below are based on specific details of
// CUDA-7.x headers and are not expected to work with any other
// version of CUDA headers.
#include "cuda.h"
#if !defined(CUDA_VERSION)
#error "cuda.h did not define CUDA_VERSION"
#elif CUDA_VERSION < 7000
#error "Unsupported CUDA version!"
#endif

#pragma push_macro("__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__")
#if CUDA_VERSION >= 10000
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#endif

// Make the largest subset of device functions available during host
// compilation -- SM_35 for the time being.
#ifndef __CUDA_ARCH__
#define __CUDA_ARCH__ 350
#endif

#include "__clang_cuda_builtin_vars.h"

// No need for device_launch_parameters.h as __clang_cuda_builtin_vars.h above
// has taken care of builtin variables declared in the file.
#define __DEVICE_LAUNCH_PARAMETERS_H__

// {math,device}_functions.h only have declarations of the
// functions. We don't need them as we're going to pull in their
// definitions from .hpp files.
#define __DEVICE_FUNCTIONS_H__
#define __MATH_FUNCTIONS_H__
#define __COMMON_FUNCTIONS_H__
// device_functions_decls is replaced by __clang_cuda_device_functions.h
// included below.
#define __DEVICE_FUNCTIONS_DECLS_H__

#undef __CUDACC__
#if CUDA_VERSION < 9000
#define __CUDABE__
#else
#define __CUDA_LIBDEVICE__
#endif
// Disables definitions of device-side runtime support stubs in
// cuda_device_runtime_api.h
#include "driver_types.h"
#include "host_config.h"
#include "host_defines.h"

// Temporarily replace "nv_weak" with weak, so __attribute__((nv_weak)) in
// cuda_device_runtime_api.h ends up being __attribute__((weak)) which is the
// functional equivalent of what we need.
#pragma push_macro("nv_weak")
#define nv_weak weak
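// Illustrative sketch only: with the define above, a declaration in
// cuda_device_runtime_api.h along the lines of
//   extern "C" __device__ __attribute__((nv_weak)) void foo(void);
// is seen by Clang as
//   extern "C" __device__ __attribute__((weak)) void foo(void);
// The exact declarations vary between CUDA versions; `foo` is a placeholder.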
#undef __CUDABE__
#undef __CUDA_LIBDEVICE__
#define __CUDACC__
#include "cuda_runtime.h"

#pragma pop_macro("nv_weak")
#undef __CUDACC__
#define __CUDABE__

// CUDA headers use __nvvm_memcpy and __nvvm_memset which Clang does
// not have at the moment. Emulate them with a builtin memcpy/memset.
#define __nvvm_memcpy(s, d, n, a) __builtin_memcpy(s, d, n)
#define __nvvm_memset(d, c, n, a) __builtin_memset(d, c, n)
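// For example (illustration only), a CUDA-internal call such as
//   __nvvm_memcpy(__dst, __src, __n, /*align=*/8);
// now expands to
//   __builtin_memcpy(__dst, __src, __n);
// i.e. the trailing alignment hint is simply dropped.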

#if CUDA_VERSION < 9000
#include "crt/device_runtime.h"
#endif
#include "crt/host_runtime.h"
// device_runtime.h defines __cxa_* macros that will conflict with
// cxxabi.h.
// FIXME: redefine these as __device__ functions.
#undef __cxa_vec_ctor
#undef __cxa_vec_cctor
#undef __cxa_vec_dtor
#undef __cxa_vec_new
#undef __cxa_vec_new2
#undef __cxa_vec_new3
#undef __cxa_vec_delete2
#undef __cxa_vec_delete
#undef __cxa_vec_delete3
#undef __cxa_pure_virtual

// math_functions.hpp expects this host function to be defined on macOS, but it
// ends up not being there because of the games we play here.  Just define it
// ourselves; it's simple enough.
#ifdef __APPLE__
inline __host__ double __signbitd(double x) {
  return std::signbit(x);
}
#endif

// CUDA 9.1 no longer provides declarations for libdevice functions, so we need
// to provide our own.
#include <__clang_cuda_libdevice_declares.h>

// Wrappers for many device-side standard library functions became compiler
// builtins in CUDA-9 and have been removed from the CUDA headers. Clang now
// provides its own implementation of the wrappers.
#if CUDA_VERSION >= 9000
#include <__clang_cuda_device_functions.h>
#endif

// __THROW is redefined to be empty by device_functions_decls.h in CUDA. Clang's
// counterpart does not do it, so we need to make it empty here to keep the
// following CUDA includes happy.
#undef __THROW
#define __THROW

// CUDA 8.0.41 relies on __USE_FAST_MATH__ and __CUDA_PREC_DIV's values.
// Previous versions used to check whether they are defined or not.
// CU_DEVICE_INVALID macro is only defined in 8.0.41, so we use it
// here to detect the switch.

#if defined(CU_DEVICE_INVALID)
#if !defined(__USE_FAST_MATH__)
#define __USE_FAST_MATH__ 0
#endif

#if !defined(__CUDA_PREC_DIV)
#define __CUDA_PREC_DIV 0
#endif
#endif

// Temporarily poison __host__ macro to ensure it's not used by any of
// the headers we're about to include.
#pragma push_macro("__host__")
#define __host__ UNEXPECTED_HOST_ATTRIBUTE

// device_functions.hpp and math_functions*.hpp use 'static
// __forceinline__' (with no __device__) for definitions of device
// functions. Temporarily redefine __forceinline__ to include
// __device__.
#pragma push_macro("__forceinline__")
#define __forceinline__ __device__ __inline__ __attribute__((always_inline))
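// With the redefinition above, a declaration written in the CUDA headers as
//   static __forceinline__ float __do_something(float __a);
// is compiled as if it were
//   static __device__ __inline__ __attribute__((always_inline))
//   float __do_something(float __a);
// which gives the definition the __device__ attribute it would otherwise
// lack. (Illustration only; __do_something is a placeholder name.)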
#if CUDA_VERSION < 9000
#include "device_functions.hpp"
#endif

// math_functions.hpp uses the __USE_FAST_MATH__ macro to determine whether we
// get the slow-but-accurate or fast-but-inaccurate versions of functions like
// sin and exp.  This is controlled in clang by -fcuda-approx-transcendentals.
//
// device_functions.hpp uses __USE_FAST_MATH__ for a different purpose (fast vs.
// slow divides), so we need to scope our define carefully here.
#pragma push_macro("__USE_FAST_MATH__")
#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
#define __USE_FAST_MATH__ 1
#endif

#if CUDA_VERSION >= 9000
// CUDA-9.2 needs host-side memcpy for some host functions in
// device_functions.hpp
#if CUDA_VERSION >= 9020
#include <string.h>
#endif
#include "crt/math_functions.hpp"
#else
#include "math_functions.hpp"
#endif

#pragma pop_macro("__USE_FAST_MATH__")

#if CUDA_VERSION < 9000
#include "math_functions_dbl_ptx3.hpp"
#endif
#pragma pop_macro("__forceinline__")

// Pull in host-only functions that are only available when neither
// __CUDACC__ nor __CUDABE__ is defined.
#undef __MATH_FUNCTIONS_HPP__
#undef __CUDABE__
#if CUDA_VERSION < 9000
#include "math_functions.hpp"
#endif
// Alas, additional overloads for these functions are hard to get to.
// Considering that we only need these overloads for a few functions,
// we can provide them here.
static inline float rsqrt(float __a) { return rsqrtf(__a); }
static inline float rcbrt(float __a) { return rcbrtf(__a); }
static inline float sinpi(float __a) { return sinpif(__a); }
static inline float cospi(float __a) { return cospif(__a); }
static inline void sincospi(float __a, float *__b, float *__c) {
  return sincospif(__a, __b, __c);
}
static inline float erfcinv(float __a) { return erfcinvf(__a); }
static inline float normcdfinv(float __a) { return normcdfinvf(__a); }
static inline float normcdf(float __a) { return normcdff(__a); }
static inline float erfcx(float __a) { return erfcxf(__a); }

#if CUDA_VERSION < 9000
// For some reason the single-argument variant is not always declared by
// CUDA headers. Alas, device_functions.hpp included below needs it.
static inline __device__ void __brkpt(int __c) { __brkpt(); }
#endif

// Now include *.hpp with definitions of various GPU functions.  Alas,
// a lot of things get declared/defined with the __host__ attribute, which
// we don't want, so we have to define it out. We also have to include
// {device,math}_functions.hpp again in order to extract the other
// branch of #if/else inside.
#define __host__
#undef __CUDABE__
#define __CUDACC__
#if CUDA_VERSION >= 9000
// Some atomic functions became compiler builtins in CUDA-9, so we need their
// declarations.
#include "device_atomic_functions.h"
#endif
#undef __DEVICE_FUNCTIONS_HPP__
#include "device_atomic_functions.hpp"
#if CUDA_VERSION >= 9000
#include "crt/device_functions.hpp"
#include "crt/device_double_functions.hpp"
#else
#include "device_functions.hpp"
#define __CUDABE__
#include "device_double_functions.h"
#undef __CUDABE__
#endif
#include "sm_20_atomic_functions.hpp"
#include "sm_20_intrinsics.hpp"
#include "sm_32_atomic_functions.hpp"

// Don't include sm_30_intrinsics.h and sm_32_intrinsics.h.  These define the
// __shfl and __ldg intrinsics using inline (volatile) asm, but we want to
// define them using builtins so that the optimizer can reason about and across
// these instructions.  In particular, using intrinsics for ldg gets us the
// [addr+imm] addressing mode, which, although it doesn't actually exist in the
// hardware, seems to generate faster machine code because ptxas can more easily
// reason about our code.
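// As a rough sketch of the difference (the real definitions live in
// __clang_cuda_intrinsics.h, pulled in further below), Clang's __ldg looks
// more like
//   inline __device__ float __ldg(const float *__ptr) {
//     return __nvvm_ldg_f(__ptr);
//   }
// than like the inline-asm version in sm_32_intrinsics.hpp, so the backend
// can fold address arithmetic into the generated ld.global.nc.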

#if CUDA_VERSION >= 8000
#pragma push_macro("__CUDA_ARCH__")
#undef __CUDA_ARCH__
#include "sm_60_atomic_functions.hpp"
#include "sm_61_intrinsics.hpp"
#pragma pop_macro("__CUDA_ARCH__")
#endif

#undef __MATH_FUNCTIONS_HPP__

// math_functions.hpp defines ::signbit as a __host__ __device__ function.  This
// conflicts with libstdc++'s constexpr ::signbit, so we have to rename
// math_functions.hpp's ::signbit.  It's guarded by #undef signbit, but that's
// conditional on __GNUC__.  :)
#pragma push_macro("signbit")
#pragma push_macro("__GNUC__")
#undef __GNUC__
#define signbit __ignored_cuda_signbit
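// The net effect, roughly: whatever declaration of ::signbit
// math_functions.hpp provides, e.g. something of the form
//   ... bool signbit(double __a);   // exact signature varies by CUDA version
// is now parsed under the name __ignored_cuda_signbit and can no longer
// collide with libstdc++'s constexpr ::signbit.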

// CUDA-9 omits device-side definitions of some math functions if it sees the
// include guard from libstdc++'s math.h wrapper. We have to undo the header
// guard temporarily to get the definitions we need.
#pragma push_macro("_GLIBCXX_MATH_H")
#pragma push_macro("_LIBCPP_VERSION")
#if CUDA_VERSION >= 9000
#undef _GLIBCXX_MATH_H
// We also need to undo another guard that checks for libc++ 3.8+
#ifdef _LIBCPP_VERSION
#define _LIBCPP_VERSION 3700
#endif
#endif

#if CUDA_VERSION >= 9000
#include "crt/math_functions.hpp"
#else
#include "math_functions.hpp"
#endif
#pragma pop_macro("_GLIBCXX_MATH_H")
#pragma pop_macro("_LIBCPP_VERSION")
#pragma pop_macro("__GNUC__")
#pragma pop_macro("signbit")

#pragma pop_macro("__host__")

#include "texture_indirect_functions.h"

// Restore state of __CUDA_ARCH__ and __THROW we had on entry.
#pragma pop_macro("__CUDA_ARCH__")
#pragma pop_macro("__THROW")

// Set up compiler macros expected to be seen during compilation.
#undef __CUDABE__
#define __CUDACC__

extern "C" {
// Device-side CUDA system calls.
// http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability/index.html#system-calls
// We need these declarations and wrappers for device-side
// malloc/free/printf calls to work without relying on the
// -fcuda-disable-target-call-checks option.
__device__ int vprintf(const char *, const char *);
__device__ void free(void *) __attribute((nothrow));
__device__ void *malloc(size_t) __attribute((nothrow)) __attribute__((malloc));
__device__ void __assertfail(const char *__message, const char *__file,
                             unsigned __line, const char *__function,
                             size_t __charSize) __attribute__((noreturn));

// In order for the standard assert() macro on Linux to work we need to
// provide a device-side __assert_fail().
__device__ static inline void __assert_fail(const char *__message,
                                            const char *__file, unsigned __line,
                                            const char *__function) {
  __assertfail(__message, __file, __line, __function, sizeof(char));
}

// Clang will convert printf into vprintf, but we still need a
// device-side declaration for it.
__device__ int printf(const char *, ...);
} // extern "C"
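// For reference, a sketch of the printf-to-vprintf conversion mentioned
// above: a device-side call such as
//   printf("x = %d\n", x);
// is lowered by Clang's NVPTX printf handling into a vprintf call with the
// arguments packed into a local buffer, roughly
//   vprintf("x = %d\n", <pointer to a buffer containing x>);
// which is why the vprintf declaration above must be visible even though
// user code never calls it directly.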

// We also need device-side std::malloc and std::free.
namespace std {
__device__ static inline void free(void *__ptr) { ::free(__ptr); }
__device__ static inline void *malloc(size_t __size) {
  return ::malloc(__size);
}
} // namespace std

// Out-of-line implementations from __clang_cuda_builtin_vars.h.  These need to
// come after we've pulled in the definition of uint3 and dim3.

__device__ inline __cuda_builtin_threadIdx_t::operator uint3() const {
  uint3 ret;
  ret.x = x;
  ret.y = y;
  ret.z = z;
  return ret;
}

__device__ inline __cuda_builtin_blockIdx_t::operator uint3() const {
  uint3 ret;
  ret.x = x;
  ret.y = y;
  ret.z = z;
  return ret;
}

__device__ inline __cuda_builtin_blockDim_t::operator dim3() const {
  return dim3(x, y, z);
}

__device__ inline __cuda_builtin_gridDim_t::operator dim3() const {
  return dim3(x, y, z);
}

#include <__clang_cuda_cmath.h>
#include <__clang_cuda_intrinsics.h>
#include <__clang_cuda_complex_builtins.h>

// curand_mtgp32_kernel helpfully redeclares blockDim and threadIdx in host
// mode, giving them their "proper" types of dim3 and uint3.  This is
// incompatible with the types we give in __clang_cuda_builtin_vars.h.  As a
// hack, force-include the header (nvcc doesn't include it by default) but
// redefine dim3 and uint3 to our builtin types.  (Thankfully dim3 and uint3 are
// only used here for the redeclarations of blockDim and threadIdx.)
#pragma push_macro("dim3")
#pragma push_macro("uint3")
#define dim3 __cuda_builtin_blockDim_t
#define uint3 __cuda_builtin_threadIdx_t
#include "curand_mtgp32_kernel.h"
#pragma pop_macro("dim3")
#pragma pop_macro("uint3")
#pragma pop_macro("__USE_FAST_MATH__")
#pragma pop_macro("__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__")

// The CUDA runtime uses this undocumented function to access kernel launch
// configuration. The declaration is in crt/device_functions.h but that file
// includes a lot of other stuff we don't want. Instead, we'll provide our own
// declaration for it here.
#if CUDA_VERSION >= 9020
extern "C" unsigned __cudaPushCallConfiguration(dim3 gridDim, dim3 blockDim,
                                                size_t sharedMem = 0,
                                                void *stream = 0);
#endif
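// For context, a sketch of how this is used: when targeting CUDA >= 9.2,
// Clang lowers a kernel launch
//   kernel<<<grid, block, shmem, stream>>>(args...);
// into a call to __cudaPushCallConfiguration(grid, block, shmem, stream)
// followed by a call to the kernel's host-side stub, which pops the
// configuration and forwards it to the runtime's launch API. The declaration
// above is what lets that generated call type-check.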

#endif // __CUDA__
#endif // __CLANG_CUDA_RUNTIME_WRAPPER_H__