Searched refs:CUDA (Results 1 - 25 of 62) sorted by relevance

123

/openbsd-current/gnu/llvm/clang/lib/Frontend/
H A DFrontendOptions.cpp22 .Case("cui", InputKind(Language::CUDA).getPreprocessed())
33 .Cases("cu", "cuh", Language::CUDA)
H A DCompilerInstance.cpp114 // other side of CUDA/OpenMP/SYCL compilation.
116 (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
483 // Initialize the header search object. In CUDA compilations, we use the aux
485 // find the host headers in order to compile the CUDA code.
487 if (PP->getTargetInfo().getTriple().getOS() == llvm::Triple::CUDA &&
1067 if (getLangOpts().CUDA) {
1127 if (LangOpts.CUDA)
1128 return Language::CUDA;
H A DInitPreprocessor.cpp570 if (LangOpts.CUDA) {
1269 // CUDA device path compilation
1276 // We need to communicate this to our CUDA header wrapper, which in turn
1277 // informs the proper CUDA headers of this choice.
1330 if ((LangOpts.CUDA || LangOpts.OpenMPIsDevice || LangOpts.SYCLIsDevice) &&
H A DCompilerInvocation.cpp1866 // When linking CUDA bitcode, propagate function attributes so that
2666 case Language::CUDA:
2878 .Case("cuda", Language::CUDA)
3256 case Language::CUDA:
3257 // FIXME: What -std= values should be permitted for CUDA compilations?
3258 return S.getLanguage() == Language::CUDA ||
3292 case Language::CUDA:
3293 return "CUDA";
3428 !(Opts.OpenCL || (Opts.CUDA && Opts.CUDAIsDevice) || Opts.SYCLIsDevice))
3805 Opts.ConvergentFunctions = Opts.OpenCL || (Opts.CUDA
[all...]
/openbsd-current/gnu/llvm/llvm/lib/Target/NVPTX/
H A DNVPTXSubtarget.cpp40 // Set default to PTX 6.0 (CUDA 9.0)
56 // Enable handles for Kepler+, where CUDA supports indirect surfaces and
58 if (TM.getDrvInterface() == NVPTX::CUDA)
H A DNVPTX.h73 CUDA enumerator in enum:llvm::NVPTX::DrvInterface
H A DNVPTXLowerArgs.cpp30 // 1. Convert non-byval pointer arguments of CUDA kernels to pointers in the
132 return "Lower pointer arguments of CUDA kernels";
424 if (TM && TM->getDrvInterface() == NVPTX::CUDA) {
448 else if (TM && TM->getDrvInterface() == NVPTX::CUDA)
H A DNVPTXAsmPrinter.h243 // Since the address value should always be generic in CUDA C and always
252 NVPTX::CUDA) {}
H A DNVPTXTargetMachine.cpp132 drvInterface = NVPTX::CUDA;
/openbsd-current/gnu/llvm/clang/lib/Basic/
H A DLangStandards.cpp57 case Language::CUDA:
H A DLangOptions.cpp173 Opts.CUDA = Lang == Language::CUDA || Opts.HIP;
184 } else if (Opts.CUDA) {
H A DBuiltins.cpp116 /* CUDA Unsupported */
117 if (!LangOpts.CUDA && BuiltinInfo.Langs == CUDA_LANG)
H A DIdentifierTable.cpp202 return LangOpts.CUDA ? KS_Enabled : KS_Unknown;
/openbsd-current/gnu/llvm/clang/include/clang/Basic/
H A DLangStandard.h40 CUDA, member in class:clang::Language
/openbsd-current/gnu/llvm/clang/lib/Sema/
H A DSemaCUDA.cpp1 //===--- SemaCUDA.cpp - Semantic Analysis for CUDA constructs -------------===//
9 /// This file implements semantic analysis for CUDA constructs.
38 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
43 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
115 /// IdentifyCUDATarget - Determine the CUDA compilation target for this function
144 /// IdentifyTarget - Determine the CUDA compilation target for this variable.
178 // * CUDA Call preference table
218 // (a) Can't call global from some contexts until we support CUDA'
[all...]
H A DSemaLambda.cpp470 if (!MCtx && (getLangOpts().CUDA || getLangOpts().SYCLIsDevice ||
472 // Force lambda numbering in CUDA/HIP as we need to name lambdas following
1030 // CUDA lambdas get implicit host and device attributes.
1031 if (getLangOpts().CUDA)
1919 if (LangOpts.CUDA)
/openbsd-current/gnu/llvm/clang/lib/AST/
H A DMicrosoftCXXABI.cpp124 if (Context.getLangOpts().CUDA && Context.getAuxTargetInfo()) {
194 if (Context.getLangOpts().CUDA && Context.getAuxTargetInfo()) {
/openbsd-current/gnu/llvm/clang/lib/CodeGen/
H A DCGOpenMPRuntimeGPU.h331 /// Target codegen is specialized based on two data-sharing modes: CUDA, in
336 /// CUDA data sharing mode.
337 CUDA, enumerator in enum:clang::CodeGen::CGOpenMPRuntimeGPU::DataSharingMode
H A DCodeGenModule.cpp164 if (LangOpts.CUDA)
540 if (Context.getLangOpts().CUDA && CUDARuntime) {
607 // used by host functions and mark it as used for CUDA/HIP. This is necessary
647 // CUDA/HIP device and host libraries are different. Currently there is no
1007 // As CUDA builtin surface/texture types are replaced, skip generating TBAA
1559 // In CUDA/HIP device compilation with -fgpu-rdc, the mangled name of a
2756 // Emit CUDA/HIP static device variables referenced by host code only.
2759 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice)
3287 // If this is CUDA, be selective about which declarations we emit.
3288 if (LangOpts.CUDA) {
[all...]
H A DCGDeclCXX.cpp195 // For example, in the above CUDA code, the static local variable s has a
512 // According to E.2.3.1 in CUDA-7.5 Programming guide: __device__,
761 assert(!getLangOpts().CUDA || !getLangOpts().CUDAIsDevice ||
913 assert(!getLangOpts().CUDA || !getLangOpts().CUDAIsDevice ||
H A DCodeGenPGO.cpp800 // Skip CUDA/HIP kernel launch stub functions.
801 if (CGM.getLangOpts().CUDA && !CGM.getLangOpts().CUDAIsDevice &&
871 // Skip host-only functions in the CUDA device compilation and device-only
875 if (CGM.getLangOpts().CUDA &&
/openbsd-current/gnu/llvm/clang/include/clang/Sema/
H A DSemaInternal.h41 // Helper function to check whether D's attributes match current CUDA mode.
43 // ignored during this CUDA compilation pass.
45 if (!LangOpts.CUDA || !D)
/openbsd-current/gnu/llvm/clang/lib/Headers/
H A D__clang_cuda_math.h1 /*===---- __clang_cuda_math.h - Device-side CUDA math support --------------===
12 #error "This file is for CUDA compilation only."
17 #error This file is intended to be used with CUDA-9+ only.
/openbsd-current/gnu/llvm/clang/lib/ExtractAPI/Serialization/
H A DSymbolGraphSerializer.cpp199 case Language::CUDA:
/openbsd-current/gnu/llvm/llvm/include/llvm/TargetParser/
H A DTriple.h209 CUDA, // NVIDIA CUDA enumerator in enum:llvm::Triple::OSType

Completed in 476 milliseconds

123