Searched refs:CUDA (Results 1 - 25 of 53) sorted by relevance


/freebsd-13-stable/contrib/llvm-project/clang/lib/Frontend/
FrontendOptions.cpp
22 .Case("cui", InputKind(Language::CUDA).getPreprocessed())
32 .Case("cu", Language::CUDA)
CompilerInvocation.cpp
1179 // When linking CUDA bitcode, propagate function attributes so that
2033 .Case("cuda", Language::CUDA)
2280 case Language::CUDA:
2372 Opts.CUDA = IK.getLanguage() == Language::CUDA || Opts.HIP;
2373 if (Opts.CUDA)
2441 case Language::CUDA:
2442 // FIXME: What -std= values should be permitted for CUDA compilations?
2443 return S.getLanguage() == Language::CUDA ||
2472 case Language::CUDA:
[all...]
CompilerInstance.cpp
412 // Initialize the header search object. In CUDA compilations, we use the aux
414 // find the host headers in order to compile the CUDA code.
416 if (PP->getTargetInfo().getTriple().getOS() == llvm::Triple::CUDA &&
922 // Create TargetInfo for the other side of CUDA/OpenMP/SYCL compilation.
923 if ((getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
1008 if (getLangOpts().CUDA) {
1047 if (LangOpts.CUDA)
1048 return Language::CUDA;
InitPreprocessor.cpp
473 if (LangOpts.CUDA && !LangOpts.HIP)
1089 // CUDA device path compilation
1096 // We need to communicate this to our CUDA header wrapper, which in turn
1097 // informs the proper CUDA headers of this choice.
1154 if ((LangOpts.CUDA || LangOpts.OpenMPIsDevice || LangOpts.SYCLIsDevice) &&
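The InitPreprocessor.cpp hits above set up the predefined macros that tell CUDA sources which half of the split compilation they are in. As a minimal sketch (file and function names are hypothetical), code typically branches on __CUDA_ARCH__, which is defined only during the device-side pass:

    // arch_probe.cu -- hypothetical example of the macro split that
    // InitPreprocessor.cpp arranges. __CUDA_ARCH__ is only defined while
    // the device-side half of a CUDA compilation is running.
    #include <cstdio>

    __host__ __device__ void where_am_i() {
    #ifdef __CUDA_ARCH__
      // Device pass: __CUDA_ARCH__ encodes the target SM, e.g. 700 for sm_70.
      printf("device code, arch %d\n", __CUDA_ARCH__);
    #else
      printf("host code\n");
    #endif
    }

    __global__ void probe() { where_am_i(); }

    int main() {
      where_am_i();            // compiled against the host branch
      probe<<<1, 1>>>();       // compiled against the device branch
      cudaDeviceSynchronize();
      return 0;
    }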
FrontendActions.cpp
863 case Language::CUDA:
/freebsd-13-stable/contrib/llvm-project/llvm/lib/Target/NVPTX/
NVPTXSubtarget.cpp
40 // Set default to PTX 3.2 (CUDA 5.5)
56 // Enable handles for Kepler+, where CUDA supports indirect surfaces and
58 if (TM.getDrvInterface() == NVPTX::CUDA)
NVPTXLowerArgs.cpp
30 // 1. Convert non-byval pointer arguments of CUDA kernels to pointers in the
128 return "Lower pointer arguments of CUDA kernels";
211 if (TM && TM->getDrvInterface() == NVPTX::CUDA) {
235 else if (TM && TM->getDrvInterface() == NVPTX::CUDA)
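The NVPTXLowerArgs.cpp hits describe a pass that rewrites the pointer parameters of CUDA kernels: since kernel pointer arguments refer to device-visible memory, the pass can retarget their accesses away from the generic address space so the backend emits cheaper, specific memory operations. A hypothetical kernel of the shape the pass operates on:

    // saxpy.cu -- hypothetical example. The non-byval pointer parameters
    // x and y are exactly what NVPTXLowerArgs rewrites; after the pass the
    // accesses below can compile to specific (non-generic) loads and stores.
    __global__ void saxpy(int n, float a, const float *x, float *y) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n)
        y[i] = a * x[i] + y[i];
    }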
NVPTX.h
53 CUDA enumerator in enum:llvm::NVPTX::DrvInterface
NVPTXReplaceImageHandles.cpp
148 if (TM.getDrvInterface() == NVPTX::CUDA) {
149 // For CUDA, we preserve the param loads coming from function arguments
NVPTXAsmPrinter.h
286 // Since the address value should always be generic in CUDA C and always
295 NVPTX::CUDA) {}
NVPTXTargetMachine.cpp
124 drvInterface = NVPTX::CUDA;
NVPTXAsmPrinter.cpp
657 * Currently, this is valid for CUDA shared variables, which have local
957 if (static_cast<NVPTXTargetMachine &>(TM).getDrvInterface() == NVPTX::CUDA) {
1485 NVPTX::CUDA) {
/freebsd-13-stable/contrib/llvm-project/clang/include/clang/Basic/
LangStandard.h
35 CUDA, member in class:clang::Language
/freebsd-13-stable/contrib/llvm-project/clang/lib/Sema/
SemaCUDA.cpp
1 //===--- SemaCUDA.cpp - Semantic Analysis for CUDA constructs -------------===//
9 /// This file implements semantic analysis for CUDA constructs.
30 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
35 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
107 /// IdentifyCUDATarget - Determine the CUDA compilation target for this function
135 // * CUDA Call preference table
175 // (a) Can't call global from some contexts until we support CUDA's
237 // Gets the CUDA function
[all...]
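The SemaCUDA.cpp hits implement target identification (IdentifyCUDATarget) and the call preference table that together decide which cross-target calls are legal. A hypothetical illustration of the basic rules:

    // targets.cu -- hypothetical example of the host/device call rules
    // enforced by SemaCUDA.cpp.
    __device__ int dev_only() { return 1; }
    __host__ int host_only()  { return 2; }
    __host__ __device__ int both() { return 3; }

    __global__ void kernel(int *out) {
      *out = dev_only() + both();  // OK: device -> device, device -> host+device
      // *out += host_only();      // error: __host__ function called from
      //                           //        __global__ (device) code
    }

    int main() {
      int v = host_only() + both();  // OK: host -> host, host -> host+device
      // dev_only();                 // error: __device__ function called from host
      return v == 5 ? 0 : 1;
    }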
SemaLambda.cpp
466 if (!MCtx && getLangOpts().CUDA) {
467 // Force lambda numbering in CUDA/HIP as we need to name lambdas following
994 // CUDA lambdas get implicit host and device attributes.
995 if (getLangOpts().CUDA)
1784 if (LangOpts.CUDA)
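The SemaLambda.cpp hits reflect the clang rule quoted above: in CUDA mode, unannotated lambdas receive implicit host and device attributes, so a single closure can be used on either side. A hypothetical sketch:

    // lambda.cu -- hypothetical example; relies on clang's rule that CUDA
    // lambdas get implicit __host__ __device__ attributes.
    #include <cstdio>

    __global__ void run(int *out) {
      auto square = [](int v) { return v * v; };  // implicitly host+device
      *out = square(7);
    }

    int main() {
      auto square = [](int v) { return v * v; };  // same shape works on the host
      printf("%d\n", square(7));
      return 0;
    }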
SemaExprCXX.cpp
831 !getSourceManager().isInSystemHeader(OpLoc) && !getLangOpts().CUDA) {
836 // Exceptions aren't allowed in CUDA device code.
837 if (getLangOpts().CUDA)
1510 // [CUDA] Ignore this function, if we can't call it.
1512 if (getLangOpts().CUDA &&
1519 if (Result || !getLangOpts().CUDA || PreventedBy.empty())
1522 // In case of CUDA, return true if none of the 1-argument deallocator
1588 // In CUDA, determine how much we'd like / dislike to call this.
1589 if (S.getLangOpts().CUDA)
1614 // Use CUDA call
[all...]
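Among other things, the SemaExprCXX.cpp hits reject C++ exceptions in device code. A hypothetical example of the construct being diagnosed:

    // throw.cu -- hypothetical example. Exceptions aren't allowed in CUDA
    // device code, so the commented line is an error when it would execute
    // in a __device__ or __global__ function.
    __global__ void kernel(int *out, int v) {
      // if (v < 0) throw 1;  // error: cannot use 'throw' in __global__ function
      *out = v;
    }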
SemaDecl.cpp
2588 else if (S.getLangOpts().CUDA && isa<FunctionDecl>(D) &&
2591 // CUDA target attributes are part of function signature for
6597 // So do CUDA's host/device attributes.
6598 if (S.getLangOpts().CUDA && (D->template hasAttr<CUDADeviceAttr>() ||
7088 if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
7191 if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
7194 ((getLangOpts().CUDA && DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) ||
7203 // CUDA B.2.5: "__shared__ and __constant__ variables have implied static
9448 if (getLangOpts().CUDA && !isFunctionTemplateSpecialization)
9715 if (getLangOpts().CUDA) {
[all...]
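The SemaDecl.cpp hits include the CUDA B.2.5 rule quoted above: __shared__ and __constant__ variables have implied static storage duration. A hypothetical kernel showing both:

    // storage.cu -- hypothetical example of CUDA B.2.5: __shared__ and
    // __constant__ variables have implied static storage duration.
    __constant__ float coeffs[4];        // one copy in constant memory

    __global__ void smooth(const float *in, float *out, int n) {
      __shared__ float tile[256];        // one copy per thread block
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n)
        tile[threadIdx.x] = in[i];
      __syncthreads();                   // barrier kept outside the divergent branch
      if (i < n)
        out[i] = tile[threadIdx.x] * coeffs[0];
    }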
/freebsd-13-stable/lib/clang/headers/
Makefile
7 INCSGROUPS= INCS CUDA OMP PPC
150 CUDA+= cuda_wrappers/algorithm
151 CUDA+= cuda_wrappers/complex
152 CUDA+= cuda_wrappers/new
/freebsd-13-stable/contrib/llvm-project/clang/lib/CodeGen/
CGOpenMPRuntimeNVPTX.h
354 /// Target codegen is specialized based on two data-sharing modes: CUDA, in
359 /// CUDA data sharing mode.
360 CUDA, enumerator in enum:clang::CodeGen::CGOpenMPRuntimeNVPTX::DataSharingMode
CodeGenModule.cpp
142 if (LangOpts.CUDA)
412 if (Context.getLangOpts().CUDA && !Context.getLangOpts().CUDAIsDevice &&
466 // CUDA/HIP device and host libraries are different. Currently there is no
713 // As CUDA builtin surface/texture types are replaced, skip generating TBAA
2545 // If this is CUDA, be selective about which declarations we emit.
2546 if (LangOpts.CUDA) {
2557 // device-side variables because the CUDA runtime needs their
3798 if (LangOpts.CUDA && LangOpts.CUDAIsDevice) {
3889 // Do not set COMDAT attribute for CUDA/HIP stub functions to prevent
3947 // CUDA
[all...]
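The CodeGenModule.cpp hits show how each half of a CUDA compilation emits only the declarations that belong to its side. A hypothetical illustration:

    // emit.cu -- hypothetical example of selective emission. The device-side
    // pass emits on_device() and the kernel body; the host-side pass emits
    // on_host() plus a stub for kernel() that registers and launches it.
    __device__ int on_device(int v) { return v + 1; }
    int on_host(int v) { return v - 1; }

    __global__ void kernel(int *out) {
      *out = on_device(*out);
    }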
CGCUDANV.cpp
1 //===----- CGCUDANV.cpp - Interface to NVIDIA CUDA Runtime ----------------===//
9 // This provides a class for CUDA code generation targeting the NVIDIA CUDA
251 // CUDA 9.0+ uses new way to launch kernels. Parameters are packed in a local
385 /// Creates a function that sets up state on the host side for CUDA objects that
387 /// the host side of kernel functions and device global variables with the CUDA
444 // For HIP or CUDA 9.0+, device variable size is type of `size_t`.
515 /// For CUDA:
534 bool IsCUDA = CGM.getLangOpts().CUDA;
558 // Register GPU binary with the CUDA runtime
[all...]
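The CGCUDANV.cpp hits cover the lowering of <<<...>>> launches. Per the comment at line 251, CUDA 9.0+ packs kernel parameters into a local array before launching. A hypothetical sketch of roughly what the generated host code does, written against the public runtime API:

    // launch.cu -- hypothetical sketch of the CUDA 9.0+ launch scheme that
    // CGCUDANV.cpp emits for scale<<<1, 256>>>(d_data, factor): parameter
    // addresses are packed into a local array and passed to cudaLaunchKernel.
    #include <cuda_runtime.h>

    __global__ void scale(float *data, float factor) {
      data[threadIdx.x] *= factor;
    }

    int main() {
      float *d_data = nullptr;
      cudaMalloc(&d_data, 256 * sizeof(float));
      float factor = 2.0f;

      void *args[] = { &d_data, &factor };   // one slot per kernel parameter
      cudaLaunchKernel((void *)scale, dim3(1), dim3(256), args,
                       /*sharedMem=*/0, /*stream=*/0);

      cudaDeviceSynchronize();
      cudaFree(d_data);
      return 0;
    }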
/freebsd-13-stable/contrib/llvm-project/clang/include/clang/Sema/
SemaInternal.h
41 // Helper function to check whether D's attributes match current CUDA mode.
43 // ignored during this CUDA compilation pass.
45 if (!LangOpts.CUDA || !D)
/freebsd-13-stable/contrib/llvm-project/llvm/include/llvm/ADT/
Triple.h
184 CUDA, // NVIDIA CUDA enumerator in enum:llvm::Triple::OSType
/freebsd-13-stable/contrib/llvm-project/clang/lib/Headers/
__clang_cuda_math.h
1 /*===---- __clang_cuda_math.h - Device-side CUDA math support --------------===
12 #error "This file is for CUDA compilation only."
17 #error This file is intended to be used with CUDA-9+ only.
/freebsd-13-stable/contrib/llvm-project/llvm/lib/Support/
Triple.cpp
191 case CUDA: return "cuda";
513 .StartsWith("cuda", Triple::CUDA)

Completed in 302 milliseconds
