/freebsd-13-stable/contrib/llvm-project/clang/lib/Frontend/
FrontendOptions.cpp
    22   .Case("cui", InputKind(Language::CUDA).getPreprocessed())
    32   .Case("cu", Language::CUDA)
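The two hits above are clang's extension-to-language map. A minimal standalone sketch of the same llvm::StringSwitch pattern (lookupExt is a hypothetical helper, not clang's API; the real code additionally marks "cui" inputs as preprocessed via getPreprocessed()):

    #include "llvm/ADT/StringSwitch.h"
    #include "clang/Basic/LangStandard.h"

    // Hypothetical helper mirroring the FrontendOptions.cpp hits: map a
    // file extension to the Language clang will parse the input as.
    static clang::Language lookupExt(llvm::StringRef Ext) {
      return llvm::StringSwitch<clang::Language>(Ext)
          .Case("cu", clang::Language::CUDA)   // CUDA source
          .Case("cui", clang::Language::CUDA)  // preprocessed CUDA
          .Default(clang::Language::Unknown);
    }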
CompilerInvocation.cpp
    1179  // When linking CUDA bitcode, propagate function attributes so that
    2033  .Case("cuda", Language::CUDA)
    2280  case Language::CUDA:
    2372  Opts.CUDA = IK.getLanguage() == Language::CUDA || Opts.HIP;
    2373  if (Opts.CUDA)
    2441  case Language::CUDA:
    2442  // FIXME: What -std= values should be permitted for CUDA compilations?
    2443  return S.getLanguage() == Language::CUDA ||
    2472  case Language::CUDA
    [all...]
CompilerInstance.cpp
    412   // Initialize the header search object. In CUDA compilations, we use the aux
    414   // find the host headers in order to compile the CUDA code.
    416   if (PP->getTargetInfo().getTriple().getOS() == llvm::Triple::CUDA &&
    922   // Create TargetInfo for the other side of CUDA/OpenMP/SYCL compilation.
    923   if ((getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
    1008  if (getLangOpts().CUDA) {
    1047  if (LangOpts.CUDA)
    1048  return Language::CUDA;
InitPreprocessor.cpp
    473   if (LangOpts.CUDA && !LangOpts.HIP)
    1089  // CUDA device path compilation
    1096  // We need to communicate this to our CUDA header wrapper, which in turn
    1097  // informs the proper CUDA headers of this choice.
    1154  if ((LangOpts.CUDA || LangOpts.OpenMPIsDevice || LangOpts.SYCLIsDevice) &&
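Among the predefines InitPreprocessor.cpp sets up on the CUDA device path is __CUDA_ARCH__, which exists only during device compilation; user code conventionally branches on it:

    __host__ __device__ static int compilingForDevice() {
    #ifdef __CUDA_ARCH__
      return 1;  // device pass: __CUDA_ARCH__ is predefined
    #else
      return 0;  // host pass: the macro is absent
    #endif
    }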
FrontendActions.cpp
    863   case Language::CUDA:
/freebsd-13-stable/contrib/llvm-project/llvm/lib/Target/NVPTX/
NVPTXSubtarget.cpp
    40   // Set default to PTX 3.2 (CUDA 5.5)
    56   // Enable handles for Kepler+, where CUDA supports indirect surfaces and
    58   if (TM.getDrvInterface() == NVPTX::CUDA)
NVPTXLowerArgs.cpp
    30    // 1. Convert non-byval pointer arguments of CUDA kernels to pointers in the
    128   return "Lower pointer arguments of CUDA kernels";
    211   if (TM && TM->getDrvInterface() == NVPTX::CUDA) {
    235   else if (TM && TM->getDrvInterface() == NVPTX::CUDA)
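The pass comment at line 30 concerns kernels like the one below: pointer parameters of a __global__ function can only point into device-global memory in the CUDA model, so the pass may safely retype them into the global address space (a source-level illustration of the precondition, not of the IR rewrite itself):

    __global__ void scale(float *p, float s) {
      // "p" arrives as a generic pointer; NVPTXLowerArgs rewrites such
      // non-byval CUDA kernel arguments into global-address-space
      // pointers so the accesses lower to ld.global/st.global.
      p[threadIdx.x] *= s;
    }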
NVPTX.h
    53   CUDA enumerator in enum:llvm::NVPTX::DrvInterface
NVPTXReplaceImageHandles.cpp
    148  if (TM.getDrvInterface() == NVPTX::CUDA) {
    149  // For CUDA, we preserve the param loads coming from function arguments
NVPTXAsmPrinter.h
    286  // Since the address value should always be generic in CUDA C and always
    295  NVPTX::CUDA) {}
NVPTXTargetMachine.cpp
    124  drvInterface = NVPTX::CUDA;
NVPTXAsmPrinter.cpp
    657   * Currently, this is valid for CUDA shared variables, which have local
    957   if (static_cast<NVPTXTargetMachine &>(TM).getDrvInterface() == NVPTX::CUDA) {
    1485  NVPTX::CUDA) {
/freebsd-13-stable/contrib/llvm-project/clang/include/clang/Basic/
LangStandard.h
    35   CUDA, member in class:clang::Language
/freebsd-13-stable/contrib/llvm-project/clang/lib/Sema/
SemaCUDA.cpp
    1    //===--- SemaCUDA.cpp - Semantic Analysis for CUDA constructs -------------===//
    9    /// This file implements semantic analysis for CUDA constructs.
    30   assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
    35   assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
    107  /// IdentifyCUDATarget - Determine the CUDA compilation target for this function
    135  // * CUDA Call preference table
    175  // (a) Can't call global from some contexts until we support CUDA's
    237  // Gets the CUDA functio [all...]
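IdentifyCUDATarget classifies each function as host, device, global, or host-device, and the call preference table at 135 decides which combinations are legal or preferred. A rough source-level summary of those rules (hedged; the actual table has more states than this sketch shows):

    __device__ int onDevice() { return 1; }
    __host__ int onHost() { return 2; }

    __global__ void kernel(int *out) {
      *out = onDevice();   // OK: __global__ code may call __device__
      // *out = onHost();  // error: wrong-side call rejected by SemaCUDA
    }

    __host__ __device__ int either() {
      // Same-side callees are preferred here; wrong-side calls are
      // diagnosed, possibly deferred until this function is actually
      // emitted for the offending side.
      return 0;
    }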
SemaLambda.cpp
    466   if (!MCtx && getLangOpts().CUDA) {
    467   // Force lambda numbering in CUDA/HIP as we need to name lambdas following
    994   // CUDA lambdas get implicit host and device attributes.
    995   if (getLangOpts().CUDA)
    1784  if (LangOpts.CUDA)
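Per the comment at 994, clang's CUDA mode gives lambdas implicit host and device attributes, so an unannotated lambda is callable from device code (this is clang's model; nvcc gates device lambdas behind --extended-lambda):

    __global__ void squares(int *out) {
      // The lambda's operator() is implicitly __host__ __device__,
      // so calling it from this kernel is legal.
      auto sq = [](int x) { return x * x; };
      out[threadIdx.x] = sq(threadIdx.x);
    }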
SemaExprCXX.cpp
    831   !getSourceManager().isInSystemHeader(OpLoc) && !getLangOpts().CUDA) {
    836   // Exceptions aren't allowed in CUDA device code.
    837   if (getLangOpts().CUDA)
    1510  // [CUDA] Ignore this function, if we can't call it.
    1512  if (getLangOpts().CUDA &&
    1519  if (Result || !getLangOpts().CUDA || PreventedBy.empty())
    1522  // In case of CUDA, return true if none of the 1-argument deallocator
    1588  // In CUDA, determine how much we'd like / dislike to call this.
    1589  if (S.getLangOpts().CUDA)
    1614  // Use CUDA cal [all...]
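The check at 836-837 is easy to demonstrate: throw (and try/catch) is rejected whenever it would be emitted for the device side:

    __device__ void mustNotThrow(int v) {
      // throw v;  // error: cannot use 'throw' in __device__ function
      (void)v;
    }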
SemaDecl.cpp
    2588  else if (S.getLangOpts().CUDA && isa<FunctionDecl>(D) &&
    2591  // CUDA target attributes are part of function signature for
    6597  // So do CUDA's host/device attributes.
    6598  if (S.getLangOpts().CUDA && (D->template hasAttr<CUDADeviceAttr>() ||
    7088  if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
    7191  if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
    7194  ((getLangOpts().CUDA && DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) ||
    7203  // CUDA B.2.5: "__shared__ and __constant__ variables have implied static
    9448  if (getLangOpts().CUDA && !isFunctionTemplateSpecialization)
    9715  if (getLangOpts().CUDA) {
    [all...]
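The hit at 7203 quotes CUDA B.2.5: __shared__ and __constant__ variables have implied static storage duration, which is why neither declaration below needs an explicit storage-class keyword:

    __constant__ float kCoeffs[16];   // implied static storage duration

    __global__ void weigh(const float *in, float *out) {
      __shared__ float tile[256];     // likewise implied static
      tile[threadIdx.x] = in[threadIdx.x];
      __syncthreads();
      out[threadIdx.x] = tile[threadIdx.x] * kCoeffs[threadIdx.x % 16];
    }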
/freebsd-13-stable/lib/clang/headers/ |
Makefile
    7    INCSGROUPS= INCS CUDA OMP PPC
    150  CUDA+= cuda_wrappers/algorithm
    151  CUDA+= cuda_wrappers/complex
    152  CUDA+= cuda_wrappers/new
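These cuda_wrappers headers shadow the corresponding C++ standard headers during CUDA compilation, adding device overloads so that, for example, std::min and std::max become usable in kernels; a small sketch assuming the wrapper is picked up through the normal include path:

    #include <algorithm>  // resolved via cuda_wrappers/algorithm in CUDA mode

    __global__ void clampAll(int *v, int lo, int hi) {
      v[threadIdx.x] = std::min(std::max(v[threadIdx.x], lo), hi);
    }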
/freebsd-13-stable/contrib/llvm-project/clang/lib/CodeGen/ |
CGOpenMPRuntimeNVPTX.h
    354  /// Target codegen is specialized based on two data-sharing modes: CUDA, in
    359  /// CUDA data sharing mode.
    360  CUDA, enumerator in enum:clang::CodeGen::CGOpenMPRuntimeNVPTX::DataSharingMode
CodeGenModule.cpp
    142   if (LangOpts.CUDA)
    412   if (Context.getLangOpts().CUDA && !Context.getLangOpts().CUDAIsDevice &&
    466   // CUDA/HIP device and host libraries are different. Currently there is no
    713   // As CUDA builtin surface/texture types are replaced, skip generating TBAA
    2545  // If this is CUDA, be selective about which declarations we emit.
    2546  if (LangOpts.CUDA) {
    2557  // device-side variables because the CUDA runtime needs their
    3798  if (LangOpts.CUDA && LangOpts.CUDAIsDevice) {
    3889  // Do not set COMDAT attribute for CUDA/HIP stub functions to prevent
    3947  // CUDA [all...]
CGCUDANV.cpp
    1    //===----- CGCUDANV.cpp - Interface to NVIDIA CUDA Runtime ----------------===//
    9    // This provides a class for CUDA code generation targeting the NVIDIA CUDA
    251  // CUDA 9.0+ uses a new way to launch kernels. Parameters are packed in a local
    385  /// Creates a function that sets up state on the host side for CUDA objects that
    387  /// the host side of kernel functions and device global variables with the CUDA
    444  // For HIP or CUDA 9.0+, device variable size is type of `size_t`.
    515  /// For CUDA:
    534  bool IsCUDA = CGM.getLangOpts().CUDA;
    558  // Register GPU binary with the CUDA runtim [all...]
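CGCUDANV.cpp is what turns the <<<...>>> launch syntax into a host-side stub; per the comment at 251, with CUDA 9.0+ the stub packs the kernel arguments into a local array and hands them to the runtime launch call (cudaLaunchKernel). A complete example of code that exercises this path (error checking elided):

    #include <cuda_runtime.h>

    __global__ void axpy(float a, const float *x, float *y) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      y[i] += a * x[i];
    }

    int main() {
      float *x, *y;
      cudaMalloc(&x, 256 * sizeof(float));
      cudaMalloc(&y, 256 * sizeof(float));
      // Lowered by CGCUDANV into a stub that packages (a, x, y) into a
      // local array and calls the runtime's launch entry point.
      axpy<<<1, 256>>>(2.0f, x, y);
      cudaDeviceSynchronize();
      cudaFree(x);
      cudaFree(y);
      return 0;
    }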
/freebsd-13-stable/contrib/llvm-project/clang/include/clang/Sema/ |
SemaInternal.h
    41   // Helper function to check whether D's attributes match current CUDA mode.
    43   // ignored during this CUDA compilation pass.
    45   if (!LangOpts.CUDA || !D)
/freebsd-13-stable/contrib/llvm-project/llvm/include/llvm/ADT/ |
Triple.h
    184  CUDA, // NVIDIA CUDA enumerator in enum:llvm::Triple::OSType
/freebsd-13-stable/contrib/llvm-project/clang/lib/Headers/ |
__clang_cuda_math.h
    1    /*===---- __clang_cuda_math.h - Device-side CUDA math support --------------===
    12   #error "This file is for CUDA compilation only."
    17   #error This file is intended to be used with CUDA-9+ only.
/freebsd-13-stable/contrib/llvm-project/llvm/lib/Support/ |
Triple.cpp
    191  case CUDA: return "cuda";
    513  .StartsWith("cuda", Triple::CUDA)
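Both hits are the same mapping in opposite directions: the "cuda" OS component of a target triple parses to Triple::CUDA (513), and the reverse lookup at 191 returns "cuda". This is what NVPTXTargetMachine.cpp consults (line 124 above) to select the CUDA driver interface. A host-side sketch:

    #include "llvm/ADT/Triple.h"
    #include <cassert>

    int main() {
      llvm::Triple T("nvptx64-nvidia-cuda");
      // Triple.cpp's StartsWith("cuda", Triple::CUDA) makes this hold.
      assert(T.getOS() == llvm::Triple::CUDA);
      return 0;
    }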