1//===---- CGOpenMPRuntimeNVPTX.cpp - Interface to OpenMP NVPTX Runtimes ---===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8// 9// This provides a class for OpenMP runtime code generation specialized to NVPTX 10// targets. 11// 12//===----------------------------------------------------------------------===// 13 14#include "CGOpenMPRuntimeNVPTX.h" 15#include "CodeGenFunction.h" 16#include "clang/AST/Attr.h" 17#include "clang/AST/DeclOpenMP.h" 18#include "clang/AST/StmtOpenMP.h" 19#include "clang/AST/StmtVisitor.h" 20#include "clang/Basic/Cuda.h" 21#include "llvm/ADT/SmallPtrSet.h" 22#include "llvm/IR/IntrinsicsNVPTX.h" 23 24using namespace clang; 25using namespace CodeGen; 26using namespace llvm::omp; 27 28namespace { 29enum OpenMPRTLFunctionNVPTX { 30 /// Call to void __kmpc_kernel_init(kmp_int32 thread_limit, 31 /// int16_t RequiresOMPRuntime); 32 OMPRTL_NVPTX__kmpc_kernel_init, 33 /// Call to void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized); 34 OMPRTL_NVPTX__kmpc_kernel_deinit, 35 /// Call to void __kmpc_spmd_kernel_init(kmp_int32 thread_limit, 36 /// int16_t RequiresOMPRuntime, int16_t RequiresDataSharing); 37 OMPRTL_NVPTX__kmpc_spmd_kernel_init, 38 /// Call to void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime); 39 OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2, 40 /// Call to void __kmpc_kernel_prepare_parallel(void 41 /// *outlined_function); 42 OMPRTL_NVPTX__kmpc_kernel_prepare_parallel, 43 /// Call to bool __kmpc_kernel_parallel(void **outlined_function); 44 OMPRTL_NVPTX__kmpc_kernel_parallel, 45 /// Call to void __kmpc_kernel_end_parallel(); 46 OMPRTL_NVPTX__kmpc_kernel_end_parallel, 47 /// Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32 48 /// global_tid); 49 OMPRTL_NVPTX__kmpc_serialized_parallel, 50 /// Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32 51 /// global_tid); 52 OMPRTL_NVPTX__kmpc_end_serialized_parallel, 53 /// Call to int32_t __kmpc_shuffle_int32(int32_t element, 54 /// int16_t lane_offset, int16_t warp_size); 55 OMPRTL_NVPTX__kmpc_shuffle_int32, 56 /// Call to int64_t __kmpc_shuffle_int64(int64_t element, 57 /// int16_t lane_offset, int16_t warp_size); 58 OMPRTL_NVPTX__kmpc_shuffle_int64, 59 /// Call to __kmpc_nvptx_parallel_reduce_nowait_v2(ident_t *loc, kmp_int32 60 /// global_tid, kmp_int32 num_vars, size_t reduce_size, void* reduce_data, 61 /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t 62 /// lane_offset, int16_t shortCircuit), 63 /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num)); 64 OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2, 65 /// Call to __kmpc_nvptx_teams_reduce_nowait_v2(ident_t *loc, kmp_int32 66 /// global_tid, void *global_buffer, int32_t num_of_records, void* 67 /// reduce_data, 68 /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t 69 /// lane_offset, int16_t shortCircuit), 70 /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num), void 71 /// (*kmp_ListToGlobalCpyFctPtr)(void *buffer, int idx, void *reduce_data), 72 /// void (*kmp_GlobalToListCpyFctPtr)(void *buffer, int idx, 73 /// void *reduce_data), void (*kmp_GlobalToListCpyPtrsFctPtr)(void *buffer, 74 /// int idx, void *reduce_data), void (*kmp_GlobalToListRedFctPtr)(void 75 /// *buffer, int idx, void 
*reduce_data)); 76 OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2, 77 /// Call to __kmpc_nvptx_end_reduce_nowait(int32_t global_tid); 78 OMPRTL_NVPTX__kmpc_end_reduce_nowait, 79 /// Call to void __kmpc_data_sharing_init_stack(); 80 OMPRTL_NVPTX__kmpc_data_sharing_init_stack, 81 /// Call to void __kmpc_data_sharing_init_stack_spmd(); 82 OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd, 83 /// Call to void* __kmpc_data_sharing_coalesced_push_stack(size_t size, 84 /// int16_t UseSharedMemory); 85 OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack, 86 /// Call to void* __kmpc_data_sharing_push_stack(size_t size, int16_t 87 /// UseSharedMemory); 88 OMPRTL_NVPTX__kmpc_data_sharing_push_stack, 89 /// Call to void __kmpc_data_sharing_pop_stack(void *a); 90 OMPRTL_NVPTX__kmpc_data_sharing_pop_stack, 91 /// Call to void __kmpc_begin_sharing_variables(void ***args, 92 /// size_t n_args); 93 OMPRTL_NVPTX__kmpc_begin_sharing_variables, 94 /// Call to void __kmpc_end_sharing_variables(); 95 OMPRTL_NVPTX__kmpc_end_sharing_variables, 96 /// Call to void __kmpc_get_shared_variables(void ***GlobalArgs) 97 OMPRTL_NVPTX__kmpc_get_shared_variables, 98 /// Call to uint16_t __kmpc_parallel_level(ident_t *loc, kmp_int32 99 /// global_tid); 100 OMPRTL_NVPTX__kmpc_parallel_level, 101 /// Call to int8_t __kmpc_is_spmd_exec_mode(); 102 OMPRTL_NVPTX__kmpc_is_spmd_exec_mode, 103 /// Call to void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode, 104 /// const void *buf, size_t size, int16_t is_shared, const void **res); 105 OMPRTL_NVPTX__kmpc_get_team_static_memory, 106 /// Call to void __kmpc_restore_team_static_memory(int16_t 107 /// isSPMDExecutionMode, int16_t is_shared); 108 OMPRTL_NVPTX__kmpc_restore_team_static_memory, 109 /// Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid); 110 OMPRTL__kmpc_barrier, 111 /// Call to void __kmpc_barrier_simple_spmd(ident_t *loc, kmp_int32 112 /// global_tid); 113 OMPRTL__kmpc_barrier_simple_spmd, 114 /// Call to int32_t __kmpc_warp_active_thread_mask(void); 115 OMPRTL_NVPTX__kmpc_warp_active_thread_mask, 116 /// Call to void __kmpc_syncwarp(int32_t Mask); 117 OMPRTL_NVPTX__kmpc_syncwarp, 118}; 119 120/// Pre(post)-action for different OpenMP constructs specialized for NVPTX. 
121class NVPTXActionTy final : public PrePostActionTy { 122 llvm::FunctionCallee EnterCallee = nullptr; 123 ArrayRef<llvm::Value *> EnterArgs; 124 llvm::FunctionCallee ExitCallee = nullptr; 125 ArrayRef<llvm::Value *> ExitArgs; 126 bool Conditional = false; 127 llvm::BasicBlock *ContBlock = nullptr; 128 129public: 130 NVPTXActionTy(llvm::FunctionCallee EnterCallee, 131 ArrayRef<llvm::Value *> EnterArgs, 132 llvm::FunctionCallee ExitCallee, 133 ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false) 134 : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee), 135 ExitArgs(ExitArgs), Conditional(Conditional) {} 136 void Enter(CodeGenFunction &CGF) override { 137 llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs); 138 if (Conditional) { 139 llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes); 140 auto *ThenBlock = CGF.createBasicBlock("omp_if.then"); 141 ContBlock = CGF.createBasicBlock("omp_if.end"); 142 // Generate the branch (If-stmt) 143 CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock); 144 CGF.EmitBlock(ThenBlock); 145 } 146 } 147 void Done(CodeGenFunction &CGF) { 148 // Emit the rest of blocks/branches 149 CGF.EmitBranch(ContBlock); 150 CGF.EmitBlock(ContBlock, true); 151 } 152 void Exit(CodeGenFunction &CGF) override { 153 CGF.EmitRuntimeCall(ExitCallee, ExitArgs); 154 } 155}; 156 157/// A class to track the execution mode when codegening directives within 158/// a target region. The appropriate mode (SPMD|NON-SPMD) is set on entry 159/// to the target region and used by containing directives such as 'parallel' 160/// to emit optimized code. 161class ExecutionRuntimeModesRAII { 162private: 163 CGOpenMPRuntimeNVPTX::ExecutionMode SavedExecMode = 164 CGOpenMPRuntimeNVPTX::EM_Unknown; 165 CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode; 166 bool SavedRuntimeMode = false; 167 bool *RuntimeMode = nullptr; 168 169public: 170 /// Constructor for Non-SPMD mode. 171 ExecutionRuntimeModesRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode) 172 : ExecMode(ExecMode) { 173 SavedExecMode = ExecMode; 174 ExecMode = CGOpenMPRuntimeNVPTX::EM_NonSPMD; 175 } 176 /// Constructor for SPMD mode. 177 ExecutionRuntimeModesRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode, 178 bool &RuntimeMode, bool FullRuntimeMode) 179 : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) { 180 SavedExecMode = ExecMode; 181 SavedRuntimeMode = RuntimeMode; 182 ExecMode = CGOpenMPRuntimeNVPTX::EM_SPMD; 183 RuntimeMode = FullRuntimeMode; 184 } 185 ~ExecutionRuntimeModesRAII() { 186 ExecMode = SavedExecMode; 187 if (RuntimeMode) 188 *RuntimeMode = SavedRuntimeMode; 189 } 190}; 191 192/// GPU Configuration: This information can be derived from cuda registers, 193/// however, providing compile time constants helps generate more efficient 194/// code. For all practical purposes this is fine because the configuration 195/// is the same for all known NVPTX architectures. 196enum MachineConfiguration : unsigned { 197 WarpSize = 32, 198 /// Number of bits required to represent a lane identifier, which is 199 /// computed as log_2(WarpSize). 200 LaneIDBits = 5, 201 LaneIDMask = WarpSize - 1, 202 203 /// Global memory alignment for performance. 204 GlobalMemoryAlignment = 128, 205 206 /// Maximal size of the shared memory buffer. 
207 SharedMemorySize = 128, 208}; 209 210static const ValueDecl *getPrivateItem(const Expr *RefExpr) { 211 RefExpr = RefExpr->IgnoreParens(); 212 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) { 213 const Expr *Base = ASE->getBase()->IgnoreParenImpCasts(); 214 while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base)) 215 Base = TempASE->getBase()->IgnoreParenImpCasts(); 216 RefExpr = Base; 217 } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) { 218 const Expr *Base = OASE->getBase()->IgnoreParenImpCasts(); 219 while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base)) 220 Base = TempOASE->getBase()->IgnoreParenImpCasts(); 221 while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base)) 222 Base = TempASE->getBase()->IgnoreParenImpCasts(); 223 RefExpr = Base; 224 } 225 RefExpr = RefExpr->IgnoreParenImpCasts(); 226 if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr)) 227 return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl()); 228 const auto *ME = cast<MemberExpr>(RefExpr); 229 return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl()); 230} 231 232 233static RecordDecl *buildRecordForGlobalizedVars( 234 ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls, 235 ArrayRef<const ValueDecl *> EscapedDeclsForTeams, 236 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> 237 &MappedDeclsFields, int BufSize) { 238 using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>; 239 if (EscapedDecls.empty() && EscapedDeclsForTeams.empty()) 240 return nullptr; 241 SmallVector<VarsDataTy, 4> GlobalizedVars; 242 for (const ValueDecl *D : EscapedDecls) 243 GlobalizedVars.emplace_back( 244 CharUnits::fromQuantity(std::max( 245 C.getDeclAlign(D).getQuantity(), 246 static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))), 247 D); 248 for (const ValueDecl *D : EscapedDeclsForTeams) 249 GlobalizedVars.emplace_back(C.getDeclAlign(D), D); 250 llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) { 251 return L.first > R.first; 252 }); 253 254 // Build struct _globalized_locals_ty { 255 // /* globalized vars */[WarSize] align (max(decl_align, 256 // GlobalMemoryAlignment)) 257 // /* globalized vars */ for EscapedDeclsForTeams 258 // }; 259 RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty"); 260 GlobalizedRD->startDefinition(); 261 llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped( 262 EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end()); 263 for (const auto &Pair : GlobalizedVars) { 264 const ValueDecl *VD = Pair.second; 265 QualType Type = VD->getType(); 266 if (Type->isLValueReferenceType()) 267 Type = C.getPointerType(Type.getNonReferenceType()); 268 else 269 Type = Type.getNonReferenceType(); 270 SourceLocation Loc = VD->getLocation(); 271 FieldDecl *Field; 272 if (SingleEscaped.count(VD)) { 273 Field = FieldDecl::Create( 274 C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type, 275 C.getTrivialTypeSourceInfo(Type, SourceLocation()), 276 /*BW=*/nullptr, /*Mutable=*/false, 277 /*InitStyle=*/ICIS_NoInit); 278 Field->setAccess(AS_public); 279 if (VD->hasAttrs()) { 280 for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()), 281 E(VD->getAttrs().end()); 282 I != E; ++I) 283 Field->addAttr(*I); 284 } 285 } else { 286 llvm::APInt ArraySize(32, BufSize); 287 Type = C.getConstantArrayType(Type, ArraySize, nullptr, ArrayType::Normal, 288 0); 289 Field = FieldDecl::Create( 290 C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type, 291 C.getTrivialTypeSourceInfo(Type, SourceLocation()), 292 
/*BW=*/nullptr, /*Mutable=*/false, 293 /*InitStyle=*/ICIS_NoInit); 294 Field->setAccess(AS_public); 295 llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(), 296 static_cast<CharUnits::QuantityType>( 297 GlobalMemoryAlignment))); 298 Field->addAttr(AlignedAttr::CreateImplicit( 299 C, /*IsAlignmentExpr=*/true, 300 IntegerLiteral::Create(C, Align, 301 C.getIntTypeForBitwidth(32, /*Signed=*/0), 302 SourceLocation()), 303 {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned)); 304 } 305 GlobalizedRD->addDecl(Field); 306 MappedDeclsFields.try_emplace(VD, Field); 307 } 308 GlobalizedRD->completeDefinition(); 309 return GlobalizedRD; 310} 311 312/// Get the list of variables that can escape their declaration context. 313class CheckVarsEscapingDeclContext final 314 : public ConstStmtVisitor<CheckVarsEscapingDeclContext> { 315 CodeGenFunction &CGF; 316 llvm::SetVector<const ValueDecl *> EscapedDecls; 317 llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls; 318 llvm::SmallPtrSet<const Decl *, 4> EscapedParameters; 319 RecordDecl *GlobalizedRD = nullptr; 320 llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields; 321 bool AllEscaped = false; 322 bool IsForCombinedParallelRegion = false; 323 324 void markAsEscaped(const ValueDecl *VD) { 325 // Do not globalize declare target variables. 326 if (!isa<VarDecl>(VD) || 327 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) 328 return; 329 VD = cast<ValueDecl>(VD->getCanonicalDecl()); 330 // Use user-specified allocation. 331 if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>()) 332 return; 333 // Variables captured by value must be globalized. 334 if (auto *CSI = CGF.CapturedStmtInfo) { 335 if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) { 336 // Check if need to capture the variable that was already captured by 337 // value in the outer region. 338 if (!IsForCombinedParallelRegion) { 339 if (!FD->hasAttrs()) 340 return; 341 const auto *Attr = FD->getAttr<OMPCaptureKindAttr>(); 342 if (!Attr) 343 return; 344 if (((Attr->getCaptureKind() != OMPC_map) && 345 !isOpenMPPrivate(Attr->getCaptureKind())) || 346 ((Attr->getCaptureKind() == OMPC_map) && 347 !FD->getType()->isAnyPointerType())) 348 return; 349 } 350 if (!FD->getType()->isReferenceType()) { 351 assert(!VD->getType()->isVariablyModifiedType() && 352 "Parameter captured by value with variably modified type"); 353 EscapedParameters.insert(VD); 354 } else if (!IsForCombinedParallelRegion) { 355 return; 356 } 357 } 358 } 359 if ((!CGF.CapturedStmtInfo || 360 (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) && 361 VD->getType()->isReferenceType()) 362 // Do not globalize variables with reference type. 
363 return; 364 if (VD->getType()->isVariablyModifiedType()) 365 EscapedVariableLengthDecls.insert(VD); 366 else 367 EscapedDecls.insert(VD); 368 } 369 370 void VisitValueDecl(const ValueDecl *VD) { 371 if (VD->getType()->isLValueReferenceType()) 372 markAsEscaped(VD); 373 if (const auto *VarD = dyn_cast<VarDecl>(VD)) { 374 if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) { 375 const bool SavedAllEscaped = AllEscaped; 376 AllEscaped = VD->getType()->isLValueReferenceType(); 377 Visit(VarD->getInit()); 378 AllEscaped = SavedAllEscaped; 379 } 380 } 381 } 382 void VisitOpenMPCapturedStmt(const CapturedStmt *S, 383 ArrayRef<OMPClause *> Clauses, 384 bool IsCombinedParallelRegion) { 385 if (!S) 386 return; 387 for (const CapturedStmt::Capture &C : S->captures()) { 388 if (C.capturesVariable() && !C.capturesVariableByCopy()) { 389 const ValueDecl *VD = C.getCapturedVar(); 390 bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion; 391 if (IsCombinedParallelRegion) { 392 // Check if the variable is privatized in the combined construct and 393 // those private copies must be shared in the inner parallel 394 // directive. 395 IsForCombinedParallelRegion = false; 396 for (const OMPClause *C : Clauses) { 397 if (!isOpenMPPrivate(C->getClauseKind()) || 398 C->getClauseKind() == OMPC_reduction || 399 C->getClauseKind() == OMPC_linear || 400 C->getClauseKind() == OMPC_private) 401 continue; 402 ArrayRef<const Expr *> Vars; 403 if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C)) 404 Vars = PC->getVarRefs(); 405 else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C)) 406 Vars = PC->getVarRefs(); 407 else 408 llvm_unreachable("Unexpected clause."); 409 for (const auto *E : Vars) { 410 const Decl *D = 411 cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl(); 412 if (D == VD->getCanonicalDecl()) { 413 IsForCombinedParallelRegion = true; 414 break; 415 } 416 } 417 if (IsForCombinedParallelRegion) 418 break; 419 } 420 } 421 markAsEscaped(VD); 422 if (isa<OMPCapturedExprDecl>(VD)) 423 VisitValueDecl(VD); 424 IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion; 425 } 426 } 427 } 428 429 void buildRecordForGlobalizedVars(bool IsInTTDRegion) { 430 assert(!GlobalizedRD && 431 "Record for globalized variables is built already."); 432 ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams; 433 if (IsInTTDRegion) 434 EscapedDeclsForTeams = EscapedDecls.getArrayRef(); 435 else 436 EscapedDeclsForParallel = EscapedDecls.getArrayRef(); 437 GlobalizedRD = ::buildRecordForGlobalizedVars( 438 CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams, 439 MappedDeclsFields, WarpSize); 440 } 441 442public: 443 CheckVarsEscapingDeclContext(CodeGenFunction &CGF, 444 ArrayRef<const ValueDecl *> TeamsReductions) 445 : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) { 446 } 447 virtual ~CheckVarsEscapingDeclContext() = default; 448 void VisitDeclStmt(const DeclStmt *S) { 449 if (!S) 450 return; 451 for (const Decl *D : S->decls()) 452 if (const auto *VD = dyn_cast_or_null<ValueDecl>(D)) 453 VisitValueDecl(VD); 454 } 455 void VisitOMPExecutableDirective(const OMPExecutableDirective *D) { 456 if (!D) 457 return; 458 if (!D->hasAssociatedStmt()) 459 return; 460 if (const auto *S = 461 dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) { 462 // Do not analyze directives that do not actually require capturing, 463 // like `omp for` or `omp simd` directives. 
464 llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions; 465 getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind()); 466 if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) { 467 VisitStmt(S->getCapturedStmt()); 468 return; 469 } 470 VisitOpenMPCapturedStmt( 471 S, D->clauses(), 472 CaptureRegions.back() == OMPD_parallel && 473 isOpenMPDistributeDirective(D->getDirectiveKind())); 474 } 475 } 476 void VisitCapturedStmt(const CapturedStmt *S) { 477 if (!S) 478 return; 479 for (const CapturedStmt::Capture &C : S->captures()) { 480 if (C.capturesVariable() && !C.capturesVariableByCopy()) { 481 const ValueDecl *VD = C.getCapturedVar(); 482 markAsEscaped(VD); 483 if (isa<OMPCapturedExprDecl>(VD)) 484 VisitValueDecl(VD); 485 } 486 } 487 } 488 void VisitLambdaExpr(const LambdaExpr *E) { 489 if (!E) 490 return; 491 for (const LambdaCapture &C : E->captures()) { 492 if (C.capturesVariable()) { 493 if (C.getCaptureKind() == LCK_ByRef) { 494 const ValueDecl *VD = C.getCapturedVar(); 495 markAsEscaped(VD); 496 if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD)) 497 VisitValueDecl(VD); 498 } 499 } 500 } 501 } 502 void VisitBlockExpr(const BlockExpr *E) { 503 if (!E) 504 return; 505 for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) { 506 if (C.isByRef()) { 507 const VarDecl *VD = C.getVariable(); 508 markAsEscaped(VD); 509 if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture()) 510 VisitValueDecl(VD); 511 } 512 } 513 } 514 void VisitCallExpr(const CallExpr *E) { 515 if (!E) 516 return; 517 for (const Expr *Arg : E->arguments()) { 518 if (!Arg) 519 continue; 520 if (Arg->isLValue()) { 521 const bool SavedAllEscaped = AllEscaped; 522 AllEscaped = true; 523 Visit(Arg); 524 AllEscaped = SavedAllEscaped; 525 } else { 526 Visit(Arg); 527 } 528 } 529 Visit(E->getCallee()); 530 } 531 void VisitDeclRefExpr(const DeclRefExpr *E) { 532 if (!E) 533 return; 534 const ValueDecl *VD = E->getDecl(); 535 if (AllEscaped) 536 markAsEscaped(VD); 537 if (isa<OMPCapturedExprDecl>(VD)) 538 VisitValueDecl(VD); 539 else if (const auto *VarD = dyn_cast<VarDecl>(VD)) 540 if (VarD->isInitCapture()) 541 VisitValueDecl(VD); 542 } 543 void VisitUnaryOperator(const UnaryOperator *E) { 544 if (!E) 545 return; 546 if (E->getOpcode() == UO_AddrOf) { 547 const bool SavedAllEscaped = AllEscaped; 548 AllEscaped = true; 549 Visit(E->getSubExpr()); 550 AllEscaped = SavedAllEscaped; 551 } else { 552 Visit(E->getSubExpr()); 553 } 554 } 555 void VisitImplicitCastExpr(const ImplicitCastExpr *E) { 556 if (!E) 557 return; 558 if (E->getCastKind() == CK_ArrayToPointerDecay) { 559 const bool SavedAllEscaped = AllEscaped; 560 AllEscaped = true; 561 Visit(E->getSubExpr()); 562 AllEscaped = SavedAllEscaped; 563 } else { 564 Visit(E->getSubExpr()); 565 } 566 } 567 void VisitExpr(const Expr *E) { 568 if (!E) 569 return; 570 bool SavedAllEscaped = AllEscaped; 571 if (!E->isLValue()) 572 AllEscaped = false; 573 for (const Stmt *Child : E->children()) 574 if (Child) 575 Visit(Child); 576 AllEscaped = SavedAllEscaped; 577 } 578 void VisitStmt(const Stmt *S) { 579 if (!S) 580 return; 581 for (const Stmt *Child : S->children()) 582 if (Child) 583 Visit(Child); 584 } 585 586 /// Returns the record that handles all the escaped local variables and used 587 /// instead of their original storage. 
588 const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) { 589 if (!GlobalizedRD) 590 buildRecordForGlobalizedVars(IsInTTDRegion); 591 return GlobalizedRD; 592 } 593 594 /// Returns the field in the globalized record for the escaped variable. 595 const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const { 596 assert(GlobalizedRD && 597 "Record for globalized variables must be generated already."); 598 auto I = MappedDeclsFields.find(VD); 599 if (I == MappedDeclsFields.end()) 600 return nullptr; 601 return I->getSecond(); 602 } 603 604 /// Returns the list of the escaped local variables/parameters. 605 ArrayRef<const ValueDecl *> getEscapedDecls() const { 606 return EscapedDecls.getArrayRef(); 607 } 608 609 /// Checks if the escaped local variable is actually a parameter passed by 610 /// value. 611 const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const { 612 return EscapedParameters; 613 } 614 615 /// Returns the list of the escaped variables with the variably modified 616 /// types. 617 ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const { 618 return EscapedVariableLengthDecls.getArrayRef(); 619 } 620}; 621} // anonymous namespace 622 623/// Get the GPU warp size. 624static llvm::Value *getNVPTXWarpSize(CodeGenFunction &CGF) { 625 return CGF.EmitRuntimeCall( 626 llvm::Intrinsic::getDeclaration( 627 &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_warpsize), 628 "nvptx_warp_size"); 629} 630 631/// Get the id of the current thread on the GPU. 632static llvm::Value *getNVPTXThreadID(CodeGenFunction &CGF) { 633 return CGF.EmitRuntimeCall( 634 llvm::Intrinsic::getDeclaration( 635 &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x), 636 "nvptx_tid"); 637} 638 639/// Get the id of the warp in the block. 640/// We assume that the warp size is 32, which is always the case 641/// on the NVPTX device, to generate more efficient code. 642static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) { 643 CGBuilderTy &Bld = CGF.Builder; 644 return Bld.CreateAShr(getNVPTXThreadID(CGF), LaneIDBits, "nvptx_warp_id"); 645} 646 647/// Get the id of the current lane in the Warp. 648/// We assume that the warp size is 32, which is always the case 649/// on the NVPTX device, to generate more efficient code. 650static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) { 651 CGBuilderTy &Bld = CGF.Builder; 652 return Bld.CreateAnd(getNVPTXThreadID(CGF), Bld.getInt32(LaneIDMask), 653 "nvptx_lane_id"); 654} 655 656/// Get the maximum number of threads in a block of the GPU. 657static llvm::Value *getNVPTXNumThreads(CodeGenFunction &CGF) { 658 return CGF.EmitRuntimeCall( 659 llvm::Intrinsic::getDeclaration( 660 &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x), 661 "nvptx_num_threads"); 662} 663 664/// Get the value of the thread_limit clause in the teams directive. 665/// For the 'generic' execution mode, the runtime encodes thread_limit in 666/// the launch parameters, always starting thread_limit+warpSize threads per 667/// CTA. The threads in the last warp are reserved for master execution. 668/// For the 'spmd' execution mode, all threads in a CTA are part of the team. 669static llvm::Value *getThreadLimit(CodeGenFunction &CGF, 670 bool IsInSPMDExecutionMode = false) { 671 CGBuilderTy &Bld = CGF.Builder; 672 return IsInSPMDExecutionMode 673 ? getNVPTXNumThreads(CGF) 674 : Bld.CreateNUWSub(getNVPTXNumThreads(CGF), getNVPTXWarpSize(CGF), 675 "thread_limit"); 676} 677 678/// Get the thread id of the OMP master thread. 
679/// The master thread id is the first thread (lane) of the last warp in the 680/// GPU block. Warp size is assumed to be some power of 2. 681/// Thread id is 0 indexed. 682/// E.g: If NumThreads is 33, master id is 32. 683/// If NumThreads is 64, master id is 32. 684/// If NumThreads is 1024, master id is 992. 685static llvm::Value *getMasterThreadID(CodeGenFunction &CGF) { 686 CGBuilderTy &Bld = CGF.Builder; 687 llvm::Value *NumThreads = getNVPTXNumThreads(CGF); 688 689 // We assume that the warp size is a power of 2. 690 llvm::Value *Mask = Bld.CreateNUWSub(getNVPTXWarpSize(CGF), Bld.getInt32(1)); 691 692 return Bld.CreateAnd(Bld.CreateNUWSub(NumThreads, Bld.getInt32(1)), 693 Bld.CreateNot(Mask), "master_tid"); 694} 695 696CGOpenMPRuntimeNVPTX::WorkerFunctionState::WorkerFunctionState( 697 CodeGenModule &CGM, SourceLocation Loc) 698 : WorkerFn(nullptr), CGFI(CGM.getTypes().arrangeNullaryFunction()), 699 Loc(Loc) { 700 createWorkerFunction(CGM); 701} 702 703void CGOpenMPRuntimeNVPTX::WorkerFunctionState::createWorkerFunction( 704 CodeGenModule &CGM) { 705 // Create an worker function with no arguments. 706 707 WorkerFn = llvm::Function::Create( 708 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, 709 /*placeholder=*/"_worker", &CGM.getModule()); 710 CGM.SetInternalFunctionAttributes(GlobalDecl(), WorkerFn, CGFI); 711 WorkerFn->setDoesNotRecurse(); 712} 713 714CGOpenMPRuntimeNVPTX::ExecutionMode 715CGOpenMPRuntimeNVPTX::getExecutionMode() const { 716 return CurrentExecutionMode; 717} 718 719static CGOpenMPRuntimeNVPTX::DataSharingMode 720getDataSharingMode(CodeGenModule &CGM) { 721 return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeNVPTX::CUDA 722 : CGOpenMPRuntimeNVPTX::Generic; 723} 724 725/// Check for inner (nested) SPMD construct, if any 726static bool hasNestedSPMDDirective(ASTContext &Ctx, 727 const OMPExecutableDirective &D) { 728 const auto *CS = D.getInnermostCapturedStmt(); 729 const auto *Body = 730 CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true); 731 const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body); 732 733 if (const auto *NestedDir = 734 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) { 735 OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind(); 736 switch (D.getDirectiveKind()) { 737 case OMPD_target: 738 if (isOpenMPParallelDirective(DKind)) 739 return true; 740 if (DKind == OMPD_teams) { 741 Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers( 742 /*IgnoreCaptured=*/true); 743 if (!Body) 744 return false; 745 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body); 746 if (const auto *NND = 747 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) { 748 DKind = NND->getDirectiveKind(); 749 if (isOpenMPParallelDirective(DKind)) 750 return true; 751 } 752 } 753 return false; 754 case OMPD_target_teams: 755 return isOpenMPParallelDirective(DKind); 756 case OMPD_target_simd: 757 case OMPD_target_parallel: 758 case OMPD_target_parallel_for: 759 case OMPD_target_parallel_for_simd: 760 case OMPD_target_teams_distribute: 761 case OMPD_target_teams_distribute_simd: 762 case OMPD_target_teams_distribute_parallel_for: 763 case OMPD_target_teams_distribute_parallel_for_simd: 764 case OMPD_parallel: 765 case OMPD_for: 766 case OMPD_parallel_for: 767 case OMPD_parallel_master: 768 case OMPD_parallel_sections: 769 case OMPD_for_simd: 770 case OMPD_parallel_for_simd: 771 case OMPD_cancel: 772 case OMPD_cancellation_point: 773 case OMPD_ordered: 774 case OMPD_threadprivate: 775 case 
OMPD_allocate: 776 case OMPD_task: 777 case OMPD_simd: 778 case OMPD_sections: 779 case OMPD_section: 780 case OMPD_single: 781 case OMPD_master: 782 case OMPD_critical: 783 case OMPD_taskyield: 784 case OMPD_barrier: 785 case OMPD_taskwait: 786 case OMPD_taskgroup: 787 case OMPD_atomic: 788 case OMPD_flush: 789 case OMPD_depobj: 790 case OMPD_scan: 791 case OMPD_teams: 792 case OMPD_target_data: 793 case OMPD_target_exit_data: 794 case OMPD_target_enter_data: 795 case OMPD_distribute: 796 case OMPD_distribute_simd: 797 case OMPD_distribute_parallel_for: 798 case OMPD_distribute_parallel_for_simd: 799 case OMPD_teams_distribute: 800 case OMPD_teams_distribute_simd: 801 case OMPD_teams_distribute_parallel_for: 802 case OMPD_teams_distribute_parallel_for_simd: 803 case OMPD_target_update: 804 case OMPD_declare_simd: 805 case OMPD_declare_variant: 806 case OMPD_begin_declare_variant: 807 case OMPD_end_declare_variant: 808 case OMPD_declare_target: 809 case OMPD_end_declare_target: 810 case OMPD_declare_reduction: 811 case OMPD_declare_mapper: 812 case OMPD_taskloop: 813 case OMPD_taskloop_simd: 814 case OMPD_master_taskloop: 815 case OMPD_master_taskloop_simd: 816 case OMPD_parallel_master_taskloop: 817 case OMPD_parallel_master_taskloop_simd: 818 case OMPD_requires: 819 case OMPD_unknown: 820 default: 821 llvm_unreachable("Unexpected directive."); 822 } 823 } 824 825 return false; 826} 827 828static bool supportsSPMDExecutionMode(ASTContext &Ctx, 829 const OMPExecutableDirective &D) { 830 OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind(); 831 switch (DirectiveKind) { 832 case OMPD_target: 833 case OMPD_target_teams: 834 return hasNestedSPMDDirective(Ctx, D); 835 case OMPD_target_parallel: 836 case OMPD_target_parallel_for: 837 case OMPD_target_parallel_for_simd: 838 case OMPD_target_teams_distribute_parallel_for: 839 case OMPD_target_teams_distribute_parallel_for_simd: 840 case OMPD_target_simd: 841 case OMPD_target_teams_distribute_simd: 842 return true; 843 case OMPD_target_teams_distribute: 844 return false; 845 case OMPD_parallel: 846 case OMPD_for: 847 case OMPD_parallel_for: 848 case OMPD_parallel_master: 849 case OMPD_parallel_sections: 850 case OMPD_for_simd: 851 case OMPD_parallel_for_simd: 852 case OMPD_cancel: 853 case OMPD_cancellation_point: 854 case OMPD_ordered: 855 case OMPD_threadprivate: 856 case OMPD_allocate: 857 case OMPD_task: 858 case OMPD_simd: 859 case OMPD_sections: 860 case OMPD_section: 861 case OMPD_single: 862 case OMPD_master: 863 case OMPD_critical: 864 case OMPD_taskyield: 865 case OMPD_barrier: 866 case OMPD_taskwait: 867 case OMPD_taskgroup: 868 case OMPD_atomic: 869 case OMPD_flush: 870 case OMPD_depobj: 871 case OMPD_scan: 872 case OMPD_teams: 873 case OMPD_target_data: 874 case OMPD_target_exit_data: 875 case OMPD_target_enter_data: 876 case OMPD_distribute: 877 case OMPD_distribute_simd: 878 case OMPD_distribute_parallel_for: 879 case OMPD_distribute_parallel_for_simd: 880 case OMPD_teams_distribute: 881 case OMPD_teams_distribute_simd: 882 case OMPD_teams_distribute_parallel_for: 883 case OMPD_teams_distribute_parallel_for_simd: 884 case OMPD_target_update: 885 case OMPD_declare_simd: 886 case OMPD_declare_variant: 887 case OMPD_begin_declare_variant: 888 case OMPD_end_declare_variant: 889 case OMPD_declare_target: 890 case OMPD_end_declare_target: 891 case OMPD_declare_reduction: 892 case OMPD_declare_mapper: 893 case OMPD_taskloop: 894 case OMPD_taskloop_simd: 895 case OMPD_master_taskloop: 896 case OMPD_master_taskloop_simd: 897 case 
OMPD_parallel_master_taskloop: 898 case OMPD_parallel_master_taskloop_simd: 899 case OMPD_requires: 900 case OMPD_unknown: 901 default: 902 break; 903 } 904 llvm_unreachable( 905 "Unknown programming model for OpenMP directive on NVPTX target."); 906} 907 908/// Check if the directive is loops based and has schedule clause at all or has 909/// static scheduling. 910static bool hasStaticScheduling(const OMPExecutableDirective &D) { 911 assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) && 912 isOpenMPLoopDirective(D.getDirectiveKind()) && 913 "Expected loop-based directive."); 914 return !D.hasClausesOfKind<OMPOrderedClause>() && 915 (!D.hasClausesOfKind<OMPScheduleClause>() || 916 llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(), 917 [](const OMPScheduleClause *C) { 918 return C->getScheduleKind() == OMPC_SCHEDULE_static; 919 })); 920} 921 922/// Check for inner (nested) lightweight runtime construct, if any 923static bool hasNestedLightweightDirective(ASTContext &Ctx, 924 const OMPExecutableDirective &D) { 925 assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive."); 926 const auto *CS = D.getInnermostCapturedStmt(); 927 const auto *Body = 928 CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true); 929 const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body); 930 931 if (const auto *NestedDir = 932 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) { 933 OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind(); 934 switch (D.getDirectiveKind()) { 935 case OMPD_target: 936 if (isOpenMPParallelDirective(DKind) && 937 isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) && 938 hasStaticScheduling(*NestedDir)) 939 return true; 940 if (DKind == OMPD_teams_distribute_simd || DKind == OMPD_simd) 941 return true; 942 if (DKind == OMPD_parallel) { 943 Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers( 944 /*IgnoreCaptured=*/true); 945 if (!Body) 946 return false; 947 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body); 948 if (const auto *NND = 949 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) { 950 DKind = NND->getDirectiveKind(); 951 if (isOpenMPWorksharingDirective(DKind) && 952 isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND)) 953 return true; 954 } 955 } else if (DKind == OMPD_teams) { 956 Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers( 957 /*IgnoreCaptured=*/true); 958 if (!Body) 959 return false; 960 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body); 961 if (const auto *NND = 962 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) { 963 DKind = NND->getDirectiveKind(); 964 if (isOpenMPParallelDirective(DKind) && 965 isOpenMPWorksharingDirective(DKind) && 966 isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND)) 967 return true; 968 if (DKind == OMPD_parallel) { 969 Body = NND->getInnermostCapturedStmt()->IgnoreContainers( 970 /*IgnoreCaptured=*/true); 971 if (!Body) 972 return false; 973 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body); 974 if (const auto *NND = 975 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) { 976 DKind = NND->getDirectiveKind(); 977 if (isOpenMPWorksharingDirective(DKind) && 978 isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND)) 979 return true; 980 } 981 } 982 } 983 } 984 return false; 985 case OMPD_target_teams: 986 if (isOpenMPParallelDirective(DKind) && 987 isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) && 988 hasStaticScheduling(*NestedDir)) 989 return true; 990 if 
(DKind == OMPD_distribute_simd || DKind == OMPD_simd) 991 return true; 992 if (DKind == OMPD_parallel) { 993 Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers( 994 /*IgnoreCaptured=*/true); 995 if (!Body) 996 return false; 997 ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body); 998 if (const auto *NND = 999 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) { 1000 DKind = NND->getDirectiveKind(); 1001 if (isOpenMPWorksharingDirective(DKind) && 1002 isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND)) 1003 return true; 1004 } 1005 } 1006 return false; 1007 case OMPD_target_parallel: 1008 if (DKind == OMPD_simd) 1009 return true; 1010 return isOpenMPWorksharingDirective(DKind) && 1011 isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir); 1012 case OMPD_target_teams_distribute: 1013 case OMPD_target_simd: 1014 case OMPD_target_parallel_for: 1015 case OMPD_target_parallel_for_simd: 1016 case OMPD_target_teams_distribute_simd: 1017 case OMPD_target_teams_distribute_parallel_for: 1018 case OMPD_target_teams_distribute_parallel_for_simd: 1019 case OMPD_parallel: 1020 case OMPD_for: 1021 case OMPD_parallel_for: 1022 case OMPD_parallel_master: 1023 case OMPD_parallel_sections: 1024 case OMPD_for_simd: 1025 case OMPD_parallel_for_simd: 1026 case OMPD_cancel: 1027 case OMPD_cancellation_point: 1028 case OMPD_ordered: 1029 case OMPD_threadprivate: 1030 case OMPD_allocate: 1031 case OMPD_task: 1032 case OMPD_simd: 1033 case OMPD_sections: 1034 case OMPD_section: 1035 case OMPD_single: 1036 case OMPD_master: 1037 case OMPD_critical: 1038 case OMPD_taskyield: 1039 case OMPD_barrier: 1040 case OMPD_taskwait: 1041 case OMPD_taskgroup: 1042 case OMPD_atomic: 1043 case OMPD_flush: 1044 case OMPD_depobj: 1045 case OMPD_scan: 1046 case OMPD_teams: 1047 case OMPD_target_data: 1048 case OMPD_target_exit_data: 1049 case OMPD_target_enter_data: 1050 case OMPD_distribute: 1051 case OMPD_distribute_simd: 1052 case OMPD_distribute_parallel_for: 1053 case OMPD_distribute_parallel_for_simd: 1054 case OMPD_teams_distribute: 1055 case OMPD_teams_distribute_simd: 1056 case OMPD_teams_distribute_parallel_for: 1057 case OMPD_teams_distribute_parallel_for_simd: 1058 case OMPD_target_update: 1059 case OMPD_declare_simd: 1060 case OMPD_declare_variant: 1061 case OMPD_begin_declare_variant: 1062 case OMPD_end_declare_variant: 1063 case OMPD_declare_target: 1064 case OMPD_end_declare_target: 1065 case OMPD_declare_reduction: 1066 case OMPD_declare_mapper: 1067 case OMPD_taskloop: 1068 case OMPD_taskloop_simd: 1069 case OMPD_master_taskloop: 1070 case OMPD_master_taskloop_simd: 1071 case OMPD_parallel_master_taskloop: 1072 case OMPD_parallel_master_taskloop_simd: 1073 case OMPD_requires: 1074 case OMPD_unknown: 1075 default: 1076 llvm_unreachable("Unexpected directive."); 1077 } 1078 } 1079 1080 return false; 1081} 1082 1083/// Checks if the construct supports lightweight runtime. It must be SPMD 1084/// construct + inner loop-based construct with static scheduling. 
1085static bool supportsLightweightRuntime(ASTContext &Ctx, 1086 const OMPExecutableDirective &D) { 1087 if (!supportsSPMDExecutionMode(Ctx, D)) 1088 return false; 1089 OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind(); 1090 switch (DirectiveKind) { 1091 case OMPD_target: 1092 case OMPD_target_teams: 1093 case OMPD_target_parallel: 1094 return hasNestedLightweightDirective(Ctx, D); 1095 case OMPD_target_parallel_for: 1096 case OMPD_target_parallel_for_simd: 1097 case OMPD_target_teams_distribute_parallel_for: 1098 case OMPD_target_teams_distribute_parallel_for_simd: 1099 // (Last|First)-privates must be shared in parallel region. 1100 return hasStaticScheduling(D); 1101 case OMPD_target_simd: 1102 case OMPD_target_teams_distribute_simd: 1103 return true; 1104 case OMPD_target_teams_distribute: 1105 return false; 1106 case OMPD_parallel: 1107 case OMPD_for: 1108 case OMPD_parallel_for: 1109 case OMPD_parallel_master: 1110 case OMPD_parallel_sections: 1111 case OMPD_for_simd: 1112 case OMPD_parallel_for_simd: 1113 case OMPD_cancel: 1114 case OMPD_cancellation_point: 1115 case OMPD_ordered: 1116 case OMPD_threadprivate: 1117 case OMPD_allocate: 1118 case OMPD_task: 1119 case OMPD_simd: 1120 case OMPD_sections: 1121 case OMPD_section: 1122 case OMPD_single: 1123 case OMPD_master: 1124 case OMPD_critical: 1125 case OMPD_taskyield: 1126 case OMPD_barrier: 1127 case OMPD_taskwait: 1128 case OMPD_taskgroup: 1129 case OMPD_atomic: 1130 case OMPD_flush: 1131 case OMPD_depobj: 1132 case OMPD_scan: 1133 case OMPD_teams: 1134 case OMPD_target_data: 1135 case OMPD_target_exit_data: 1136 case OMPD_target_enter_data: 1137 case OMPD_distribute: 1138 case OMPD_distribute_simd: 1139 case OMPD_distribute_parallel_for: 1140 case OMPD_distribute_parallel_for_simd: 1141 case OMPD_teams_distribute: 1142 case OMPD_teams_distribute_simd: 1143 case OMPD_teams_distribute_parallel_for: 1144 case OMPD_teams_distribute_parallel_for_simd: 1145 case OMPD_target_update: 1146 case OMPD_declare_simd: 1147 case OMPD_declare_variant: 1148 case OMPD_begin_declare_variant: 1149 case OMPD_end_declare_variant: 1150 case OMPD_declare_target: 1151 case OMPD_end_declare_target: 1152 case OMPD_declare_reduction: 1153 case OMPD_declare_mapper: 1154 case OMPD_taskloop: 1155 case OMPD_taskloop_simd: 1156 case OMPD_master_taskloop: 1157 case OMPD_master_taskloop_simd: 1158 case OMPD_parallel_master_taskloop: 1159 case OMPD_parallel_master_taskloop_simd: 1160 case OMPD_requires: 1161 case OMPD_unknown: 1162 default: 1163 break; 1164 } 1165 llvm_unreachable( 1166 "Unknown programming model for OpenMP directive on NVPTX target."); 1167} 1168 1169void CGOpenMPRuntimeNVPTX::emitNonSPMDKernel(const OMPExecutableDirective &D, 1170 StringRef ParentName, 1171 llvm::Function *&OutlinedFn, 1172 llvm::Constant *&OutlinedFnID, 1173 bool IsOffloadEntry, 1174 const RegionCodeGenTy &CodeGen) { 1175 ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode); 1176 EntryFunctionState EST; 1177 WorkerFunctionState WST(CGM, D.getBeginLoc()); 1178 Work.clear(); 1179 WrapperFunctionsMap.clear(); 1180 1181 // Emit target region as a standalone region. 
1182 class NVPTXPrePostActionTy : public PrePostActionTy { 1183 CGOpenMPRuntimeNVPTX::EntryFunctionState &EST; 1184 CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST; 1185 1186 public: 1187 NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX::EntryFunctionState &EST, 1188 CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST) 1189 : EST(EST), WST(WST) {} 1190 void Enter(CodeGenFunction &CGF) override { 1191 auto &RT = 1192 static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime()); 1193 RT.emitNonSPMDEntryHeader(CGF, EST, WST); 1194 // Skip target region initialization. 1195 RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true); 1196 } 1197 void Exit(CodeGenFunction &CGF) override { 1198 auto &RT = 1199 static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime()); 1200 RT.clearLocThreadIdInsertPt(CGF); 1201 RT.emitNonSPMDEntryFooter(CGF, EST); 1202 } 1203 } Action(EST, WST); 1204 CodeGen.setAction(Action); 1205 IsInTTDRegion = true; 1206 // Reserve place for the globalized memory. 1207 GlobalizedRecords.emplace_back(); 1208 if (!KernelStaticGlobalized) { 1209 KernelStaticGlobalized = new llvm::GlobalVariable( 1210 CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false, 1211 llvm::GlobalValue::InternalLinkage, 1212 llvm::ConstantPointerNull::get(CGM.VoidPtrTy), 1213 "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr, 1214 llvm::GlobalValue::NotThreadLocal, 1215 CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared)); 1216 } 1217 emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID, 1218 IsOffloadEntry, CodeGen); 1219 IsInTTDRegion = false; 1220 1221 // Now change the name of the worker function to correspond to this target 1222 // region's entry function. 1223 WST.WorkerFn->setName(Twine(OutlinedFn->getName(), "_worker")); 1224 1225 // Create the worker function 1226 emitWorkerFunction(WST); 1227} 1228 1229// Setup NVPTX threads for master-worker OpenMP scheme. 1230void CGOpenMPRuntimeNVPTX::emitNonSPMDEntryHeader(CodeGenFunction &CGF, 1231 EntryFunctionState &EST, 1232 WorkerFunctionState &WST) { 1233 CGBuilderTy &Bld = CGF.Builder; 1234 1235 llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker"); 1236 llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck"); 1237 llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master"); 1238 EST.ExitBB = CGF.createBasicBlock(".exit"); 1239 1240 llvm::Value *IsWorker = 1241 Bld.CreateICmpULT(getNVPTXThreadID(CGF), getThreadLimit(CGF)); 1242 Bld.CreateCondBr(IsWorker, WorkerBB, MasterCheckBB); 1243 1244 CGF.EmitBlock(WorkerBB); 1245 emitCall(CGF, WST.Loc, WST.WorkerFn); 1246 CGF.EmitBranch(EST.ExitBB); 1247 1248 CGF.EmitBlock(MasterCheckBB); 1249 llvm::Value *IsMaster = 1250 Bld.CreateICmpEQ(getNVPTXThreadID(CGF), getMasterThreadID(CGF)); 1251 Bld.CreateCondBr(IsMaster, MasterBB, EST.ExitBB); 1252 1253 CGF.EmitBlock(MasterBB); 1254 IsInTargetMasterThreadRegion = true; 1255 // SEQUENTIAL (MASTER) REGION START 1256 // First action in sequential region: 1257 // Initialize the state of the OpenMP runtime library on the GPU. 1258 // TODO: Optimize runtime initialization and pass in correct value. 1259 llvm::Value *Args[] = {getThreadLimit(CGF), 1260 Bld.getInt16(/*RequiresOMPRuntime=*/1)}; 1261 CGF.EmitRuntimeCall( 1262 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_init), Args); 1263 1264 // For data sharing, we need to initialize the stack. 
1265 CGF.EmitRuntimeCall( 1266 createNVPTXRuntimeFunction( 1267 OMPRTL_NVPTX__kmpc_data_sharing_init_stack)); 1268 1269 emitGenericVarsProlog(CGF, WST.Loc); 1270} 1271 1272void CGOpenMPRuntimeNVPTX::emitNonSPMDEntryFooter(CodeGenFunction &CGF, 1273 EntryFunctionState &EST) { 1274 IsInTargetMasterThreadRegion = false; 1275 if (!CGF.HaveInsertPoint()) 1276 return; 1277 1278 emitGenericVarsEpilog(CGF); 1279 1280 if (!EST.ExitBB) 1281 EST.ExitBB = CGF.createBasicBlock(".exit"); 1282 1283 llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier"); 1284 CGF.EmitBranch(TerminateBB); 1285 1286 CGF.EmitBlock(TerminateBB); 1287 // Signal termination condition. 1288 // TODO: Optimize runtime initialization and pass in correct value. 1289 llvm::Value *Args[] = {CGF.Builder.getInt16(/*IsOMPRuntimeInitialized=*/1)}; 1290 CGF.EmitRuntimeCall( 1291 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_deinit), Args); 1292 // Barrier to terminate worker threads. 1293 syncCTAThreads(CGF); 1294 // Master thread jumps to exit point. 1295 CGF.EmitBranch(EST.ExitBB); 1296 1297 CGF.EmitBlock(EST.ExitBB); 1298 EST.ExitBB = nullptr; 1299} 1300 1301void CGOpenMPRuntimeNVPTX::emitSPMDKernel(const OMPExecutableDirective &D, 1302 StringRef ParentName, 1303 llvm::Function *&OutlinedFn, 1304 llvm::Constant *&OutlinedFnID, 1305 bool IsOffloadEntry, 1306 const RegionCodeGenTy &CodeGen) { 1307 ExecutionRuntimeModesRAII ModeRAII( 1308 CurrentExecutionMode, RequiresFullRuntime, 1309 CGM.getLangOpts().OpenMPCUDAForceFullRuntime || 1310 !supportsLightweightRuntime(CGM.getContext(), D)); 1311 EntryFunctionState EST; 1312 1313 // Emit target region as a standalone region. 1314 class NVPTXPrePostActionTy : public PrePostActionTy { 1315 CGOpenMPRuntimeNVPTX &RT; 1316 CGOpenMPRuntimeNVPTX::EntryFunctionState &EST; 1317 const OMPExecutableDirective &D; 1318 1319 public: 1320 NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX &RT, 1321 CGOpenMPRuntimeNVPTX::EntryFunctionState &EST, 1322 const OMPExecutableDirective &D) 1323 : RT(RT), EST(EST), D(D) {} 1324 void Enter(CodeGenFunction &CGF) override { 1325 RT.emitSPMDEntryHeader(CGF, EST, D); 1326 // Skip target region initialization. 1327 RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true); 1328 } 1329 void Exit(CodeGenFunction &CGF) override { 1330 RT.clearLocThreadIdInsertPt(CGF); 1331 RT.emitSPMDEntryFooter(CGF, EST); 1332 } 1333 } Action(*this, EST, D); 1334 CodeGen.setAction(Action); 1335 IsInTTDRegion = true; 1336 // Reserve place for the globalized memory. 1337 GlobalizedRecords.emplace_back(); 1338 if (!KernelStaticGlobalized) { 1339 KernelStaticGlobalized = new llvm::GlobalVariable( 1340 CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false, 1341 llvm::GlobalValue::InternalLinkage, 1342 llvm::ConstantPointerNull::get(CGM.VoidPtrTy), 1343 "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr, 1344 llvm::GlobalValue::NotThreadLocal, 1345 CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared)); 1346 } 1347 emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID, 1348 IsOffloadEntry, CodeGen); 1349 IsInTTDRegion = false; 1350} 1351 1352void CGOpenMPRuntimeNVPTX::emitSPMDEntryHeader( 1353 CodeGenFunction &CGF, EntryFunctionState &EST, 1354 const OMPExecutableDirective &D) { 1355 CGBuilderTy &Bld = CGF.Builder; 1356 1357 // Setup BBs in entry function. 
1358 llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute"); 1359 EST.ExitBB = CGF.createBasicBlock(".exit"); 1360 1361 llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSPMDExecutionMode=*/true), 1362 /*RequiresOMPRuntime=*/ 1363 Bld.getInt16(RequiresFullRuntime ? 1 : 0), 1364 /*RequiresDataSharing=*/Bld.getInt16(0)}; 1365 CGF.EmitRuntimeCall( 1366 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_spmd_kernel_init), Args); 1367 1368 if (RequiresFullRuntime) { 1369 // For data sharing, we need to initialize the stack. 1370 CGF.EmitRuntimeCall(createNVPTXRuntimeFunction( 1371 OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd)); 1372 } 1373 1374 CGF.EmitBranch(ExecuteBB); 1375 1376 CGF.EmitBlock(ExecuteBB); 1377 1378 IsInTargetMasterThreadRegion = true; 1379} 1380 1381void CGOpenMPRuntimeNVPTX::emitSPMDEntryFooter(CodeGenFunction &CGF, 1382 EntryFunctionState &EST) { 1383 IsInTargetMasterThreadRegion = false; 1384 if (!CGF.HaveInsertPoint()) 1385 return; 1386 1387 if (!EST.ExitBB) 1388 EST.ExitBB = CGF.createBasicBlock(".exit"); 1389 1390 llvm::BasicBlock *OMPDeInitBB = CGF.createBasicBlock(".omp.deinit"); 1391 CGF.EmitBranch(OMPDeInitBB); 1392 1393 CGF.EmitBlock(OMPDeInitBB); 1394 // DeInitialize the OMP state in the runtime; called by all active threads. 1395 llvm::Value *Args[] = {/*RequiresOMPRuntime=*/ 1396 CGF.Builder.getInt16(RequiresFullRuntime ? 1 : 0)}; 1397 CGF.EmitRuntimeCall( 1398 createNVPTXRuntimeFunction( 1399 OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2), Args); 1400 CGF.EmitBranch(EST.ExitBB); 1401 1402 CGF.EmitBlock(EST.ExitBB); 1403 EST.ExitBB = nullptr; 1404} 1405 1406// Create a unique global variable to indicate the execution mode of this target 1407// region. The execution mode is either 'generic', or 'spmd' depending on the 1408// target directive. This variable is picked up by the offload library to setup 1409// the device appropriately before kernel launch. If the execution mode is 1410// 'generic', the runtime reserves one warp for the master, otherwise, all 1411// warps participate in parallel work. 1412static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name, 1413 bool Mode) { 1414 auto *GVMode = 1415 new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true, 1416 llvm::GlobalValue::WeakAnyLinkage, 1417 llvm::ConstantInt::get(CGM.Int8Ty, Mode ? 0 : 1), 1418 Twine(Name, "_exec_mode")); 1419 CGM.addCompilerUsedGlobal(GVMode); 1420} 1421 1422void CGOpenMPRuntimeNVPTX::emitWorkerFunction(WorkerFunctionState &WST) { 1423 ASTContext &Ctx = CGM.getContext(); 1424 1425 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true); 1426 CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, WST.CGFI, {}, 1427 WST.Loc, WST.Loc); 1428 emitWorkerLoop(CGF, WST); 1429 CGF.FinishFunction(); 1430} 1431 1432void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF, 1433 WorkerFunctionState &WST) { 1434 // 1435 // The workers enter this loop and wait for parallel work from the master. 1436 // When the master encounters a parallel region it sets up the work + variable 1437 // arguments, and wakes up the workers. The workers first check to see if 1438 // they are required for the parallel region, i.e., within the # of requested 1439 // parallel threads. The activated workers load the variable arguments and 1440 // execute the parallel work. 
1441 // 1442 1443 CGBuilderTy &Bld = CGF.Builder; 1444 1445 llvm::BasicBlock *AwaitBB = CGF.createBasicBlock(".await.work"); 1446 llvm::BasicBlock *SelectWorkersBB = CGF.createBasicBlock(".select.workers"); 1447 llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute.parallel"); 1448 llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".terminate.parallel"); 1449 llvm::BasicBlock *BarrierBB = CGF.createBasicBlock(".barrier.parallel"); 1450 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit"); 1451 1452 CGF.EmitBranch(AwaitBB); 1453 1454 // Workers wait for work from master. 1455 CGF.EmitBlock(AwaitBB); 1456 // Wait for parallel work 1457 syncCTAThreads(CGF); 1458 1459 Address WorkFn = 1460 CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrTy, /*Name=*/"work_fn"); 1461 Address ExecStatus = 1462 CGF.CreateDefaultAlignTempAlloca(CGF.Int8Ty, /*Name=*/"exec_status"); 1463 CGF.InitTempAlloca(ExecStatus, Bld.getInt8(/*C=*/0)); 1464 CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy)); 1465 1466 // TODO: Optimize runtime initialization and pass in correct value. 1467 llvm::Value *Args[] = {WorkFn.getPointer()}; 1468 llvm::Value *Ret = CGF.EmitRuntimeCall( 1469 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_parallel), Args); 1470 Bld.CreateStore(Bld.CreateZExt(Ret, CGF.Int8Ty), ExecStatus); 1471 1472 // On termination condition (workid == 0), exit loop. 1473 llvm::Value *WorkID = Bld.CreateLoad(WorkFn); 1474 llvm::Value *ShouldTerminate = Bld.CreateIsNull(WorkID, "should_terminate"); 1475 Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB); 1476 1477 // Activate requested workers. 1478 CGF.EmitBlock(SelectWorkersBB); 1479 llvm::Value *IsActive = 1480 Bld.CreateIsNotNull(Bld.CreateLoad(ExecStatus), "is_active"); 1481 Bld.CreateCondBr(IsActive, ExecuteBB, BarrierBB); 1482 1483 // Signal start of parallel region. 1484 CGF.EmitBlock(ExecuteBB); 1485 // Skip initialization. 1486 setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true); 1487 1488 // Process work items: outlined parallel functions. 1489 for (llvm::Function *W : Work) { 1490 // Try to match this outlined function. 1491 llvm::Value *ID = Bld.CreatePointerBitCastOrAddrSpaceCast(W, CGM.Int8PtrTy); 1492 1493 llvm::Value *WorkFnMatch = 1494 Bld.CreateICmpEQ(Bld.CreateLoad(WorkFn), ID, "work_match"); 1495 1496 llvm::BasicBlock *ExecuteFNBB = CGF.createBasicBlock(".execute.fn"); 1497 llvm::BasicBlock *CheckNextBB = CGF.createBasicBlock(".check.next"); 1498 Bld.CreateCondBr(WorkFnMatch, ExecuteFNBB, CheckNextBB); 1499 1500 // Execute this outlined function. 1501 CGF.EmitBlock(ExecuteFNBB); 1502 1503 // Insert call to work function via shared wrapper. The shared 1504 // wrapper takes two arguments: 1505 // - the parallelism level; 1506 // - the thread ID; 1507 emitCall(CGF, WST.Loc, W, 1508 {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)}); 1509 1510 // Go to end of parallel region. 1511 CGF.EmitBranch(TerminateBB); 1512 1513 CGF.EmitBlock(CheckNextBB); 1514 } 1515 // Default case: call to outlined function through pointer if the target 1516 // region makes a declare target call that may contain an orphaned parallel 1517 // directive. 1518 auto *ParallelFnTy = 1519 llvm::FunctionType::get(CGM.VoidTy, {CGM.Int16Ty, CGM.Int32Ty}, 1520 /*isVarArg=*/false); 1521 llvm::Value *WorkFnCast = 1522 Bld.CreateBitCast(WorkID, ParallelFnTy->getPointerTo()); 1523 // Insert call to work function via shared wrapper. 
The shared 1524 // wrapper takes two arguments: 1525 // - the parallelism level; 1526 // - the thread ID; 1527 emitCall(CGF, WST.Loc, {ParallelFnTy, WorkFnCast}, 1528 {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)}); 1529 // Go to end of parallel region. 1530 CGF.EmitBranch(TerminateBB); 1531 1532 // Signal end of parallel region. 1533 CGF.EmitBlock(TerminateBB); 1534 CGF.EmitRuntimeCall( 1535 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_end_parallel), 1536 llvm::None); 1537 CGF.EmitBranch(BarrierBB); 1538 1539 // All active and inactive workers wait at a barrier after parallel region. 1540 CGF.EmitBlock(BarrierBB); 1541 // Barrier after parallel region. 1542 syncCTAThreads(CGF); 1543 CGF.EmitBranch(AwaitBB); 1544 1545 // Exit target region. 1546 CGF.EmitBlock(ExitBB); 1547 // Skip initialization. 1548 clearLocThreadIdInsertPt(CGF); 1549} 1550 1551/// Returns specified OpenMP runtime function for the current OpenMP 1552/// implementation. Specialized for the NVPTX device. 1553/// \param Function OpenMP runtime function. 1554/// \return Specified function. 1555llvm::FunctionCallee 1556CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) { 1557 llvm::FunctionCallee RTLFn = nullptr; 1558 switch (static_cast<OpenMPRTLFunctionNVPTX>(Function)) { 1559 case OMPRTL_NVPTX__kmpc_kernel_init: { 1560 // Build void __kmpc_kernel_init(kmp_int32 thread_limit, int16_t 1561 // RequiresOMPRuntime); 1562 llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty}; 1563 auto *FnTy = 1564 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false); 1565 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_init"); 1566 break; 1567 } 1568 case OMPRTL_NVPTX__kmpc_kernel_deinit: { 1569 // Build void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized); 1570 llvm::Type *TypeParams[] = {CGM.Int16Ty}; 1571 auto *FnTy = 1572 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false); 1573 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_deinit"); 1574 break; 1575 } 1576 case OMPRTL_NVPTX__kmpc_spmd_kernel_init: { 1577 // Build void __kmpc_spmd_kernel_init(kmp_int32 thread_limit, 1578 // int16_t RequiresOMPRuntime, int16_t RequiresDataSharing); 1579 llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty}; 1580 auto *FnTy = 1581 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false); 1582 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_init"); 1583 break; 1584 } 1585 case OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2: { 1586 // Build void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime); 1587 llvm::Type *TypeParams[] = {CGM.Int16Ty}; 1588 auto *FnTy = 1589 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false); 1590 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_deinit_v2"); 1591 break; 1592 } 1593 case OMPRTL_NVPTX__kmpc_kernel_prepare_parallel: { 1594 /// Build void __kmpc_kernel_prepare_parallel( 1595 /// void *outlined_function); 1596 llvm::Type *TypeParams[] = {CGM.Int8PtrTy}; 1597 auto *FnTy = 1598 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false); 1599 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_prepare_parallel"); 1600 break; 1601 } 1602 case OMPRTL_NVPTX__kmpc_kernel_parallel: { 1603 /// Build bool __kmpc_kernel_parallel(void **outlined_function); 1604 llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy}; 1605 llvm::Type *RetTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy); 1606 auto *FnTy = 1607 llvm::FunctionType::get(RetTy, TypeParams, /*isVarArg*/ false); 
  // Default case: call to outlined function through pointer if the target
  // region makes a declare target call that may contain an orphaned parallel
  // directive.
  auto *ParallelFnTy =
      llvm::FunctionType::get(CGM.VoidTy, {CGM.Int16Ty, CGM.Int32Ty},
                              /*isVarArg=*/false);
  llvm::Value *WorkFnCast =
      Bld.CreateBitCast(WorkID, ParallelFnTy->getPointerTo());
  // Insert call to work function via shared wrapper. The shared
  // wrapper takes two arguments:
  //   - the parallelism level;
  //   - the thread ID;
  emitCall(CGF, WST.Loc, {ParallelFnTy, WorkFnCast},
           {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
  // Go to end of parallel region.
  CGF.EmitBranch(TerminateBB);

  // Signal end of parallel region.
  CGF.EmitBlock(TerminateBB);
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_end_parallel),
      llvm::None);
  CGF.EmitBranch(BarrierBB);

  // All active and inactive workers wait at a barrier after parallel region.
  CGF.EmitBlock(BarrierBB);
  // Barrier after parallel region.
  syncCTAThreads(CGF);
  CGF.EmitBranch(AwaitBB);

  // Exit target region.
  CGF.EmitBlock(ExitBB);
  // Skip initialization.
  clearLocThreadIdInsertPt(CGF);
}

/// Returns specified OpenMP runtime function for the current OpenMP
/// implementation. Specialized for the NVPTX device.
/// \param Function OpenMP runtime function.
/// \return Specified function.
llvm::FunctionCallee
CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
  llvm::FunctionCallee RTLFn = nullptr;
  switch (static_cast<OpenMPRTLFunctionNVPTX>(Function)) {
  case OMPRTL_NVPTX__kmpc_kernel_init: {
    // Build void __kmpc_kernel_init(kmp_int32 thread_limit, int16_t
    // RequiresOMPRuntime);
    llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_init");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_deinit: {
    // Build void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
    llvm::Type *TypeParams[] = {CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_deinit");
    break;
  }
  case OMPRTL_NVPTX__kmpc_spmd_kernel_init: {
    // Build void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
    // int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
    llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_init");
    break;
  }
  case OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2: {
    // Build void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
    llvm::Type *TypeParams[] = {CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_deinit_v2");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_prepare_parallel: {
    /// Build void __kmpc_kernel_prepare_parallel(
    /// void *outlined_function);
    llvm::Type *TypeParams[] = {CGM.Int8PtrTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_prepare_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_parallel: {
    /// Build bool __kmpc_kernel_parallel(void **outlined_function);
    llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy};
    llvm::Type *RetTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
    auto *FnTy =
        llvm::FunctionType::get(RetTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_end_parallel: {
    /// Build void __kmpc_kernel_end_parallel();
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_end_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_serialized_parallel: {
    // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_end_serialized_parallel: {
    // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_shuffle_int32: {
    // Build int32_t __kmpc_shuffle_int32(int32_t element,
    // int16_t lane_offset, int16_t warp_size);
    llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int32");
    break;
  }
  case OMPRTL_NVPTX__kmpc_shuffle_int64: {
    // Build int64_t __kmpc_shuffle_int64(int64_t element,
    // int16_t lane_offset, int16_t warp_size);
    llvm::Type *TypeParams[] = {CGM.Int64Ty, CGM.Int16Ty, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int64Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int64");
    break;
  }
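  // Note: the two shuffle entry points above are the only cross-lane
  // data-movement primitives the reduction codegen relies on; elements are
  // moved in chunks of at most 8 bytes, each widened to a 32- or 64-bit
  // integer before the call (see castValueToType and shuffleAndStore below).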
  case OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2: {
    // Build int32_t __kmpc_nvptx_parallel_reduce_nowait_v2(ident_t *loc,
    // kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size, void*
    // reduce_data, void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t
    // lane_id, int16_t lane_offset, int16_t shortCircuit), void
    // (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
    llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
                                             CGM.Int16Ty, CGM.Int16Ty};
    auto *ShuffleReduceFnTy =
        llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
    auto *InterWarpCopyFnTy =
        llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
                                CGM.Int32Ty,
                                CGM.Int32Ty,
                                CGM.SizeTy,
                                CGM.VoidPtrTy,
                                ShuffleReduceFnTy->getPointerTo(),
                                InterWarpCopyFnTy->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_nvptx_parallel_reduce_nowait_v2");
    break;
  }
  case OMPRTL_NVPTX__kmpc_end_reduce_nowait: {
    // Build void __kmpc_nvptx_end_reduce_nowait(kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_nvptx_end_reduce_nowait");
    break;
  }
  case OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2: {
    // Build int32_t __kmpc_nvptx_teams_reduce_nowait_v2(ident_t *loc,
    // kmp_int32 global_tid, void *global_buffer, int32_t num_of_records,
    // void *reduce_data,
    // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
    // lane_offset, int16_t shortCircuit),
    // void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num), void
    // (*kmp_ListToGlobalCpyFctPtr)(void *buffer, int idx, void *reduce_data),
    // void (*kmp_GlobalToListCpyFctPtr)(void *buffer, int idx,
    // void *reduce_data), void (*kmp_GlobalToListCpyPtrsFctPtr)(void *buffer,
    // int idx, void *reduce_data), void (*kmp_GlobalToListRedFctPtr)(void
    // *buffer, int idx, void *reduce_data));
    llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
                                             CGM.Int16Ty, CGM.Int16Ty};
    auto *ShuffleReduceFnTy =
        llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
    auto *InterWarpCopyFnTy =
        llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *GlobalListTypeParams[] = {CGM.VoidPtrTy, CGM.IntTy,
                                          CGM.VoidPtrTy};
    auto *GlobalListFnTy =
        llvm::FunctionType::get(CGM.VoidTy, GlobalListTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
                                CGM.Int32Ty,
                                CGM.VoidPtrTy,
                                CGM.Int32Ty,
                                CGM.VoidPtrTy,
                                ShuffleReduceFnTy->getPointerTo(),
                                InterWarpCopyFnTy->getPointerTo(),
                                GlobalListFnTy->getPointerTo(),
                                GlobalListFnTy->getPointerTo(),
                                GlobalListFnTy->getPointerTo(),
                                GlobalListFnTy->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_nvptx_teams_reduce_nowait_v2");
    break;
  }
  case OMPRTL_NVPTX__kmpc_data_sharing_init_stack: {
    /// Build void __kmpc_data_sharing_init_stack();
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack");
    break;
  }
  case OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd: {
    /// Build void __kmpc_data_sharing_init_stack_spmd();
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn =
        CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack_spmd");
    break;
  }
  case OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack: {
    // Build void *__kmpc_data_sharing_coalesced_push_stack(size_t size,
    // int16_t UseSharedMemory);
    llvm::Type *TypeParams[] = {CGM.SizeTy, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_data_sharing_coalesced_push_stack");
    break;
  }
  case OMPRTL_NVPTX__kmpc_data_sharing_push_stack: {
    // Build void *__kmpc_data_sharing_push_stack(size_t size, int16_t
    // UseSharedMemory);
    llvm::Type *TypeParams[] = {CGM.SizeTy, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_data_sharing_push_stack");
    break;
  }
  case OMPRTL_NVPTX__kmpc_data_sharing_pop_stack: {
    // Build void __kmpc_data_sharing_pop_stack(void *a);
    llvm::Type *TypeParams[] = {CGM.VoidPtrTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy,
                                      /*Name=*/"__kmpc_data_sharing_pop_stack");
    break;
  }
  case OMPRTL_NVPTX__kmpc_begin_sharing_variables: {
    /// Build void __kmpc_begin_sharing_variables(void ***args,
    /// size_t n_args);
    llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy->getPointerTo(), CGM.SizeTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_begin_sharing_variables");
    break;
  }
  case OMPRTL_NVPTX__kmpc_end_sharing_variables: {
    /// Build void __kmpc_end_sharing_variables();
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_sharing_variables");
    break;
  }
  case OMPRTL_NVPTX__kmpc_get_shared_variables: {
    /// Build void __kmpc_get_shared_variables(void ***GlobalArgs);
    llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_get_shared_variables");
    break;
  }
  case OMPRTL_NVPTX__kmpc_parallel_level: {
    // Build uint16_t __kmpc_parallel_level(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int16Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_parallel_level");
    break;
  }
  case OMPRTL_NVPTX__kmpc_is_spmd_exec_mode: {
    // Build int8_t __kmpc_is_spmd_exec_mode();
    auto *FnTy = llvm::FunctionType::get(CGM.Int8Ty, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_is_spmd_exec_mode");
    break;
  }
  case OMPRTL_NVPTX__kmpc_get_team_static_memory: {
    // Build void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
    // const void *buf, size_t size, int16_t is_shared, const void **res);
    llvm::Type *TypeParams[] = {CGM.Int16Ty, CGM.VoidPtrTy, CGM.SizeTy,
                                CGM.Int16Ty, CGM.VoidPtrPtrTy};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_get_team_static_memory");
    break;
  }
  case OMPRTL_NVPTX__kmpc_restore_team_static_memory: {
    // Build void __kmpc_restore_team_static_memory(int16_t isSPMDExecutionMode,
    // int16_t is_shared);
    llvm::Type *TypeParams[] = {CGM.Int16Ty, CGM.Int16Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn =
        CGM.CreateRuntimeFunction(FnTy, "__kmpc_restore_team_static_memory");
    break;
  }
  case OMPRTL__kmpc_barrier: {
    // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn =
        CGM.CreateConvergentRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
    break;
  }
  case OMPRTL__kmpc_barrier_simple_spmd: {
    // Build void __kmpc_barrier_simple_spmd(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateConvergentRuntimeFunction(
        FnTy, /*Name*/ "__kmpc_barrier_simple_spmd");
    break;
  }
  case OMPRTL_NVPTX__kmpc_warp_active_thread_mask: {
    // Build int32_t __kmpc_warp_active_thread_mask(void);
    auto *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, llvm::None, /*isVarArg=*/false);
    RTLFn = CGM.CreateConvergentRuntimeFunction(
        FnTy, "__kmpc_warp_active_thread_mask");
    break;
  }
  case OMPRTL_NVPTX__kmpc_syncwarp: {
    // Build void __kmpc_syncwarp(kmp_int32 Mask);
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, CGM.Int32Ty, /*isVarArg=*/false);
    RTLFn = CGM.CreateConvergentRuntimeFunction(FnTy, "__kmpc_syncwarp");
    break;
  }
  }
  return RTLFn;
}

void CGOpenMPRuntimeNVPTX::createOffloadEntry(llvm::Constant *ID,
                                              llvm::Constant *Addr,
                                              uint64_t Size, int32_t,
                                              llvm::GlobalValue::LinkageTypes) {
  // TODO: Add support for global variables on the device after declare target
  // support.
  if (!isa<llvm::Function>(Addr))
    return;
  llvm::Module &M = CGM.getModule();
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();

  // Get "nvvm.annotations" metadata node.
  llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(Addr), llvm::MDString::get(Ctx, "kernel"),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
  // Append metadata to nvvm.annotations.
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
void CGOpenMPRuntimeNVPTX::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  if (!IsOffloadEntry) // Nothing to do.
    return;

  assert(!ParentName.empty() && "Invalid target region parent name!");

  bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
  if (Mode)
    emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                   CodeGen);
  else
    emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                      CodeGen);

  setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
}

namespace {
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
/// Enum for accessing the reserved_2 field of the ident_t struct.
enum ModeFlagsTy : unsigned {
  /// Bit set to 1 when in SPMD mode.
  KMP_IDENT_SPMD_MODE = 0x01,
  /// Bit set to 1 when a simplified runtime is used.
  KMP_IDENT_SIMPLE_RT_MODE = 0x02,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
};

/// Special mode Undefined: the combination of non-SPMD mode and the simple
/// runtime.
static const ModeFlagsTy UndefinedMode =
    (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
} // anonymous namespace

unsigned CGOpenMPRuntimeNVPTX::getDefaultLocationReserved2Flags() const {
  switch (getExecutionMode()) {
  case EM_SPMD:
    if (requiresFullRuntime())
      return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
    return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
  case EM_NonSPMD:
    assert(requiresFullRuntime() && "Expected full runtime.");
    return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
  case EM_Unknown:
    return UndefinedMode;
  }
  llvm_unreachable("Unknown flags are requested.");
}

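// For reference, the reserved_2 encodings produced above (illustrative
// summary, derived from the bit values of ModeFlagsTy):
//   SPMD + full runtime      -> 0x01 (SPMD_MODE only)
//   SPMD + simple runtime    -> 0x03 (SPMD_MODE | SIMPLE_RT_MODE)
//   Non-SPMD (full runtime)  -> 0x00
//   Unknown                  -> 0x02 (UndefinedMode)
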
CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
    : CGOpenMPRuntime(CGM, "_", "$") {
  if (!CGM.getLangOpts().OpenMPIsDevice)
    llvm_unreachable("OpenMP NVPTX can only handle device code.");
}

void CGOpenMPRuntimeNVPTX::emitProcBindClause(CodeGenFunction &CGF,
                                              ProcBindKind ProcBind,
                                              SourceLocation Loc) {
  // Do nothing in case of SPMD mode and L0 parallel.
  if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
    return;

  CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
}

void CGOpenMPRuntimeNVPTX::emitNumThreadsClause(CodeGenFunction &CGF,
                                                llvm::Value *NumThreads,
                                                SourceLocation Loc) {
  // Do nothing in case of SPMD mode and L0 parallel.
  if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
    return;

  CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
}

void CGOpenMPRuntimeNVPTX::emitNumTeamsClause(CodeGenFunction &CGF,
                                              const Expr *NumTeams,
                                              const Expr *ThreadLimit,
                                              SourceLocation Loc) {}

llvm::Function *CGOpenMPRuntimeNVPTX::emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    bool &IsInParallelRegion;
    bool PrevIsInParallelRegion;

  public:
    NVPTXPrePostActionTy(bool &IsInParallelRegion)
        : IsInParallelRegion(IsInParallelRegion) {}
    void Enter(CodeGenFunction &CGF) override {
      PrevIsInParallelRegion = IsInParallelRegion;
      IsInParallelRegion = true;
    }
    void Exit(CodeGenFunction &CGF) override {
      IsInParallelRegion = PrevIsInParallelRegion;
    }
  } Action(IsInParallelRegion);
  CodeGen.setAction(Action);
  bool PrevIsInTTDRegion = IsInTTDRegion;
  IsInTTDRegion = false;
  bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
  IsInTargetMasterThreadRegion = false;
  auto *OutlinedFun =
      cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
          D, ThreadIDVar, InnermostKind, CodeGen));
  if (CGM.getLangOpts().Optimize) {
    OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
    OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
    OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
  }
  IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
  IsInTTDRegion = PrevIsInTTDRegion;
  if (getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD &&
      !IsInParallelRegion) {
    llvm::Function *WrapperFun =
        createParallelDataSharingWrapper(OutlinedFun, D);
    WrapperFunctionsMap[OutlinedFun] = WrapperFun;
  }

  return OutlinedFun;
}

/// Get list of lastprivate variables from the teams distribute ... or
/// teams {distribute ...} directives.
static void
getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
                             llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
  assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
         "expected teams directive.");
  const OMPExecutableDirective *Dir = &D;
  if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
    if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild(
            Ctx,
            D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
                /*IgnoreCaptured=*/true))) {
      Dir = dyn_cast_or_null<OMPExecutableDirective>(S);
      if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
        Dir = nullptr;
    }
  }
  if (!Dir)
    return;
  for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
    for (const Expr *E : C->getVarRefs())
      Vars.push_back(getPrivateItem(E));
  }
}

/// Get list of reduction variables from the teams ... directives.
static void
getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
                      llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
  assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
         "expected teams directive.");
  for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
    for (const Expr *E : C->privates())
      Vars.push_back(getPrivateItem(E));
  }
}

llvm::Function *CGOpenMPRuntimeNVPTX::emitTeamsOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  SourceLocation Loc = D.getBeginLoc();

  const RecordDecl *GlobalizedRD = nullptr;
  llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
  llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
  // Globalize team reduction variables unconditionally in all modes.
  if (getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
    getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
  if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD) {
    getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
    if (!LastPrivatesReductions.empty()) {
      GlobalizedRD = ::buildRecordForGlobalizedVars(
          CGM.getContext(), llvm::None, LastPrivatesReductions,
          MappedDeclsFields, WarpSize);
    }
  } else if (!LastPrivatesReductions.empty()) {
    assert(!TeamAndReductions.first &&
           "Previous team declaration is not expected.");
    TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
    std::swap(TeamAndReductions.second, LastPrivatesReductions);
  }

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    SourceLocation &Loc;
    const RecordDecl *GlobalizedRD;
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
        &MappedDeclsFields;

  public:
    NVPTXPrePostActionTy(
        SourceLocation &Loc, const RecordDecl *GlobalizedRD,
        llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
            &MappedDeclsFields)
        : Loc(Loc), GlobalizedRD(GlobalizedRD),
          MappedDeclsFields(MappedDeclsFields) {}
    void Enter(CodeGenFunction &CGF) override {
      auto &Rt =
          static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
      if (GlobalizedRD) {
        auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
        I->getSecond().GlobalRecord = GlobalizedRD;
        I->getSecond().MappedParams =
            std::make_unique<CodeGenFunction::OMPMapVars>();
        DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
        for (const auto &Pair : MappedDeclsFields) {
          assert(Pair.getFirst()->isCanonicalDecl() &&
                 "Expected canonical declaration");
          Data.insert(std::make_pair(Pair.getFirst(),
                                     MappedVarData(Pair.getSecond(),
                                                   /*IsOnePerTeam=*/true)));
        }
      }
      Rt.emitGenericVarsProlog(CGF, Loc);
    }
    void Exit(CodeGenFunction &CGF) override {
      static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
          .emitGenericVarsEpilog(CGF);
    }
  } Action(Loc, GlobalizedRD, MappedDeclsFields);
  CodeGen.setAction(Action);
  llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
      D, ThreadIDVar, InnermostKind, CodeGen);
  if (CGM.getLangOpts().Optimize) {
    OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
    OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
    OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
  }

  return OutlinedFun;
}

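// Globalization in a nutshell (illustrative sketch, not the exact layout the
// helpers compute): locals that escape to other threads are moved out of a
// thread's private stack into a per-team "global record", e.g.
//
//   struct _globalized_locals_ty { int a; double b; ... };
//
// emitGenericVarsProlog below allocates such a record (on the data-sharing
// stack, in team static memory, or in shared memory, depending on the mode)
// and redirects the variables' addresses into it; emitGenericVarsEpilog pops
// the allocation again.
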
void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
                                                 SourceLocation Loc,
                                                 bool WithSPMDCheck) {
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic &&
      getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
    return;

  CGBuilderTy &Bld = CGF.Builder;

  const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
  if (I == FunctionGlobalizedDecls.end())
    return;
  if (const RecordDecl *GlobalizedVarsRecord = I->getSecond().GlobalRecord) {
    QualType GlobalRecTy = CGM.getContext().getRecordType(GlobalizedVarsRecord);
    QualType SecGlobalRecTy;

    // Recover pointer to this function's global record. The runtime will
    // handle the specifics of the allocation of the memory.
    // Use actual memory size of the record including the padding
    // for alignment purposes.
    unsigned Alignment =
        CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
    unsigned GlobalRecordSize =
        CGM.getContext().getTypeSizeInChars(GlobalRecTy).getQuantity();
    GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);

    llvm::PointerType *GlobalRecPtrTy =
        CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo();
    llvm::Value *GlobalRecCastAddr;
    llvm::Value *IsTTD = nullptr;
    if (!IsInTTDRegion &&
        (WithSPMDCheck ||
         getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
      llvm::BasicBlock *SPMDBB = CGF.createBasicBlock(".spmd");
      llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
      if (I->getSecond().SecondaryGlobalRecord.hasValue()) {
        llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
        llvm::Value *ThreadID = getThreadID(CGF, Loc);
        llvm::Value *PL = CGF.EmitRuntimeCall(
            createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
            {RTLoc, ThreadID});
        IsTTD = Bld.CreateIsNull(PL);
      }
      llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
      Bld.CreateCondBr(IsSPMD, SPMDBB, NonSPMDBB);
      // There is no need to emit line number for unconditional branch.
      (void)ApplyDebugLocation::CreateEmpty(CGF);
      CGF.EmitBlock(SPMDBB);
      Address RecPtr = Address(llvm::ConstantPointerNull::get(GlobalRecPtrTy),
                               CharUnits::fromQuantity(Alignment));
      CGF.EmitBranch(ExitBB);
      // There is no need to emit line number for unconditional branch.
      (void)ApplyDebugLocation::CreateEmpty(CGF);
      CGF.EmitBlock(NonSPMDBB);
      llvm::Value *Size = llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize);
      if (const RecordDecl *SecGlobalizedVarsRecord =
              I->getSecond().SecondaryGlobalRecord.getValueOr(nullptr)) {
        SecGlobalRecTy =
            CGM.getContext().getRecordType(SecGlobalizedVarsRecord);

        // Recover pointer to this function's global record. The runtime will
        // handle the specifics of the allocation of the memory.
        // Use actual memory size of the record including the padding
        // for alignment purposes.
        unsigned Alignment =
            CGM.getContext().getTypeAlignInChars(SecGlobalRecTy).getQuantity();
        unsigned GlobalRecordSize =
            CGM.getContext().getTypeSizeInChars(SecGlobalRecTy).getQuantity();
        GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
        Size = Bld.CreateSelect(
            IsTTD, llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize), Size);
      }
      // TODO: allow the usage of shared memory to be controlled by
      // the user, for now, default to global.
      llvm::Value *GlobalRecordSizeArg[] = {
          Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
      llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(
              OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
          GlobalRecordSizeArg);
      GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
          GlobalRecValue, GlobalRecPtrTy);
      CGF.EmitBlock(ExitBB);
      auto *Phi = Bld.CreatePHI(GlobalRecPtrTy,
                                /*NumReservedValues=*/2, "_select_stack");
      Phi->addIncoming(RecPtr.getPointer(), SPMDBB);
      Phi->addIncoming(GlobalRecCastAddr, NonSPMDBB);
      GlobalRecCastAddr = Phi;
      I->getSecond().GlobalRecordAddr = Phi;
      I->getSecond().IsInSPMDModeFlag = IsSPMD;
    } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
      assert(GlobalizedRecords.back().Records.size() < 2 &&
             "Expected less than 2 globalized records: one for target and one "
             "for teams.");
      unsigned Offset = 0;
      for (const RecordDecl *RD : GlobalizedRecords.back().Records) {
        QualType RDTy = CGM.getContext().getRecordType(RD);
        unsigned Alignment =
            CGM.getContext().getTypeAlignInChars(RDTy).getQuantity();
        unsigned Size = CGM.getContext().getTypeSizeInChars(RDTy).getQuantity();
        Offset =
            llvm::alignTo(llvm::alignTo(Offset, Alignment) + Size, Alignment);
      }
      unsigned Alignment =
          CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
      Offset = llvm::alignTo(Offset, Alignment);
      GlobalizedRecords.back().Records.push_back(GlobalizedVarsRecord);
      ++GlobalizedRecords.back().RegionCounter;
      if (GlobalizedRecords.back().Records.size() == 1) {
        assert(KernelStaticGlobalized &&
               "Kernel static pointer must be initialized already.");
        auto *UseSharedMemory = new llvm::GlobalVariable(
            CGM.getModule(), CGM.Int16Ty, /*isConstant=*/true,
            llvm::GlobalValue::InternalLinkage, nullptr,
            "_openmp_static_kernel$is_shared");
        UseSharedMemory->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
        QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
            /*DestWidth=*/16, /*Signed=*/0);
        llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
            Address(UseSharedMemory,
                    CGM.getContext().getTypeAlignInChars(Int16Ty)),
            /*Volatile=*/false, Int16Ty, Loc);
        auto *StaticGlobalized = new llvm::GlobalVariable(
            CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
            llvm::GlobalValue::CommonLinkage, nullptr);
        auto *RecSize = new llvm::GlobalVariable(
            CGM.getModule(), CGM.SizeTy, /*isConstant=*/true,
            llvm::GlobalValue::InternalLinkage, nullptr,
            "_openmp_static_kernel$size");
        RecSize->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
        llvm::Value *Ld = CGF.EmitLoadOfScalar(
            Address(RecSize, CGM.getSizeAlign()), /*Volatile=*/false,
            CGM.getContext().getSizeType(), Loc);
        llvm::Value *ResAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
            KernelStaticGlobalized, CGM.VoidPtrPtrTy);
        llvm::Value *GlobalRecordSizeArg[] = {
            llvm::ConstantInt::get(
                CGM.Int16Ty,
                getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD ? 1 : 0),
            StaticGlobalized, Ld, IsInSharedMemory, ResAddr};
        CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
                                OMPRTL_NVPTX__kmpc_get_team_static_memory),
                            GlobalRecordSizeArg);
        GlobalizedRecords.back().Buffer = StaticGlobalized;
        GlobalizedRecords.back().RecSize = RecSize;
        GlobalizedRecords.back().UseSharedMemory = UseSharedMemory;
        GlobalizedRecords.back().Loc = Loc;
      }
      assert(KernelStaticGlobalized && "Global address must be set already.");
      Address FrameAddr = CGF.EmitLoadOfPointer(
          Address(KernelStaticGlobalized, CGM.getPointerAlign()),
          CGM.getContext()
              .getPointerType(CGM.getContext().VoidPtrTy)
              .castAs<PointerType>());
      llvm::Value *GlobalRecValue =
          Bld.CreateConstInBoundsGEP(FrameAddr, Offset).getPointer();
      I->getSecond().GlobalRecordAddr = GlobalRecValue;
      I->getSecond().IsInSPMDModeFlag = nullptr;
      GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
          GlobalRecValue, CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo());
    } else {
      // TODO: allow the usage of shared memory to be controlled by
      // the user, for now, default to global.
      bool UseSharedMemory =
          IsInTTDRegion && GlobalRecordSize <= SharedMemorySize;
      llvm::Value *GlobalRecordSizeArg[] = {
          llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
          CGF.Builder.getInt16(UseSharedMemory ? 1 : 0)};
      llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(
              IsInTTDRegion
                  ? OMPRTL_NVPTX__kmpc_data_sharing_push_stack
                  : OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
          GlobalRecordSizeArg);
      GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
          GlobalRecValue, GlobalRecPtrTy);
      I->getSecond().GlobalRecordAddr = GlobalRecValue;
      I->getSecond().IsInSPMDModeFlag = nullptr;
    }
    LValue Base =
        CGF.MakeNaturalAlignPointeeAddrLValue(GlobalRecCastAddr, GlobalRecTy);

    // Emit the "global alloca" which is a GEP from the global declaration
    // record using the pointer returned by the runtime.
    LValue SecBase;
    decltype(I->getSecond().LocalVarData)::const_iterator SecIt;
    if (IsTTD) {
      SecIt = I->getSecond().SecondaryLocalVarData->begin();
      llvm::PointerType *SecGlobalRecPtrTy =
          CGF.ConvertTypeForMem(SecGlobalRecTy)->getPointerTo();
      SecBase = CGF.MakeNaturalAlignPointeeAddrLValue(
          Bld.CreatePointerBitCastOrAddrSpaceCast(
              I->getSecond().GlobalRecordAddr, SecGlobalRecPtrTy),
          SecGlobalRecTy);
    }
    for (auto &Rec : I->getSecond().LocalVarData) {
      bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
      llvm::Value *ParValue;
      if (EscapedParam) {
        const auto *VD = cast<VarDecl>(Rec.first);
        LValue ParLVal =
            CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
        ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
      }
      LValue VarAddr = CGF.EmitLValueForField(Base, Rec.second.FD);
      // Emit VarAddr based on lane-id if required.
      QualType VarTy;
      if (Rec.second.IsOnePerTeam) {
        VarTy = Rec.second.FD->getType();
      } else {
        llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(
            VarAddr.getAddress(CGF).getPointer(),
            {Bld.getInt32(0), getNVPTXLaneID(CGF)});
        VarTy =
            Rec.second.FD->getType()->castAsArrayTypeUnsafe()->getElementType();
        VarAddr = CGF.MakeAddrLValue(
            Address(Ptr, CGM.getContext().getDeclAlign(Rec.first)), VarTy,
            AlignmentSource::Decl);
      }
      Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
      if (!IsInTTDRegion &&
          (WithSPMDCheck ||
           getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
        assert(I->getSecond().IsInSPMDModeFlag &&
               "Expected unknown execution mode or required SPMD check.");
        if (IsTTD) {
          assert(SecIt->second.IsOnePerTeam &&
                 "Secondary glob data must be one per team.");
          LValue SecVarAddr = CGF.EmitLValueForField(SecBase, SecIt->second.FD);
          VarAddr.setAddress(
              Address(Bld.CreateSelect(IsTTD, SecVarAddr.getPointer(CGF),
                                       VarAddr.getPointer(CGF)),
                      VarAddr.getAlignment()));
          Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
        }
        Address GlobalPtr = Rec.second.PrivateAddr;
        Address LocalAddr = CGF.CreateMemTemp(VarTy, Rec.second.FD->getName());
        Rec.second.PrivateAddr = Address(
            Bld.CreateSelect(I->getSecond().IsInSPMDModeFlag,
                             LocalAddr.getPointer(), GlobalPtr.getPointer()),
            LocalAddr.getAlignment());
      }
      if (EscapedParam) {
        const auto *VD = cast<VarDecl>(Rec.first);
        CGF.EmitStoreOfScalar(ParValue, VarAddr);
        I->getSecond().MappedParams->setVarAddr(CGF, VD,
                                                VarAddr.getAddress(CGF));
      }
      if (IsTTD)
        ++SecIt;
    }
  }
  for (const ValueDecl *VD : I->getSecond().EscapedVariableLengthDecls) {
    // Recover pointer to this function's global record. The runtime will
    // handle the specifics of the allocation of the memory.
    // Use actual memory size of the record including the padding
    // for alignment purposes.
    CGBuilderTy &Bld = CGF.Builder;
    llvm::Value *Size = CGF.getTypeSize(VD->getType());
    CharUnits Align = CGM.getContext().getDeclAlign(VD);
    Size = Bld.CreateNUWAdd(
        Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
    llvm::Value *AlignVal =
        llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
    Size = Bld.CreateUDiv(Size, AlignVal);
    Size = Bld.CreateNUWMul(Size, AlignVal);
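    // The three operations above round Size up to a multiple of Align; for
    // example (illustrative), Size = 10 and Align = 8 give
    // (10 + 7) / 8 * 8 = 16.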
    // TODO: allow the usage of shared memory to be controlled by
    // the user, for now, default to global.
    llvm::Value *GlobalRecordSizeArg[] = {
        Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
    llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
        createNVPTXRuntimeFunction(
            OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
        GlobalRecordSizeArg);
    llvm::Value *GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
        GlobalRecValue, CGF.ConvertTypeForMem(VD->getType())->getPointerTo());
    LValue Base = CGF.MakeAddrLValue(GlobalRecCastAddr, VD->getType(),
                                     CGM.getContext().getDeclAlign(VD),
                                     AlignmentSource::Decl);
    I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
                                            Base.getAddress(CGF));
    I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(GlobalRecValue);
  }
  I->getSecond().MappedParams->apply(CGF);
}

void CGOpenMPRuntimeNVPTX::emitGenericVarsEpilog(CodeGenFunction &CGF,
                                                 bool WithSPMDCheck) {
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic &&
      getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
    return;

  const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
  if (I != FunctionGlobalizedDecls.end()) {
    I->getSecond().MappedParams->restore(CGF);
    if (!CGF.HaveInsertPoint())
      return;
    for (llvm::Value *Addr :
         llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
      CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
          Addr);
    }
    if (I->getSecond().GlobalRecordAddr) {
      if (!IsInTTDRegion &&
          (WithSPMDCheck ||
           getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
        CGBuilderTy &Bld = CGF.Builder;
        llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
        llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
        Bld.CreateCondBr(I->getSecond().IsInSPMDModeFlag, ExitBB, NonSPMDBB);
        // There is no need to emit line number for unconditional branch.
        (void)ApplyDebugLocation::CreateEmpty(CGF);
        CGF.EmitBlock(NonSPMDBB);
        CGF.EmitRuntimeCall(
            createNVPTXRuntimeFunction(
                OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
            CGF.EmitCastToVoidPtr(I->getSecond().GlobalRecordAddr));
        CGF.EmitBlock(ExitBB);
      } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
        assert(GlobalizedRecords.back().RegionCounter > 0 &&
               "region counter must be > 0.");
        --GlobalizedRecords.back().RegionCounter;
        // Emit the restore function only in the target region.
        if (GlobalizedRecords.back().RegionCounter == 0) {
          QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
              /*DestWidth=*/16, /*Signed=*/0);
          llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
              Address(GlobalizedRecords.back().UseSharedMemory,
                      CGM.getContext().getTypeAlignInChars(Int16Ty)),
              /*Volatile=*/false, Int16Ty, GlobalizedRecords.back().Loc);
          llvm::Value *Args[] = {
              llvm::ConstantInt::get(
                  CGM.Int16Ty,
                  getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD ? 1 : 0),
              IsInSharedMemory};
          CGF.EmitRuntimeCall(
              createNVPTXRuntimeFunction(
                  OMPRTL_NVPTX__kmpc_restore_team_static_memory),
              Args);
        }
      } else {
        CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
                                OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
                            I->getSecond().GlobalRecordAddr);
      }
    }
  }
}

void CGOpenMPRuntimeNVPTX::emitTeamsCall(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &D,
                                         SourceLocation Loc,
                                         llvm::Function *OutlinedFn,
                                         ArrayRef<llvm::Value *> CapturedVars) {
  if (!CGF.HaveInsertPoint())
    return;

  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                                      /*Name=*/".zero.addr");
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
  llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
  OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
  OutlinedFnArgs.push_back(ZeroAddr.getPointer());
  OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
  emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
}

void CGOpenMPRuntimeNVPTX::emitParallelCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
  if (!CGF.HaveInsertPoint())
    return;

  if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
    emitSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
  else
    emitNonSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
}

void CGOpenMPRuntimeNVPTX::emitNonSPMDParallelCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
  llvm::Function *Fn = cast<llvm::Function>(OutlinedFn);

  // Force inline this outlined function at its call site.
  Fn->setLinkage(llvm::GlobalValue::InternalLinkage);

  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                                      /*Name=*/".zero.addr");
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
  // ThreadId for serialized parallels is 0.
  Address ThreadIDAddr = ZeroAddr;
  auto &&CodeGen = [this, Fn, CapturedVars, Loc, &ThreadIDAddr](
                       CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);

    Address ZeroAddr =
        CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                         /*Name=*/".bound.zero.addr");
    CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
    llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
    OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
    OutlinedFnArgs.push_back(ZeroAddr.getPointer());
    OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
    emitOutlinedFunctionCall(CGF, Loc, Fn, OutlinedFnArgs);
  };
  auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
                                        PrePostActionTy &) {
    RegionCodeGenTy RCG(CodeGen);
    llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
    llvm::Value *ThreadID = getThreadID(CGF, Loc);
    llvm::Value *Args[] = {RTLoc, ThreadID};

    NVPTXActionTy Action(
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
        Args,
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
        Args);
    RCG.setAction(Action);
    RCG(CGF);
  };
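  // SeqGen wraps the outlined region in the serialized-parallel runtime
  // entry points; the emitted sequence is roughly (illustrative):
  //
  //   __kmpc_serialized_parallel(loc, gtid);
  //   outlined_fn(&thread_id, &zero, captured...);
  //   __kmpc_end_serialized_parallel(loc, gtid);
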
  auto &&L0ParallelGen = [this, CapturedVars, Fn](CodeGenFunction &CGF,
                                                  PrePostActionTy &Action) {
    CGBuilderTy &Bld = CGF.Builder;
    llvm::Function *WFn = WrapperFunctionsMap[Fn];
    assert(WFn && "Wrapper function does not exist!");
    llvm::Value *ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);

    // Prepare for parallel region. Indicate the outlined function.
    llvm::Value *Args[] = {ID};
    CGF.EmitRuntimeCall(
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_prepare_parallel),
        Args);

    // Create a private scope that will globalize the arguments
    // passed from the outside of the target region.
    CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);

    // There's something to share.
    if (!CapturedVars.empty()) {
      // Prepare for parallel region. Indicate the outlined function.
      Address SharedArgs =
          CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "shared_arg_refs");
      llvm::Value *SharedArgsPtr = SharedArgs.getPointer();

      llvm::Value *DataSharingArgs[] = {
          SharedArgsPtr,
          llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
      CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
                              OMPRTL_NVPTX__kmpc_begin_sharing_variables),
                          DataSharingArgs);

      // Store variable address in a list of references to pass to workers.
      unsigned Idx = 0;
      ASTContext &Ctx = CGF.getContext();
      Address SharedArgListAddress = CGF.EmitLoadOfPointer(
          SharedArgs, Ctx.getPointerType(Ctx.getPointerType(Ctx.VoidPtrTy))
                          .castAs<PointerType>());
      for (llvm::Value *V : CapturedVars) {
        Address Dst = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
        llvm::Value *PtrV;
        if (V->getType()->isIntegerTy())
          PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
        else
          PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
        CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
                              Ctx.getPointerType(Ctx.VoidPtrTy));
        ++Idx;
      }
    }

    // Activate workers. This barrier is used by the master to signal
    // work for the workers.
    syncCTAThreads(CGF);

    // OpenMP [2.5, Parallel Construct, p.49]
    // There is an implied barrier at the end of a parallel region. After the
    // end of a parallel region, only the master thread of the team resumes
    // execution of the enclosing task region.
    //
    // The master waits at this barrier until all workers are done.
    syncCTAThreads(CGF);

    if (!CapturedVars.empty())
      CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_sharing_variables));

    // Remember for post-processing in worker loop.
    Work.emplace_back(WFn);
  };

  auto &&LNParallelGen = [this, Loc, &SeqGen, &L0ParallelGen](
                             CodeGenFunction &CGF, PrePostActionTy &Action) {
    if (IsInParallelRegion) {
      SeqGen(CGF, Action);
    } else if (IsInTargetMasterThreadRegion) {
      L0ParallelGen(CGF, Action);
    } else {
      // Check for master and then parallelism:
      // if (__kmpc_is_spmd_exec_mode() || __kmpc_parallel_level(loc, gtid)) {
      //   Serialized execution.
      // } else {
      //   Worker call.
      // }
      CGBuilderTy &Bld = CGF.Builder;
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
      llvm::BasicBlock *SeqBB = CGF.createBasicBlock(".sequential");
      llvm::BasicBlock *ParallelCheckBB = CGF.createBasicBlock(".parcheck");
      llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
      llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
      Bld.CreateCondBr(IsSPMD, SeqBB, ParallelCheckBB);
      // There is no need to emit line number for unconditional branch.
      (void)ApplyDebugLocation::CreateEmpty(CGF);
      CGF.EmitBlock(ParallelCheckBB);
      llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
      llvm::Value *ThreadID = getThreadID(CGF, Loc);
      llvm::Value *PL = CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
          {RTLoc, ThreadID});
      llvm::Value *Res = Bld.CreateIsNotNull(PL);
      Bld.CreateCondBr(Res, SeqBB, MasterBB);
      CGF.EmitBlock(SeqBB);
      SeqGen(CGF, Action);
      CGF.EmitBranch(ExitBB);
      // There is no need to emit line number for unconditional branch.
      (void)ApplyDebugLocation::CreateEmpty(CGF);
      CGF.EmitBlock(MasterBB);
      L0ParallelGen(CGF, Action);
      CGF.EmitBranch(ExitBB);
      // There is no need to emit line number for unconditional branch.
      (void)ApplyDebugLocation::CreateEmpty(CGF);
      // Emit the continuation block for code after the if.
      CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
    }
  };

  if (IfCond) {
    emitIfClause(CGF, IfCond, LNParallelGen, SeqGen);
  } else {
    CodeGenFunction::RunCleanupsScope Scope(CGF);
    RegionCodeGenTy ThenRCG(LNParallelGen);
    ThenRCG(CGF);
  }
}

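// In summary, a non-SPMD parallel call lowers to one of three shapes:
// nested inside another parallel region it is serialized (SeqGen); in the
// target master thread it hands work off to the workers (L0ParallelGen);
// otherwise a runtime check on the execution mode and the parallel level
// picks between the two (LNParallelGen).
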
void CGOpenMPRuntimeNVPTX::emitSPMDParallelCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
  // Just call the outlined function to execute the parallel region.
  // OutlinedFn(&gtid, &zero, CapturedStruct);
  //
  llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;

  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                                      /*Name=*/".zero.addr");
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
  // ThreadId for serialized parallels is 0.
  Address ThreadIDAddr = ZeroAddr;
  auto &&CodeGen = [this, OutlinedFn, CapturedVars, Loc, &ThreadIDAddr](
                       CodeGenFunction &CGF, PrePostActionTy &Action) {
    Action.Enter(CGF);

    Address ZeroAddr =
        CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                         /*Name=*/".bound.zero.addr");
    CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
    llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
    OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
    OutlinedFnArgs.push_back(ZeroAddr.getPointer());
    OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
    emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
  };
  auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
                                        PrePostActionTy &) {
    RegionCodeGenTy RCG(CodeGen);
    llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
    llvm::Value *ThreadID = getThreadID(CGF, Loc);
    llvm::Value *Args[] = {RTLoc, ThreadID};

    NVPTXActionTy Action(
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
        Args,
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
        Args);
    RCG.setAction(Action);
    RCG(CGF);
  };

  if (IsInTargetMasterThreadRegion) {
    // In the worker we need to use the real thread id.
    ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
    RegionCodeGenTy RCG(CodeGen);
    RCG(CGF);
  } else {
    // If we are not in the target region, it is definitely L2 parallelism or
    // more, because for SPMD mode we always have an L1 parallel level, so we
    // don't need to check for orphaned directives.
    RegionCodeGenTy RCG(SeqGen);
    RCG(CGF);
  }
}

void CGOpenMPRuntimeNVPTX::syncCTAThreads(CodeGenFunction &CGF) {
  // Always emit simple barriers!
  if (!CGF.HaveInsertPoint())
    return;
  // Build call __kmpc_barrier_simple_spmd(nullptr, 0);
  // This function does not use parameters, so we can emit just default values.
  llvm::Value *Args[] = {
      llvm::ConstantPointerNull::get(
          cast<llvm::PointerType>(getIdentTyPointerTy())),
      llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)};
  llvm::CallInst *Call = CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier_simple_spmd), Args);
  Call->setConvergent();
}

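// Note: syncCTAThreads synchronizes every thread in the CTA, whether or not
// it is active in the current parallel region; __kmpc_barrier_simple_spmd is
// expected to lower to a full-block barrier on NVPTX, and the null loc/tid
// arguments are ignored by the entry point.
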
void CGOpenMPRuntimeNVPTX::emitBarrierCall(CodeGenFunction &CGF,
                                           SourceLocation Loc,
                                           OpenMPDirectiveKind Kind, bool,
                                           bool) {
  // Always emit simple barriers!
  if (!CGF.HaveInsertPoint())
    return;
  // Build call __kmpc_cancel_barrier(loc, thread_id);
  unsigned Flags = getDefaultFlagsForBarriers(Kind);
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
                         getThreadID(CGF, Loc)};
  llvm::CallInst *Call = CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier), Args);
  Call->setConvergent();
}

void CGOpenMPRuntimeNVPTX::emitCriticalRegion(
    CodeGenFunction &CGF, StringRef CriticalName,
    const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
    const Expr *Hint) {
  llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop");
  llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test");
  llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync");
  llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body");
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit");

  // Get the mask of active threads in the warp.
  llvm::Value *Mask = CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_warp_active_thread_mask));
  // Fetch team-local id of the thread.
  llvm::Value *ThreadID = getNVPTXThreadID(CGF);

  // Get the width of the team.
  llvm::Value *TeamWidth = getNVPTXNumThreads(CGF);

  // Initialize the counter variable for the loop.
  QualType Int32Ty =
      CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0);
  Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter");
  LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty);
  CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal,
                        /*isInit=*/true);

  // Block checks if loop counter exceeds upper bound.
  CGF.EmitBlock(LoopBB);
  llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
  llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth);
  CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB);

  // Block tests which thread should execute the region, and which threads
  // should go straight to the synchronisation point.
  CGF.EmitBlock(TestBB);
  CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
  llvm::Value *CmpThreadToCounter =
      CGF.Builder.CreateICmpEQ(ThreadID, CounterVal);
  CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB);

  // Block emits the body of the critical region.
  CGF.EmitBlock(BodyBB);

  // Output the critical statement.
  CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc,
                                      Hint);

  // After the body surrounded by the critical region, the single executing
  // thread will jump to the synchronisation point.
  // Block waits for all threads in current team to finish then increments the
  // counter variable and returns to the loop.
  CGF.EmitBlock(SyncBB);
  // Reconverge active threads in the warp.
  (void)CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_syncwarp), Mask);

  llvm::Value *IncCounterVal =
      CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
  CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal);
  CGF.EmitBranch(LoopBB);

  // Block that is reached when all threads in the team complete the region.
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

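// Conceptually, every thread in the team runs the turn-taking loop emitted
// above (illustrative pseudocode):
//
//   mask = __kmpc_warp_active_thread_mask();
//   for (counter = 0; counter < team_width; ++counter) {
//     if (thread_id == counter)
//       <critical region body>;   // exactly one thread per iteration
//     __kmpc_syncwarp(mask);      // reconverge before the next turn
//   }
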
/// Cast value to the specified type.
static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
                                    QualType ValTy, QualType CastTy,
                                    SourceLocation Loc) {
  assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
         "Cast type must be sized.");
  assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
         "Val type must be sized.");
  llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
  if (ValTy == CastTy)
    return Val;
  if (CGF.getContext().getTypeSizeInChars(ValTy) ==
      CGF.getContext().getTypeSizeInChars(CastTy))
    return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
  if (CastTy->isIntegerType() && ValTy->isIntegerType())
    return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
                                     CastTy->hasSignedIntegerRepresentation());
  Address CastItem = CGF.CreateMemTemp(CastTy);
  Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()));
  CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy,
                        LValueBaseInfo(AlignmentSource::Type),
                        TBAAAccessInfo());
  return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc,
                              LValueBaseInfo(AlignmentSource::Type),
                              TBAAAccessInfo());
}

/// This function creates calls to one of two shuffle functions to copy
/// variables between lanes in a warp.
static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
                                                 llvm::Value *Elem,
                                                 QualType ElemType,
                                                 llvm::Value *Offset,
                                                 SourceLocation Loc) {
  CodeGenModule &CGM = CGF.CGM;
  CGBuilderTy &Bld = CGF.Builder;
  CGOpenMPRuntimeNVPTX &RT =
      *(static_cast<CGOpenMPRuntimeNVPTX *>(&CGM.getOpenMPRuntime()));

  CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
  assert(Size.getQuantity() <= 8 &&
         "Unsupported bitwidth in shuffle instruction.");

  OpenMPRTLFunctionNVPTX ShuffleFn = Size.getQuantity() <= 4
                                         ? OMPRTL_NVPTX__kmpc_shuffle_int32
                                         : OMPRTL_NVPTX__kmpc_shuffle_int64;

  // Cast all types to 32- or 64-bit values before calling shuffle routines.
  QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
      Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
  llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
  llvm::Value *WarpSize =
      Bld.CreateIntCast(getNVPTXWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);

  llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
      RT.createNVPTXRuntimeFunction(ShuffleFn), {ElemCast, Offset, WarpSize});

  return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
}

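// For example (illustrative), shuffling a 12-byte element through
// shuffleAndStore below decomposes into one 64-bit __kmpc_shuffle_int64
// followed by one 32-bit __kmpc_shuffle_int32; when more than one chunk of a
// given size remains, a loop over the chunks is emitted instead.
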
static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
                            Address DestAddr, QualType ElemType,
                            llvm::Value *Offset, SourceLocation Loc) {
  CGBuilderTy &Bld = CGF.Builder;

  CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
  // Create the loop over the big sized data.
  // ptr = (void*)Elem;
  // ptrEnd = (void*) Elem + 1;
  // Step = 8;
  // while (ptr + Step < ptrEnd)
  //   shuffle((int64_t)*ptr);
  // Step = 4;
  // while (ptr + Step < ptrEnd)
  //   shuffle((int32_t)*ptr);
  // ...
  Address ElemPtr = DestAddr;
  Address Ptr = SrcAddr;
  Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
      Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy);
  for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
    if (Size < CharUnits::fromQuantity(IntSize))
      continue;
    QualType IntType = CGF.getContext().getIntTypeForBitwidth(
        CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
        /*Signed=*/1);
    llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
    Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo());
    ElemPtr =
        Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo());
    if (Size.getQuantity() / IntSize > 1) {
      llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
      llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
      llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit");
      llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock();
      CGF.EmitBlock(PreCondBB);
      llvm::PHINode *PhiSrc =
          Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
      PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
      llvm::PHINode *PhiDest =
          Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
      PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
      Ptr = Address(PhiSrc, Ptr.getAlignment());
      ElemPtr = Address(PhiDest, ElemPtr.getAlignment());
      llvm::Value *PtrDiff = Bld.CreatePtrDiff(
          PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast(
                                   Ptr.getPointer(), CGF.VoidPtrTy));
      Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
                       ThenBB, ExitBB);
      CGF.EmitBlock(ThenBB);
      llvm::Value *Res = createRuntimeShuffleFunction(
          CGF,
          CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
                               LValueBaseInfo(AlignmentSource::Type),
                               TBAAAccessInfo()),
          IntType, Offset, Loc);
      CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
                            LValueBaseInfo(AlignmentSource::Type),
                            TBAAAccessInfo());
      Address LocalPtr = Bld.CreateConstGEP(Ptr, 1);
      Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
      PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
      PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
      CGF.EmitBranch(PreCondBB);
      CGF.EmitBlock(ExitBB);
    } else {
      llvm::Value *Res = createRuntimeShuffleFunction(
          CGF,
          CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc,
                               LValueBaseInfo(AlignmentSource::Type),
                               TBAAAccessInfo()),
          IntType, Offset, Loc);
      CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType,
                            LValueBaseInfo(AlignmentSource::Type),
                            TBAAAccessInfo());
      Ptr = Bld.CreateConstGEP(Ptr, 1);
      ElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
    }
    Size = Size % IntSize;
  }
}

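// The helpers below move "Reduce lists" (arrays of pointers to the private
// reduction copies) between lanes, threads, and scratchpad memory;
// RemoteLaneToThread additionally shuffles each element in from another lane
// using shuffleAndStore above.
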
2994  ScratchpadToThread,
2995};
2996} // namespace
2997
2998struct CopyOptionsTy {
2999  llvm::Value *RemoteLaneOffset;
3000  llvm::Value *ScratchpadIndex;
3001  llvm::Value *ScratchpadWidth;
3002};
3003
3004/// Emit instructions to copy a Reduce list, which contains partially
3005/// aggregated values, in the specified direction.
3006static void emitReductionListCopy(
3007    CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
3008    ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
3009    CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
3010
3011  CodeGenModule &CGM = CGF.CGM;
3012  ASTContext &C = CGM.getContext();
3013  CGBuilderTy &Bld = CGF.Builder;
3014
3015  llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
3016  llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
3017  llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;
3018
3019  // Iterates, element by element, through the source Reduce list and
3020  // makes a copy.
3021  unsigned Idx = 0;
3022  unsigned Size = Privates.size();
3023  for (const Expr *Private : Privates) {
3024    Address SrcElementAddr = Address::invalid();
3025    Address DestElementAddr = Address::invalid();
3026    Address DestElementPtrAddr = Address::invalid();
3027    // Should we shuffle in an element from a remote lane?
3028    bool ShuffleInElement = false;
3029    // Set to true to update the pointer in the dest Reduce list to a
3030    // newly created element.
3031    bool UpdateDestListPtr = false;
3032    // Increment the src or dest pointer to the scratchpad, for each
3033    // new element.
3034    bool IncrScratchpadSrc = false;
3035    bool IncrScratchpadDest = false;
3036
3037    switch (Action) {
3038    case RemoteLaneToThread: {
3039      // Step 1.1: Get the address for the src element in the Reduce list.
3040      Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
3041      SrcElementAddr = CGF.EmitLoadOfPointer(
3042          SrcElementPtrAddr,
3043          C.getPointerType(Private->getType())->castAs<PointerType>());
3044
3045      // Step 1.2: Create a temporary to store the element in the destination
3046      // Reduce list.
3047      DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
3048      DestElementAddr =
3049          CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
3050      ShuffleInElement = true;
3051      UpdateDestListPtr = true;
3052      break;
3053    }
3054    case ThreadCopy: {
3055      // Step 1.1: Get the address for the src element in the Reduce list.
3056      Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
3057      SrcElementAddr = CGF.EmitLoadOfPointer(
3058          SrcElementPtrAddr,
3059          C.getPointerType(Private->getType())->castAs<PointerType>());
3060
3061      // Step 1.2: Get the address for dest element. The destination
3062      // element has already been created on the thread's stack.
3063      DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
3064      DestElementAddr = CGF.EmitLoadOfPointer(
3065          DestElementPtrAddr,
3066          C.getPointerType(Private->getType())->castAs<PointerType>());
3067      break;
3068    }
3069    case ThreadToScratchpad: {
3070      // Step 1.1: Get the address for the src element in the Reduce list.
3071      Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
3072      SrcElementAddr = CGF.EmitLoadOfPointer(
3073          SrcElementPtrAddr,
3074          C.getPointerType(Private->getType())->castAs<PointerType>());
3075
3076      // Step 1.2: Get the address for dest element:
3077      // address = base + index * ElementSizeInChars.
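      // For exposition (hypothetical numbers, not emitted code): with an
      // 8-byte element, the slot for scratchpad index 3 starts at
      // base + 3 * 8. Each team writes at a distinct index, so, as noted
      // in the inter-team reduction discussion below, no locking is needed.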
3078 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType()); 3079 llvm::Value *CurrentOffset = 3080 Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex); 3081 llvm::Value *ScratchPadElemAbsolutePtrVal = 3082 Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset); 3083 ScratchPadElemAbsolutePtrVal = 3084 Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy); 3085 DestElementAddr = Address(ScratchPadElemAbsolutePtrVal, 3086 C.getTypeAlignInChars(Private->getType())); 3087 IncrScratchpadDest = true; 3088 break; 3089 } 3090 case ScratchpadToThread: { 3091 // Step 1.1: Get the address for the src element in the scratchpad. 3092 // address = base + index * ElementSizeInChars. 3093 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType()); 3094 llvm::Value *CurrentOffset = 3095 Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex); 3096 llvm::Value *ScratchPadElemAbsolutePtrVal = 3097 Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset); 3098 ScratchPadElemAbsolutePtrVal = 3099 Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy); 3100 SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal, 3101 C.getTypeAlignInChars(Private->getType())); 3102 IncrScratchpadSrc = true; 3103 3104 // Step 1.2: Create a temporary to store the element in the destination 3105 // Reduce list. 3106 DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx); 3107 DestElementAddr = 3108 CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element"); 3109 UpdateDestListPtr = true; 3110 break; 3111 } 3112 } 3113 3114 // Regardless of src and dest of copy, we emit the load of src 3115 // element as this is required in all directions 3116 SrcElementAddr = Bld.CreateElementBitCast( 3117 SrcElementAddr, CGF.ConvertTypeForMem(Private->getType())); 3118 DestElementAddr = Bld.CreateElementBitCast(DestElementAddr, 3119 SrcElementAddr.getElementType()); 3120 3121 // Now that all active lanes have read the element in the 3122 // Reduce list, shuffle over the value from the remote lane. 3123 if (ShuffleInElement) { 3124 shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(), 3125 RemoteLaneOffset, Private->getExprLoc()); 3126 } else { 3127 switch (CGF.getEvaluationKind(Private->getType())) { 3128 case TEK_Scalar: { 3129 llvm::Value *Elem = CGF.EmitLoadOfScalar( 3130 SrcElementAddr, /*Volatile=*/false, Private->getType(), 3131 Private->getExprLoc(), LValueBaseInfo(AlignmentSource::Type), 3132 TBAAAccessInfo()); 3133 // Store the source element value to the dest element address. 3134 CGF.EmitStoreOfScalar( 3135 Elem, DestElementAddr, /*Volatile=*/false, Private->getType(), 3136 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()); 3137 break; 3138 } 3139 case TEK_Complex: { 3140 CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex( 3141 CGF.MakeAddrLValue(SrcElementAddr, Private->getType()), 3142 Private->getExprLoc()); 3143 CGF.EmitStoreOfComplex( 3144 Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()), 3145 /*isInit=*/false); 3146 break; 3147 } 3148 case TEK_Aggregate: 3149 CGF.EmitAggregateCopy( 3150 CGF.MakeAddrLValue(DestElementAddr, Private->getType()), 3151 CGF.MakeAddrLValue(SrcElementAddr, Private->getType()), 3152 Private->getType(), AggValueSlot::DoesNotOverlap); 3153 break; 3154 } 3155 } 3156 3157 // Step 3.1: Modify reference in dest Reduce list as needed. 3158 // Modifying the reference in Reduce list to point to the newly 3159 // created element. 
The element is live in the current function 3160 // scope and that of functions it invokes (i.e., reduce_function). 3161 // RemoteReduceData[i] = (void*)&RemoteElem 3162 if (UpdateDestListPtr) { 3163 CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast( 3164 DestElementAddr.getPointer(), CGF.VoidPtrTy), 3165 DestElementPtrAddr, /*Volatile=*/false, 3166 C.VoidPtrTy); 3167 } 3168 3169 // Step 4.1: Increment SrcBase/DestBase so that it points to the starting 3170 // address of the next element in scratchpad memory, unless we're currently 3171 // processing the last one. Memory alignment is also taken care of here. 3172 if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) { 3173 llvm::Value *ScratchpadBasePtr = 3174 IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer(); 3175 llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType()); 3176 ScratchpadBasePtr = Bld.CreateNUWAdd( 3177 ScratchpadBasePtr, 3178 Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars)); 3179 3180 // Take care of global memory alignment for performance 3181 ScratchpadBasePtr = Bld.CreateNUWSub( 3182 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1)); 3183 ScratchpadBasePtr = Bld.CreateUDiv( 3184 ScratchpadBasePtr, 3185 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment)); 3186 ScratchpadBasePtr = Bld.CreateNUWAdd( 3187 ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1)); 3188 ScratchpadBasePtr = Bld.CreateNUWMul( 3189 ScratchpadBasePtr, 3190 llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment)); 3191 3192 if (IncrScratchpadDest) 3193 DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign()); 3194 else /* IncrScratchpadSrc = true */ 3195 SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign()); 3196 } 3197 3198 ++Idx; 3199 } 3200} 3201 3202/// This function emits a helper that gathers Reduce lists from the first 3203/// lane of every active warp to lanes in the first warp. 3204/// 3205/// void inter_warp_copy_func(void* reduce_data, num_warps) 3206/// shared smem[warp_size]; 3207/// For all data entries D in reduce_data: 3208/// sync 3209/// If (I am the first lane in each warp) 3210/// Copy my local D to smem[warp_id] 3211/// sync 3212/// if (I am the first warp) 3213/// Copy smem[thread_id] to my local D 3214static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM, 3215 ArrayRef<const Expr *> Privates, 3216 QualType ReductionArrayTy, 3217 SourceLocation Loc) { 3218 ASTContext &C = CGM.getContext(); 3219 llvm::Module &M = CGM.getModule(); 3220 3221 // ReduceList: thread local Reduce list. 3222 // At the stage of the computation when this function is called, partially 3223 // aggregated values reside in the first lane of every active warp. 3224 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 3225 C.VoidPtrTy, ImplicitParamDecl::Other); 3226 // NumWarps: number of warps active in the parallel region. This could 3227 // be smaller than 32 (max warps in a CTA) for partial block reduction. 
3228 ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 3229 C.getIntTypeForBitwidth(32, /* Signed */ true), 3230 ImplicitParamDecl::Other); 3231 FunctionArgList Args; 3232 Args.push_back(&ReduceListArg); 3233 Args.push_back(&NumWarpsArg); 3234 3235 const CGFunctionInfo &CGFI = 3236 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 3237 auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI), 3238 llvm::GlobalValue::InternalLinkage, 3239 "_omp_reduction_inter_warp_copy_func", &M); 3240 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); 3241 Fn->setDoesNotRecurse(); 3242 CodeGenFunction CGF(CGM); 3243 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); 3244 3245 CGBuilderTy &Bld = CGF.Builder; 3246 3247 // This array is used as a medium to transfer, one reduce element at a time, 3248 // the data from the first lane of every warp to lanes in the first warp 3249 // in order to perform the final step of a reduction in a parallel region 3250 // (reduction across warps). The array is placed in NVPTX __shared__ memory 3251 // for reduced latency, as well as to have a distinct copy for concurrently 3252 // executing target regions. The array is declared with common linkage so 3253 // as to be shared across compilation units. 3254 StringRef TransferMediumName = 3255 "__openmp_nvptx_data_transfer_temporary_storage"; 3256 llvm::GlobalVariable *TransferMedium = 3257 M.getGlobalVariable(TransferMediumName); 3258 if (!TransferMedium) { 3259 auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize); 3260 unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared); 3261 TransferMedium = new llvm::GlobalVariable( 3262 M, Ty, /*isConstant=*/false, llvm::GlobalVariable::CommonLinkage, 3263 llvm::Constant::getNullValue(Ty), TransferMediumName, 3264 /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal, 3265 SharedAddressSpace); 3266 CGM.addCompilerUsedGlobal(TransferMedium); 3267 } 3268 3269 // Get the CUDA thread id of the current OpenMP thread on the GPU. 3270 llvm::Value *ThreadID = getNVPTXThreadID(CGF); 3271 // nvptx_lane_id = nvptx_id % warpsize 3272 llvm::Value *LaneID = getNVPTXLaneID(CGF); 3273 // nvptx_warp_id = nvptx_id / warpsize 3274 llvm::Value *WarpID = getNVPTXWarpID(CGF); 3275 3276 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); 3277 Address LocalReduceList( 3278 Bld.CreatePointerBitCastOrAddrSpaceCast( 3279 CGF.EmitLoadOfScalar( 3280 AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc, 3281 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()), 3282 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()), 3283 CGF.getPointerAlign()); 3284 3285 unsigned Idx = 0; 3286 for (const Expr *Private : Privates) { 3287 // 3288 // Warp master copies reduce element to transfer medium in __shared__ 3289 // memory. 
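    // As a rough illustration of the chunking below (exposition only): an
    // 8-byte 'double' travels through the 32-bit medium in two 4-byte
    // iterations (NumIters == 2), a 2-byte 'short' needs a single 2-byte
    // pass, and a 3-byte type takes one 2-byte pass plus one 1-byte pass.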
3290 // 3291 unsigned RealTySize = 3292 C.getTypeSizeInChars(Private->getType()) 3293 .alignTo(C.getTypeAlignInChars(Private->getType())) 3294 .getQuantity(); 3295 for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /=2) { 3296 unsigned NumIters = RealTySize / TySize; 3297 if (NumIters == 0) 3298 continue; 3299 QualType CType = C.getIntTypeForBitwidth( 3300 C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1); 3301 llvm::Type *CopyType = CGF.ConvertTypeForMem(CType); 3302 CharUnits Align = CharUnits::fromQuantity(TySize); 3303 llvm::Value *Cnt = nullptr; 3304 Address CntAddr = Address::invalid(); 3305 llvm::BasicBlock *PrecondBB = nullptr; 3306 llvm::BasicBlock *ExitBB = nullptr; 3307 if (NumIters > 1) { 3308 CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr"); 3309 CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr, 3310 /*Volatile=*/false, C.IntTy); 3311 PrecondBB = CGF.createBasicBlock("precond"); 3312 ExitBB = CGF.createBasicBlock("exit"); 3313 llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body"); 3314 // There is no need to emit line number for unconditional branch. 3315 (void)ApplyDebugLocation::CreateEmpty(CGF); 3316 CGF.EmitBlock(PrecondBB); 3317 Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc); 3318 llvm::Value *Cmp = 3319 Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters)); 3320 Bld.CreateCondBr(Cmp, BodyBB, ExitBB); 3321 CGF.EmitBlock(BodyBB); 3322 } 3323 // kmpc_barrier. 3324 CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown, 3325 /*EmitChecks=*/false, 3326 /*ForceSimpleCall=*/true); 3327 llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then"); 3328 llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else"); 3329 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont"); 3330 3331 // if (lane_id == 0) 3332 llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master"); 3333 Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB); 3334 CGF.EmitBlock(ThenBB); 3335 3336 // Reduce element = LocalReduceList[i] 3337 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx); 3338 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar( 3339 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation()); 3340 // elemptr = ((CopyType*)(elemptrptr)) + I 3341 Address ElemPtr = Address(ElemPtrPtr, Align); 3342 ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType); 3343 if (NumIters > 1) { 3344 ElemPtr = Address(Bld.CreateGEP(ElemPtr.getPointer(), Cnt), 3345 ElemPtr.getAlignment()); 3346 } 3347 3348 // Get pointer to location in transfer medium. 3349 // MediumPtr = &medium[warp_id] 3350 llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP( 3351 TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID}); 3352 Address MediumPtr(MediumPtrVal, Align); 3353 // Casting to actual data type. 3354 // MediumPtr = (CopyType*)MediumPtrAddr; 3355 MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType); 3356 3357 // elem = *elemptr 3358 //*MediumPtr = elem 3359 llvm::Value *Elem = CGF.EmitLoadOfScalar( 3360 ElemPtr, /*Volatile=*/false, CType, Loc, 3361 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()); 3362 // Store the source element value to the dest element address. 3363 CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType, 3364 LValueBaseInfo(AlignmentSource::Type), 3365 TBAAAccessInfo()); 3366 3367 Bld.CreateBr(MergeBB); 3368 3369 CGF.EmitBlock(ElseBB); 3370 Bld.CreateBr(MergeBB); 3371 3372 CGF.EmitBlock(MergeBB); 3373 3374 // kmpc_barrier. 
3375 CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown, 3376 /*EmitChecks=*/false, 3377 /*ForceSimpleCall=*/true); 3378 3379 // 3380 // Warp 0 copies reduce element from transfer medium. 3381 // 3382 llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then"); 3383 llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else"); 3384 llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont"); 3385 3386 Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg); 3387 llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar( 3388 AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc); 3389 3390 // Up to 32 threads in warp 0 are active. 3391 llvm::Value *IsActiveThread = 3392 Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread"); 3393 Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB); 3394 3395 CGF.EmitBlock(W0ThenBB); 3396 3397 // SrcMediumPtr = &medium[tid] 3398 llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP( 3399 TransferMedium, 3400 {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID}); 3401 Address SrcMediumPtr(SrcMediumPtrVal, Align); 3402 // SrcMediumVal = *SrcMediumPtr; 3403 SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType); 3404 3405 // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I 3406 Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx); 3407 llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar( 3408 TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc); 3409 Address TargetElemPtr = Address(TargetElemPtrVal, Align); 3410 TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType); 3411 if (NumIters > 1) { 3412 TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getPointer(), Cnt), 3413 TargetElemPtr.getAlignment()); 3414 } 3415 3416 // *TargetElemPtr = SrcMediumVal; 3417 llvm::Value *SrcMediumValue = 3418 CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc); 3419 CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false, 3420 CType); 3421 Bld.CreateBr(W0MergeBB); 3422 3423 CGF.EmitBlock(W0ElseBB); 3424 Bld.CreateBr(W0MergeBB); 3425 3426 CGF.EmitBlock(W0MergeBB); 3427 3428 if (NumIters > 1) { 3429 Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1)); 3430 CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy); 3431 CGF.EmitBranch(PrecondBB); 3432 (void)ApplyDebugLocation::CreateEmpty(CGF); 3433 CGF.EmitBlock(ExitBB); 3434 } 3435 RealTySize %= TySize; 3436 } 3437 ++Idx; 3438 } 3439 3440 CGF.FinishFunction(); 3441 return Fn; 3442} 3443 3444/// Emit a helper that reduces data across two OpenMP threads (lanes) 3445/// in the same warp. It uses shuffle instructions to copy over data from 3446/// a remote lane's stack. The reduction algorithm performed is specified 3447/// by the fourth parameter. 3448/// 3449/// Algorithm Versions. 3450/// Full Warp Reduce (argument value 0): 3451/// This algorithm assumes that all 32 lanes are active and gathers 3452/// data from these 32 lanes, producing a single resultant value. 3453/// Contiguous Partial Warp Reduce (argument value 1): 3454/// This algorithm assumes that only a *contiguous* subset of lanes 3455/// are active. This happens for the last warp in a parallel region 3456/// when the user specified num_threads is not an integer multiple of 3457/// 32. This contiguous subset always starts with the zeroth lane. 3458/// Partial Warp Reduce (argument value 2): 3459/// This algorithm gathers data from any number of lanes at any position. 3460/// All reduced values are stored in the lowest possible lane. 
The set
3461/// of problems every algorithm addresses is a superset of those
3462/// addressable by algorithms with a lower version number. Overhead
3463/// increases as the algorithm version increases.
3464///
3465/// Terminology
3466/// Reduce element:
3467///   Reduce element refers to the individual data field with primitive
3468///   data types to be combined and reduced across threads.
3469/// Reduce list:
3470///   Reduce list refers to a collection of local, thread-private
3471///   reduce elements.
3472/// Remote Reduce list:
3473///   Remote Reduce list refers to a collection of remote (relative to
3474///   the current thread) reduce elements.
3475///
3476/// We distinguish between three states of threads that are important to
3477/// the implementation of this function.
3478/// Alive threads:
3479///   Threads in a warp executing the SIMT instruction, as distinguished from
3480///   threads that are inactive due to divergent control flow.
3481/// Active threads:
3482///   The minimal set of threads that has to be alive upon entry to this
3483///   function. The computation is correct iff active threads are alive.
3484///   Some threads are alive but they are not active because they do not
3485///   contribute to the computation in any useful manner. Turning them off
3486///   may introduce control flow overheads without any tangible benefits.
3487/// Effective threads:
3488///   In order to comply with the argument requirements of the shuffle
3489///   function, we must keep all lanes holding data alive. But at most
3490///   half of them perform value aggregation; we refer to this half of
3491///   threads as effective. The other half simply hands off its data.
3492///
3493///
3494/// Procedure
3495/// Value shuffle:
3496///   In this step active threads transfer data from higher lane positions
3497///   in the warp to lower lane positions, creating the Remote Reduce list.
3498/// Value aggregation:
3499///   In this step, effective threads combine their thread-local Reduce list
3500///   with the Remote Reduce list and store the result in the thread-local
3501///   Reduce list.
3502/// Value copy:
3503///   In this step, we deal with the assumption made by algorithm 2
3504///   (i.e. contiguity assumption). When we have an odd number of lanes
3505///   active, say 2k+1, only k threads will be effective and therefore k
3506///   new values will be produced. However, the Reduce list owned by the
3507///   (2k+1)th thread is ignored in the value aggregation. Therefore
3508///   we copy the Reduce list from the (2k+1)th lane to the (k+1)th lane so
3509///   that the contiguity assumption still holds.
3510static llvm::Function *emitShuffleAndReduceFunction(
3511    CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3512    QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) {
3513  ASTContext &C = CGM.getContext();
3514
3515  // Thread local Reduce list used to host the values of data to be reduced.
3516  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3517                                  C.VoidPtrTy, ImplicitParamDecl::Other);
3518  // Current lane id; could be logical.
3519  ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
3520                              ImplicitParamDecl::Other);
3521  // Offset of the remote source lane relative to the current lane.
3522  ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3523                                        C.ShortTy, ImplicitParamDecl::Other);
3524  // Algorithm version. This is expected to be known at compile time.
3525  ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3526                               C.ShortTy, ImplicitParamDecl::Other);
3527  FunctionArgList Args;
3528  Args.push_back(&ReduceListArg);
3529  Args.push_back(&LaneIDArg);
3530  Args.push_back(&RemoteLaneOffsetArg);
3531  Args.push_back(&AlgoVerArg);
3532
3533  const CGFunctionInfo &CGFI =
3534      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3535  auto *Fn = llvm::Function::Create(
3536      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3537      "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
3538  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3539  Fn->setDoesNotRecurse();
3540  if (CGM.getLangOpts().Optimize) {
3541    Fn->removeFnAttr(llvm::Attribute::NoInline);
3542    Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
3543    Fn->addFnAttr(llvm::Attribute::AlwaysInline);
3544  }
3545
3546  CodeGenFunction CGF(CGM);
3547  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3548
3549  CGBuilderTy &Bld = CGF.Builder;
3550
3551  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3552  Address LocalReduceList(
3553      Bld.CreatePointerBitCastOrAddrSpaceCast(
3554          CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3555                               C.VoidPtrTy, SourceLocation()),
3556          CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3557      CGF.getPointerAlign());
3558
3559  Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
3560  llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
3561      AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3562
3563  Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
3564  llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
3565      AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3566
3567  Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
3568  llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
3569      AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3570
3571  // Create a local thread-private variable to host the Reduce list
3572  // from a remote lane.
3573  Address RemoteReduceList =
3574      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");
3575
3576  // This loop iterates through the list of reduce elements and copies,
3577  // element by element, from a remote lane in the warp to RemoteReduceList,
3578  // hosted on the thread's stack.
3579  emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
3580                        LocalReduceList, RemoteReduceList,
3581                        {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
3582                         /*ScratchpadIndex=*/nullptr,
3583                         /*ScratchpadWidth=*/nullptr});
3584
3585  // The actions to be performed on the Remote Reduce list depend on the
3586  // algorithm version.
3587  //
3588  //  if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
3589  //  LaneId % 2 == 0 && Offset > 0):
3590  //    do the reduction value aggregation
3591  //
3592  //  The thread local variable Reduce list is mutated in place to host the
3593  //  reduced data, which is the aggregated value produced from local and
3594  //  remote lanes.
3595  //
3596  //  Note that AlgoVer is expected to be a constant integer known at compile
3597  //  time.
3598  //  When AlgoVer==0, the first conjunction evaluates to true, making
3599  //  the entire predicate true at compile time.
3600  //  When AlgoVer==1, only the second part of the second conjunction
3601  //  needs to be evaluated at runtime; the other conjunctions evaluate
3602  //  to false at compile time.
3603  //  When AlgoVer==2, only the second part of the third conjunction
3604  //  needs to be evaluated at runtime; the other conjunctions evaluate
3605  //  to false at compile time.
3606  llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);
3607
3608  llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
3609  llvm::Value *CondAlgo1 = Bld.CreateAnd(
3610      Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
3611
3612  llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
3613  llvm::Value *CondAlgo2 = Bld.CreateAnd(
3614      Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
3615  CondAlgo2 = Bld.CreateAnd(
3616      CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
3617
3618  llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
3619  CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
3620
3621  llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
3622  llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
3623  llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
3624  Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
3625
3626  CGF.EmitBlock(ThenBB);
3627  // reduce_function(LocalReduceList, RemoteReduceList)
3628  llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3629      LocalReduceList.getPointer(), CGF.VoidPtrTy);
3630  llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3631      RemoteReduceList.getPointer(), CGF.VoidPtrTy);
3632  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3633      CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
3634  Bld.CreateBr(MergeBB);
3635
3636  CGF.EmitBlock(ElseBB);
3637  Bld.CreateBr(MergeBB);
3638
3639  CGF.EmitBlock(MergeBB);
3640
3641  // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
3642  // Reduce list.
3643  Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
3644  llvm::Value *CondCopy = Bld.CreateAnd(
3645      Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
3646
3647  llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
3648  llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
3649  llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
3650  Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);
3651
3652  CGF.EmitBlock(CpyThenBB);
3653  emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
3654                        RemoteReduceList, LocalReduceList);
3655  Bld.CreateBr(CpyMergeBB);
3656
3657  CGF.EmitBlock(CpyElseBB);
3658  Bld.CreateBr(CpyMergeBB);
3659
3660  CGF.EmitBlock(CpyMergeBB);
3661
3662  CGF.FinishFunction();
3663  return Fn;
3664}
3665
3666/// This function emits a helper that copies all the reduction variables from
3667/// the team into the provided global buffer for the reduction variables.
3668///
3669/// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
3670///   For all data entries D in reduce_data:
3671///     Copy local D to buffer.D[Idx]
3672static llvm::Value *emitListToGlobalCopyFunction(
3673    CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3674    QualType ReductionArrayTy, SourceLocation Loc,
3675    const RecordDecl *TeamReductionRec,
3676    const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3677        &VarFieldMap) {
3678  ASTContext &C = CGM.getContext();
3679
3680  // Buffer: global reduction buffer.
3681  ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3682                              C.VoidPtrTy, ImplicitParamDecl::Other);
3683  // Idx: index of the buffer.
3684 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy, 3685 ImplicitParamDecl::Other); 3686 // ReduceList: thread local Reduce list. 3687 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 3688 C.VoidPtrTy, ImplicitParamDecl::Other); 3689 FunctionArgList Args; 3690 Args.push_back(&BufferArg); 3691 Args.push_back(&IdxArg); 3692 Args.push_back(&ReduceListArg); 3693 3694 const CGFunctionInfo &CGFI = 3695 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 3696 auto *Fn = llvm::Function::Create( 3697 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, 3698 "_omp_reduction_list_to_global_copy_func", &CGM.getModule()); 3699 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); 3700 Fn->setDoesNotRecurse(); 3701 CodeGenFunction CGF(CGM); 3702 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); 3703 3704 CGBuilderTy &Bld = CGF.Builder; 3705 3706 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); 3707 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg); 3708 Address LocalReduceList( 3709 Bld.CreatePointerBitCastOrAddrSpaceCast( 3710 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false, 3711 C.VoidPtrTy, Loc), 3712 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()), 3713 CGF.getPointerAlign()); 3714 QualType StaticTy = C.getRecordType(TeamReductionRec); 3715 llvm::Type *LLVMReductionsBufferTy = 3716 CGM.getTypes().ConvertTypeForMem(StaticTy); 3717 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 3718 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc), 3719 LLVMReductionsBufferTy->getPointerTo()); 3720 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty), 3721 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg), 3722 /*Volatile=*/false, C.IntTy, 3723 Loc)}; 3724 unsigned Idx = 0; 3725 for (const Expr *Private : Privates) { 3726 // Reduce element = LocalReduceList[i] 3727 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx); 3728 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar( 3729 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation()); 3730 // elemptr = ((CopyType*)(elemptrptr)) + I 3731 ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 3732 ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo()); 3733 Address ElemPtr = 3734 Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType())); 3735 const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl(); 3736 // Global = Buffer.VD[Idx]; 3737 const FieldDecl *FD = VarFieldMap.lookup(VD); 3738 LValue GlobLVal = CGF.EmitLValueForField( 3739 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD); 3740 llvm::Value *BufferPtr = 3741 Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs); 3742 GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment())); 3743 switch (CGF.getEvaluationKind(Private->getType())) { 3744 case TEK_Scalar: { 3745 llvm::Value *V = CGF.EmitLoadOfScalar( 3746 ElemPtr, /*Volatile=*/false, Private->getType(), Loc, 3747 LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()); 3748 CGF.EmitStoreOfScalar(V, GlobLVal); 3749 break; 3750 } 3751 case TEK_Complex: { 3752 CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex( 3753 CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc); 3754 CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false); 3755 break; 3756 } 3757 case TEK_Aggregate: 3758 CGF.EmitAggregateCopy(GlobLVal, 3759 CGF.MakeAddrLValue(ElemPtr, Private->getType()), 3760 
Private->getType(), AggValueSlot::DoesNotOverlap); 3761 break; 3762 } 3763 ++Idx; 3764 } 3765 3766 CGF.FinishFunction(); 3767 return Fn; 3768} 3769 3770/// This function emits a helper that reduces all the reduction variables from 3771/// the team into the provided global buffer for the reduction variables. 3772/// 3773/// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data) 3774/// void *GlobPtrs[]; 3775/// GlobPtrs[0] = (void*)&buffer.D0[Idx]; 3776/// ... 3777/// GlobPtrs[N] = (void*)&buffer.DN[Idx]; 3778/// reduce_function(GlobPtrs, reduce_data); 3779static llvm::Value *emitListToGlobalReduceFunction( 3780 CodeGenModule &CGM, ArrayRef<const Expr *> Privates, 3781 QualType ReductionArrayTy, SourceLocation Loc, 3782 const RecordDecl *TeamReductionRec, 3783 const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> 3784 &VarFieldMap, 3785 llvm::Function *ReduceFn) { 3786 ASTContext &C = CGM.getContext(); 3787 3788 // Buffer: global reduction buffer. 3789 ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 3790 C.VoidPtrTy, ImplicitParamDecl::Other); 3791 // Idx: index of the buffer. 3792 ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy, 3793 ImplicitParamDecl::Other); 3794 // ReduceList: thread local Reduce list. 3795 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 3796 C.VoidPtrTy, ImplicitParamDecl::Other); 3797 FunctionArgList Args; 3798 Args.push_back(&BufferArg); 3799 Args.push_back(&IdxArg); 3800 Args.push_back(&ReduceListArg); 3801 3802 const CGFunctionInfo &CGFI = 3803 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 3804 auto *Fn = llvm::Function::Create( 3805 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, 3806 "_omp_reduction_list_to_global_reduce_func", &CGM.getModule()); 3807 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); 3808 Fn->setDoesNotRecurse(); 3809 CodeGenFunction CGF(CGM); 3810 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); 3811 3812 CGBuilderTy &Bld = CGF.Builder; 3813 3814 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg); 3815 QualType StaticTy = C.getRecordType(TeamReductionRec); 3816 llvm::Type *LLVMReductionsBufferTy = 3817 CGM.getTypes().ConvertTypeForMem(StaticTy); 3818 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 3819 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc), 3820 LLVMReductionsBufferTy->getPointerTo()); 3821 3822 // 1. Build a list of reduction variables. 
3823  // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3824  Address ReductionList =
3825      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3826  auto IPriv = Privates.begin();
3827  llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3828                         CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3829                                              /*Volatile=*/false, C.IntTy,
3830                                              Loc)};
3831  unsigned Idx = 0;
3832  for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
3833    Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3834    // Global = Buffer.VD[Idx];
3835    const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
3836    const FieldDecl *FD = VarFieldMap.lookup(VD);
3837    LValue GlobLVal = CGF.EmitLValueForField(
3838        CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3839    llvm::Value *BufferPtr =
3840        Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
3841    llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
3842    CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
3843    if ((*IPriv)->getType()->isVariablyModifiedType()) {
3844      // Store array size.
3845      ++Idx;
3846      Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3847      llvm::Value *Size = CGF.Builder.CreateIntCast(
3848          CGF.getVLASize(
3849              CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3850              .NumElts,
3851          CGF.SizeTy, /*isSigned=*/false);
3852      CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3853                              Elem);
3854    }
3855  }
3856
3857  // Call reduce_function(GlobalReduceList, ReduceList)
3858  llvm::Value *GlobalReduceList =
3859      CGF.EmitCastToVoidPtr(ReductionList.getPointer());
3860  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3861  llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
3862      AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
3863  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3864      CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr});
3865  CGF.FinishFunction();
3866  return Fn;
3867}
3868
3869/// This function emits a helper that copies all the reduction variables from
3870/// the provided global buffer into the team's local reduce data.
3871///
3872/// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
3873///   For all data entries D in reduce_data:
3874///     Copy buffer.D[Idx] to local D;
3875static llvm::Value *emitGlobalToListCopyFunction(
3876    CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3877    QualType ReductionArrayTy, SourceLocation Loc,
3878    const RecordDecl *TeamReductionRec,
3879    const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3880        &VarFieldMap) {
3881  ASTContext &C = CGM.getContext();
3882
3883  // Buffer: global reduction buffer.
3884  ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3885                              C.VoidPtrTy, ImplicitParamDecl::Other);
3886  // Idx: index of the buffer.
3887  ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3888                           ImplicitParamDecl::Other);
3889  // ReduceList: thread local Reduce list.
3890 ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 3891 C.VoidPtrTy, ImplicitParamDecl::Other); 3892 FunctionArgList Args; 3893 Args.push_back(&BufferArg); 3894 Args.push_back(&IdxArg); 3895 Args.push_back(&ReduceListArg); 3896 3897 const CGFunctionInfo &CGFI = 3898 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 3899 auto *Fn = llvm::Function::Create( 3900 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, 3901 "_omp_reduction_global_to_list_copy_func", &CGM.getModule()); 3902 CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI); 3903 Fn->setDoesNotRecurse(); 3904 CodeGenFunction CGF(CGM); 3905 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); 3906 3907 CGBuilderTy &Bld = CGF.Builder; 3908 3909 Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg); 3910 Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg); 3911 Address LocalReduceList( 3912 Bld.CreatePointerBitCastOrAddrSpaceCast( 3913 CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false, 3914 C.VoidPtrTy, Loc), 3915 CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()), 3916 CGF.getPointerAlign()); 3917 QualType StaticTy = C.getRecordType(TeamReductionRec); 3918 llvm::Type *LLVMReductionsBufferTy = 3919 CGM.getTypes().ConvertTypeForMem(StaticTy); 3920 llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 3921 CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc), 3922 LLVMReductionsBufferTy->getPointerTo()); 3923 3924 llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty), 3925 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg), 3926 /*Volatile=*/false, C.IntTy, 3927 Loc)}; 3928 unsigned Idx = 0; 3929 for (const Expr *Private : Privates) { 3930 // Reduce element = LocalReduceList[i] 3931 Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx); 3932 llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar( 3933 ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation()); 3934 // elemptr = ((CopyType*)(elemptrptr)) + I 3935 ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast( 3936 ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo()); 3937 Address ElemPtr = 3938 Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType())); 3939 const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl(); 3940 // Global = Buffer.VD[Idx]; 3941 const FieldDecl *FD = VarFieldMap.lookup(VD); 3942 LValue GlobLVal = CGF.EmitLValueForField( 3943 CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD); 3944 llvm::Value *BufferPtr = 3945 Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs); 3946 GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment())); 3947 switch (CGF.getEvaluationKind(Private->getType())) { 3948 case TEK_Scalar: { 3949 llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc); 3950 CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType(), 3951 LValueBaseInfo(AlignmentSource::Type), 3952 TBAAAccessInfo()); 3953 break; 3954 } 3955 case TEK_Complex: { 3956 CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc); 3957 CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()), 3958 /*isInit=*/false); 3959 break; 3960 } 3961 case TEK_Aggregate: 3962 CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()), 3963 GlobLVal, Private->getType(), 3964 AggValueSlot::DoesNotOverlap); 3965 break; 3966 } 3967 ++Idx; 3968 } 3969 3970 CGF.FinishFunction(); 3971 return Fn; 3972} 3973 3974/// This 
function emits a helper that reduces all the reduction variables in the
3975/// provided global buffer into the team's local reduce data.
3976///
3977/// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
3978///   void *GlobPtrs[];
3979///   GlobPtrs[0] = (void*)&buffer.D0[Idx];
3980///   ...
3981///   GlobPtrs[N] = (void*)&buffer.DN[Idx];
3982///   reduce_function(reduce_data, GlobPtrs);
3983static llvm::Value *emitGlobalToListReduceFunction(
3984    CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3985    QualType ReductionArrayTy, SourceLocation Loc,
3986    const RecordDecl *TeamReductionRec,
3987    const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3988        &VarFieldMap,
3989    llvm::Function *ReduceFn) {
3990  ASTContext &C = CGM.getContext();
3991
3992  // Buffer: global reduction buffer.
3993  ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3994                              C.VoidPtrTy, ImplicitParamDecl::Other);
3995  // Idx: index of the buffer.
3996  ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3997                           ImplicitParamDecl::Other);
3998  // ReduceList: thread local Reduce list.
3999  ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
4000                                  C.VoidPtrTy, ImplicitParamDecl::Other);
4001  FunctionArgList Args;
4002  Args.push_back(&BufferArg);
4003  Args.push_back(&IdxArg);
4004  Args.push_back(&ReduceListArg);
4005
4006  const CGFunctionInfo &CGFI =
4007      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
4008  auto *Fn = llvm::Function::Create(
4009      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
4010      "_omp_reduction_global_to_list_reduce_func", &CGM.getModule());
4011  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
4012  Fn->setDoesNotRecurse();
4013  CodeGenFunction CGF(CGM);
4014  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
4015
4016  CGBuilderTy &Bld = CGF.Builder;
4017
4018  Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
4019  QualType StaticTy = C.getRecordType(TeamReductionRec);
4020  llvm::Type *LLVMReductionsBufferTy =
4021      CGM.getTypes().ConvertTypeForMem(StaticTy);
4022  llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
4023      CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
4024      LLVMReductionsBufferTy->getPointerTo());
4025
4026  // 1. Build a list of reduction variables.
4027  // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
4028  Address ReductionList =
4029      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
4030  auto IPriv = Privates.begin();
4031  llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
4032                         CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
4033                                              /*Volatile=*/false, C.IntTy,
4034                                              Loc)};
4035  unsigned Idx = 0;
4036  for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
4037    Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
4038    // Global = Buffer.VD[Idx];
4039    const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
4040    const FieldDecl *FD = VarFieldMap.lookup(VD);
4041    LValue GlobLVal = CGF.EmitLValueForField(
4042        CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
4043    llvm::Value *BufferPtr =
4044        Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
4045    llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
4046    CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
4047    if ((*IPriv)->getType()->isVariablyModifiedType()) {
4048      // Store array size.
4049      ++Idx;
4050      Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
4051      llvm::Value *Size = CGF.Builder.CreateIntCast(
4052          CGF.getVLASize(
4053              CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
4054              .NumElts,
4055          CGF.SizeTy, /*isSigned=*/false);
4056      CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
4057                              Elem);
4058    }
4059  }
4060
4061  // Call reduce_function(ReduceList, GlobalReduceList)
4062  llvm::Value *GlobalReduceList =
4063      CGF.EmitCastToVoidPtr(ReductionList.getPointer());
4064  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
4065  llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
4066      AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
4067  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
4068      CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList});
4069  CGF.FinishFunction();
4070  return Fn;
4071}
4072
4073///
4074/// Design of OpenMP reductions on the GPU
4075///
4076/// Consider a typical OpenMP program with one or more reduction
4077/// clauses:
4078///
4079/// float foo;
4080/// double bar;
4081/// #pragma omp target teams distribute parallel for \
4082///             reduction(+:foo) reduction(*:bar)
4083/// for (int i = 0; i < N; i++) {
4084///   foo += A[i]; bar *= B[i];
4085/// }
4086///
4087/// where 'foo' and 'bar' are reduced across all OpenMP threads in
4088/// all teams. In our OpenMP implementation on the NVPTX device an
4089/// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
4090/// within a team are mapped to CUDA threads within a threadblock.
4091/// Our goal is to efficiently aggregate values across all OpenMP
4092/// threads such that:
4093///
4094///   - the compiler and runtime are logically concise, and
4095///   - the reduction is performed efficiently in a hierarchical
4096///     manner as follows: within OpenMP threads in the same warp,
4097///     across warps in a threadblock, and finally across teams on
4098///     the NVPTX device.
4099///
4100/// Introduction to Decoupling
4101///
4102/// We would like to decouple the compiler and the runtime so that the
4103/// latter is ignorant of the reduction variables (number, data types)
4104/// and the reduction operators. This allows a simpler interface
4105/// and implementation while still attaining good performance.
4106///
4107/// Pseudocode for the aforementioned OpenMP program generated by the
4108/// compiler is as follows:
4109///
4110/// 1. Create private copies of reduction variables on each OpenMP
4111///    thread: 'foo_private', 'bar_private'
4112/// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
4113///    to it and writes the result in 'foo_private' and 'bar_private'
4114///    respectively.
4115/// 3. Call the OpenMP runtime on the GPU to reduce within a team
4116///    and store the result on the team master:
4117///
4118///     __kmpc_nvptx_parallel_reduce_nowait_v2(...,
4119///        reduceData, shuffleReduceFn, interWarpCpyFn)
4120///
4121///     where:
4122///       struct ReduceData {
4123///         double *foo;
4124///         double *bar;
4125///       } reduceData
4126///       reduceData.foo = &foo_private
4127///       reduceData.bar = &bar_private
4128///
4129///     'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
4130///     auxiliary functions generated by the compiler that operate on
4131///     variables of type 'ReduceData'. They aid the runtime in performing
4132///     algorithmic steps in a data-agnostic manner.
4133///
4134///     'shuffleReduceFn' is a pointer to a function that reduces data
4135///     of type 'ReduceData' across two OpenMP threads (lanes) in the
4136///     same warp.
It takes the following arguments as input:
4137///
4138///     a. variable of type 'ReduceData' on the calling lane,
4139///     b. its lane_id,
4140///     c. an offset relative to the current lane_id to generate a
4141///        remote_lane_id. The remote lane contains the second
4142///        variable of type 'ReduceData' that is to be reduced.
4143///     d. an algorithm version parameter determining which reduction
4144///        algorithm to use.
4145///
4146///     'shuffleReduceFn' retrieves data from the remote lane using
4147///     efficient GPU shuffle intrinsics and reduces, using the
4148///     algorithm specified by the 4th parameter, the two operands
4149///     element-wise. The result is written to the first operand.
4150///
4151///     Different reduction algorithms are implemented in different
4152///     runtime functions, all calling 'shuffleReduceFn' to perform
4153///     the essential reduction step. Therefore, based on the 4th
4154///     parameter, this function behaves slightly differently to
4155///     cooperate with the runtime to ensure correctness under
4156///     different circumstances.
4157///
4158///     'InterWarpCpyFn' is a pointer to a function that transfers
4159///     reduced variables across warps. It tunnels, through CUDA
4160///     shared memory, the thread-private data of type 'ReduceData'
4161///     from lane 0 of each warp to a lane in the first warp.
4162/// 4. Call the OpenMP runtime on the GPU to reduce across teams.
4163///    The last team writes the global reduced value to memory.
4164///
4165///     ret = __kmpc_nvptx_teams_reduce_nowait(...,
4166///             reduceData, shuffleReduceFn, interWarpCpyFn,
4167///             scratchpadCopyFn, loadAndReduceFn)
4168///
4169///     'scratchpadCopyFn' is a helper that stores reduced
4170///     data from the team master to a scratchpad array in
4171///     global memory.
4172///
4173///     'loadAndReduceFn' is a helper that loads data from
4174///     the scratchpad array and reduces it with the input
4175///     operand.
4176///
4177///     These compiler-generated functions hide address
4178///     calculation and alignment information from the runtime.
4179/// 5. if ret == 1:
4180///     The team master of the last team stores the reduced
4181///     result to the globals in memory.
4182///     foo += reduceData.foo; bar *= reduceData.bar
4183///
4184///
4185/// Warp Reduction Algorithms
4186///
4187/// On the warp level, we have three algorithms implemented in the
4188/// OpenMP runtime depending on the number of active lanes:
4189///
4190/// Full Warp Reduction
4191///
4192/// The reduce algorithm within a warp where all lanes are active
4193/// is implemented in the runtime as follows:
4194///
4195/// full_warp_reduce(void *reduce_data,
4196///                  kmp_ShuffleReductFctPtr ShuffleReduceFn) {
4197///   for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
4198///     ShuffleReduceFn(reduce_data, 0, offset, 0);
4199/// }
4200///
4201/// The algorithm completes in log(2, WARPSIZE) steps.
4202///
4203/// 'ShuffleReduceFn' is used here with lane_id set to 0 because lane_id is
4204/// not used by this algorithm; we therefore save instructions by not
4205/// retrieving lane_id from the corresponding special registers. The 4th
4206/// parameter, which represents the version of the algorithm being used, is
4207/// set to 0 to signify full warp reduction.
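///
/// As an illustrative trace (exposition only, assuming WARPSIZE == 32):
/// the loop in 'full_warp_reduce' runs with offset = 16, 8, 4, 2, 1, so
/// lane 0 successively combines its value with those shuffled down from
/// lanes 16, 8, 4, 2 and 1, and after five steps holds the value reduced
/// across all 32 lanes.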
4208///
4209/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
4210///
4211///   #reduce_elem refers to an element in the local lane's data structure
4212///   #remote_elem is retrieved from a remote lane
4213///   remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
4214///   reduce_elem = reduce_elem REDUCE_OP remote_elem;
4215///
4216/// Contiguous Partial Warp Reduction
4217///
4218/// This reduce algorithm is used within a warp where only the first
4219/// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
4220/// number of OpenMP threads in a parallel region is not a multiple of
4221/// WARPSIZE. The algorithm is implemented in the runtime as follows:
4222///
4223/// void
4224/// contiguous_partial_reduce(void *reduce_data,
4225///                           kmp_ShuffleReductFctPtr ShuffleReduceFn,
4226///                           int size, int lane_id) {
4227///   int curr_size;
4228///   int offset;
4229///   curr_size = size;
4230///   offset = curr_size/2;
4231///   while (offset > 0) {
4232///     ShuffleReduceFn(reduce_data, lane_id, offset, 1);
4233///     curr_size = (curr_size+1)/2;
4234///     offset = curr_size/2;
4235///   }
4236/// }
4237///
4238/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
4239///
4240///   remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
4241///   if (lane_id < offset)
4242///     reduce_elem = reduce_elem REDUCE_OP remote_elem
4243///   else
4244///     reduce_elem = remote_elem
4245///
4246/// This algorithm assumes that the data to be reduced are located in a
4247/// contiguous subset of lanes starting from the first. When there is
4248/// an odd number of active lanes, the data in the last lane is not
4249/// aggregated with any other lane's data but is instead copied over.
4250///
4251/// Dispersed Partial Warp Reduction
4252///
4253/// This algorithm is used within a warp when any discontiguous subset of
4254/// lanes are active. It is used to implement the reduction operation
4255/// across lanes in an OpenMP simd region or in a nested parallel region.
4256///
4257/// void
4258/// dispersed_partial_reduce(void *reduce_data,
4259///                          kmp_ShuffleReductFctPtr ShuffleReduceFn) {
4260///   int size, remote_id;
4261///   int logical_lane_id = number_of_active_lanes_before_me() * 2;
4262///   do {
4263///     remote_id = next_active_lane_id_right_after_me();
4264///     # the above function returns 0 if no active lane
4265///     # is present right after the current lane.
4266///     size = number_of_active_lanes_in_this_warp();
4267///     logical_lane_id /= 2;
4268///     ShuffleReduceFn(reduce_data, logical_lane_id,
4269///                     remote_id-1-threadIdx.x, 2);
4270///   } while (logical_lane_id % 2 == 0 && size > 1);
4271/// }
4272///
4273/// There is no assumption made about the initial state of the reduction.
4274/// Any number of lanes (>=1) could be active at any position. The reduction
4275/// result is returned in the first active lane.
4276///
4277/// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
4278///
4279///   remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
4280///   if (lane_id % 2 == 0 && offset > 0)
4281///     reduce_elem = reduce_elem REDUCE_OP remote_elem
4282///   else
4283///     reduce_elem = remote_elem
4284///
4285///
4286/// Intra-Team Reduction
4287///
4288/// This function, as implemented in the runtime call
4289/// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
4290/// threads in a team. It first reduces within a warp using the
4291/// aforementioned algorithms. We then proceed to gather all such
4292/// reduced values at the first warp.
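///
/// A condensed sketch of that sequence (illustrative pseudocode, not the
/// literal runtime source):
///
///   parallel_reduce(reduce_data, ShuffleReduceFn, InterWarpCpyFn) {
///     <warp-level reduce of reduce_data via one of the three algorithms>
///     InterWarpCpyFn(reduce_data, num_warps)  # warp masters -> warp 0
///     if (warp_id == 0)
///       <warp-level reduce of the gathered per-warp values>
///   }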
4293/// 4294/// The runtime makes use of the function 'InterWarpCpyFn', which copies 4295/// data from each of the "warp master" (zeroth lane of each warp, where 4296/// warp-reduced data is held) to the zeroth warp. This step reduces (in 4297/// a mathematical sense) the problem of reduction across warp masters in 4298/// a block to the problem of warp reduction. 4299/// 4300/// 4301/// Inter-Team Reduction 4302/// 4303/// Once a team has reduced its data to a single value, it is stored in 4304/// a global scratchpad array. Since each team has a distinct slot, this 4305/// can be done without locking. 4306/// 4307/// The last team to write to the scratchpad array proceeds to reduce the 4308/// scratchpad array. One or more workers in the last team use the helper 4309/// 'loadAndReduceDataFn' to load and reduce values from the array, i.e., 4310/// the k'th worker reduces every k'th element. 4311/// 4312/// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to 4313/// reduce across workers and compute a globally reduced value. 4314/// 4315void CGOpenMPRuntimeNVPTX::emitReduction( 4316 CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates, 4317 ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, 4318 ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) { 4319 if (!CGF.HaveInsertPoint()) 4320 return; 4321 4322 bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind); 4323#ifndef NDEBUG 4324 bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind); 4325#endif 4326 4327 if (Options.SimpleReduction) { 4328 assert(!TeamsReduction && !ParallelReduction && 4329 "Invalid reduction selection in emitReduction."); 4330 CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs, 4331 ReductionOps, Options); 4332 return; 4333 } 4334 4335 assert((TeamsReduction || ParallelReduction) && 4336 "Invalid reduction selection in emitReduction."); 4337 4338 // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList), 4339 // RedList, shuffle_reduce_func, interwarp_copy_func); 4340 // or 4341 // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>); 4342 llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc); 4343 llvm::Value *ThreadId = getThreadID(CGF, Loc); 4344 4345 llvm::Value *Res; 4346 ASTContext &C = CGM.getContext(); 4347 // 1. Build a list of reduction variables. 4348 // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]}; 4349 auto Size = RHSExprs.size(); 4350 for (const Expr *E : Privates) { 4351 if (E->getType()->isVariablyModifiedType()) 4352 // Reserve place for array size. 4353 ++Size; 4354 } 4355 llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size); 4356 QualType ReductionArrayTy = 4357 C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal, 4358 /*IndexTypeQuals=*/0); 4359 Address ReductionList = 4360 CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list"); 4361 auto IPriv = Privates.begin(); 4362 unsigned Idx = 0; 4363 for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) { 4364 Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx); 4365 CGF.Builder.CreateStore( 4366 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 4367 CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy), 4368 Elem); 4369 if ((*IPriv)->getType()->isVariablyModifiedType()) { 4370 // Store array size. 
  auto Size = RHSExprs.size();
  for (const Expr *E : Privates) {
    if (E->getType()->isVariablyModifiedType())
      // Reserve place for array size.
      ++Size;
  }
  llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
  QualType ReductionArrayTy =
      C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr,
                             ArrayType::Normal, /*IndexTypeQuals=*/0);
  Address ReductionList =
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
  auto IPriv = Privates.begin();
  unsigned Idx = 0;
  for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
    Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
    CGF.Builder.CreateStore(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            CGF.EmitLValue(RHSExprs[I]).getPointer(CGF), CGF.VoidPtrTy),
        Elem);
    if ((*IPriv)->getType()->isVariablyModifiedType()) {
      // Store array size.
      ++Idx;
      Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
      llvm::Value *Size = CGF.Builder.CreateIntCast(
          CGF.getVLASize(
                 CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
              .NumElts,
          CGF.SizeTy, /*isSigned=*/false);
      CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
                              Elem);
    }
  }

  llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      ReductionList.getPointer(), CGF.VoidPtrTy);
  llvm::Function *ReductionFn = emitReductionFunction(
      Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
      LHSExprs, RHSExprs, ReductionOps);
  llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
  llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
      CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
  llvm::Value *InterWarpCopyFn =
      emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);

  if (ParallelReduction) {
    llvm::Value *Args[] = {RTLoc,
                           ThreadId,
                           CGF.Builder.getInt32(RHSExprs.size()),
                           ReductionArrayTySize,
                           RL,
                           ShuffleAndReduceFn,
                           InterWarpCopyFn};

    Res = CGF.EmitRuntimeCall(
        createNVPTXRuntimeFunction(
            OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2),
        Args);
  } else {
    assert(TeamsReduction && "expected teams reduction.");
    llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
    llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(
        Privates.size());
    int Cnt = 0;
    for (const Expr *DRE : Privates) {
      PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
      ++Cnt;
    }
    const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars(
        CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap,
        C.getLangOpts().OpenMPCUDAReductionBufNum);
    TeamsReductions.push_back(TeamReductionRec);
    if (!KernelTeamsReductionPtr) {
      KernelTeamsReductionPtr = new llvm::GlobalVariable(
          CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true,
          llvm::GlobalValue::InternalLinkage, nullptr,
          "_openmp_teams_reductions_buffer_$_$ptr");
    }
    llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar(
        Address(KernelTeamsReductionPtr, CGM.getPointerAlign()),
        /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
    llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
        CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
    llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction(
        CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
        ReductionFn);
    llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction(
        CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
    llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction(
        CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
        ReductionFn);

    llvm::Value *Args[] = {
        RTLoc,
        ThreadId,
        GlobalBufferPtr,
        CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum),
        RL,
        ShuffleAndReduceFn,
        InterWarpCopyFn,
        GlobalToBufferCpyFn,
        GlobalToBufferRedFn,
        BufferToGlobalCpyFn,
        BufferToGlobalRedFn};

    Res = CGF.EmitRuntimeCall(
        createNVPTXRuntimeFunction(
            OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2),
        Args);
  }
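  // Schematically (illustrative IR shape, not verbatim output), the parallel
  // branch above produces a call like:
  //   %res = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(
  //       %struct.ident_t* %loc, i32 %gtid, i32 <n>, i64 <sizeof RedList>,
  //       i8* %red_list, @<shuffle_and_reduce_func>, @<inter_warp_copy_func>)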

  // 5. Build if (res == 1)
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
  llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
  llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
      Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
  CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);

  // 6. Build then branch: where we have reduced values in the master
  //    thread in each team.
  //    __kmpc_end_reduce{_nowait}(<gtid>);
  //    break;
  CGF.EmitBlock(ThenBB);

  // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
  auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
                    this](CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto IPriv = Privates.begin();
    auto ILHS = LHSExprs.begin();
    auto IRHS = RHSExprs.begin();
    for (const Expr *E : ReductionOps) {
      emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
                                  cast<DeclRefExpr>(*IRHS));
      ++IPriv;
      ++ILHS;
      ++IRHS;
    }
  };
  llvm::Value *EndArgs[] = {ThreadId};
  RegionCodeGenTy RCG(CodeGen);
  NVPTXActionTy Action(
      nullptr, llvm::None,
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_reduce_nowait),
      EndArgs);
  RCG.setAction(Action);
  RCG(CGF);
  // There is no need to emit line number for unconditional branch.
  (void)ApplyDebugLocation::CreateEmpty(CGF);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

const VarDecl *
CGOpenMPRuntimeNVPTX::translateParameter(const FieldDecl *FD,
                                         const VarDecl *NativeParam) const {
  if (!NativeParam->getType()->isReferenceType())
    return NativeParam;
  QualType ArgType = NativeParam->getType();
  QualifierCollector QC;
  const Type *NonQualTy = QC.strip(ArgType);
  QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
  if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) {
    if (Attr->getCaptureKind() == OMPC_map) {
      PointeeTy = CGM.getContext().getAddrSpaceQualType(
          PointeeTy, LangAS::opencl_global);
    } else if (Attr->getCaptureKind() == OMPC_firstprivate &&
               PointeeTy.isConstant(CGM.getContext())) {
      PointeeTy = CGM.getContext().getAddrSpaceQualType(
          PointeeTy, LangAS::opencl_generic);
    }
  }
  ArgType = CGM.getContext().getPointerType(PointeeTy);
  QC.addRestrict();
  enum { NVPTX_local_addr = 5 };
  QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
  ArgType = QC.apply(CGM.getContext(), ArgType);
  if (isa<ImplicitParamDecl>(NativeParam))
    return ImplicitParamDecl::Create(
        CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
        NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
  return ParmVarDecl::Create(
      CGM.getContext(),
      const_cast<DeclContext *>(NativeParam->getDeclContext()),
      NativeParam->getBeginLoc(), NativeParam->getLocation(),
      NativeParam->getIdentifier(), ArgType,
      /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
}
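
// For example (illustrative only): a reference parameter 'int &x' whose
// field is captured via 'map' is rewritten here into roughly
//   int __global * __restrict x   // pointee in global AS, pointer in AS 5
// so the outlined region reads and writes the mapped memory through the
// correct NVPTX address space.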

Address
CGOpenMPRuntimeNVPTX::getParameterAddress(CodeGenFunction &CGF,
                                          const VarDecl *NativeParam,
                                          const VarDecl *TargetParam) const {
  assert(NativeParam != TargetParam &&
         NativeParam->getType()->isReferenceType() &&
         "Native arg must not be the same as target arg.");
  Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
  QualType NativeParamType = NativeParam->getType();
  QualifierCollector QC;
  const Type *NonQualTy = QC.strip(NativeParamType);
  QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
  unsigned NativePointeeAddrSpace =
      CGF.getContext().getTargetAddressSpace(NativePointeeTy);
  QualType TargetTy = TargetParam->getType();
  llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
      LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
  // First cast to generic.
  TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
                      /*AddrSpace=*/0));
  // Cast from generic to native address space.
  TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
                      NativePointeeAddrSpace));
  Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
  CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
                        NativeParamType);
  return NativeParamAddr;
}

void CGOpenMPRuntimeNVPTX::emitOutlinedFunctionCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
    ArrayRef<llvm::Value *> Args) const {
  SmallVector<llvm::Value *, 4> TargetArgs;
  TargetArgs.reserve(Args.size());
  auto *FnType = OutlinedFn.getFunctionType();
  for (unsigned I = 0, E = Args.size(); I < E; ++I) {
    if (FnType->isVarArg() && FnType->getNumParams() <= I) {
      TargetArgs.append(std::next(Args.begin(), I), Args.end());
      break;
    }
    llvm::Type *TargetType = FnType->getParamType(I);
    llvm::Value *NativeArg = Args[I];
    if (!TargetType->isPointerTy()) {
      TargetArgs.emplace_back(NativeArg);
      continue;
    }
    llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        NativeArg,
        NativeArg->getType()->getPointerElementType()->getPointerTo());
    TargetArgs.emplace_back(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg,
                                                        TargetType));
  }
  CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
}
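
// Schematically (illustrative IR, not verbatim output): a pointer argument
// living in a non-generic address space is routed through the generic space
// before being bitcast to the parameter type, e.g.
//   %gen = addrspacecast i32 addrspace(3)* %x to i32*
//   %arg = bitcast i32* %gen to i8*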

/// Emit a function that wraps the outlined parallel region and controls the
/// arguments that are passed to it. The wrapper ensures that the outlined
/// function is called with the correct arguments when data is shared.
llvm::Function *CGOpenMPRuntimeNVPTX::createParallelDataSharingWrapper(
    llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
  ASTContext &Ctx = CGM.getContext();
  const auto &CS = *D.getCapturedStmt(OMPD_parallel);

  // Create a function that takes as argument the source thread.
  FunctionArgList WrapperArgs;
  QualType Int16QTy =
      Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
  QualType Int32QTy =
      Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
  ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
                                     /*Id=*/nullptr, Int16QTy,
                                     ImplicitParamDecl::Other);
  ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
                               /*Id=*/nullptr, Int32QTy,
                               ImplicitParamDecl::Other);
  WrapperArgs.emplace_back(&ParallelLevelArg);
  WrapperArgs.emplace_back(&WrapperArg);

  const CGFunctionInfo &CGFI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy,
                                                       WrapperArgs);

  auto *Fn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
      Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
  Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
  Fn->setDoesNotRecurse();

  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
  CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
                    D.getBeginLoc(), D.getBeginLoc());

  const auto *RD = CS.getCapturedRecordDecl();
  auto CurField = RD->field_begin();

  Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
                                                      /*Name=*/".zero.addr");
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
  // Get the array of arguments.
  SmallVector<llvm::Value *, 8> Args;

  Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
  Args.emplace_back(ZeroAddr.getPointer());

  CGBuilderTy &Bld = CGF.Builder;
  auto CI = CS.capture_begin();

  // Use global memory for data sharing.
  // Handle passing of global args to workers.
  Address GlobalArgs =
      CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
  llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
  llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_get_shared_variables),
      DataSharingArgs);
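
  // Conceptually, the wrapper emitted here behaves like the following
  // sketch (hypothetical C-level rendering, not actual output):
  //   void outlined_wrapper(uint16_t parallel_level, uint32_t tid) {
  //     void **shared_args;
  //     __kmpc_get_shared_variables(&shared_args);
  //     uint32_t zero = 0;
  //     outlined(&tid, &zero, shared_args[0], ..., shared_args[n-1]);
  //   }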

  // Retrieve the shared variables from the list of references returned
  // by the runtime. Pass the variables to the outlined function.
  Address SharedArgListAddress = Address::invalid();
  if (CS.capture_size() > 0 ||
      isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
    SharedArgListAddress = CGF.EmitLoadOfPointer(
        GlobalArgs, CGF.getContext()
                        .getPointerType(CGF.getContext().getPointerType(
                            CGF.getContext().VoidPtrTy))
                        .castAs<PointerType>());
  }
  unsigned Idx = 0;
  if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
    Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
    Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
        Src, CGF.SizeTy->getPointerTo());
    llvm::Value *LB = CGF.EmitLoadOfScalar(
        TypedAddress,
        /*Volatile=*/false,
        CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
        cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
    Args.emplace_back(LB);
    ++Idx;
    Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
    TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
        Src, CGF.SizeTy->getPointerTo());
    llvm::Value *UB = CGF.EmitLoadOfScalar(
        TypedAddress,
        /*Volatile=*/false,
        CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
        cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
    Args.emplace_back(UB);
    ++Idx;
  }
  if (CS.capture_size() > 0) {
    ASTContext &CGFContext = CGF.getContext();
    for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
      QualType ElemTy = CurField->getType();
      Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
      Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
          Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)));
      llvm::Value *Arg = CGF.EmitLoadOfScalar(
          TypedAddress,
          /*Volatile=*/false,
          CGFContext.getPointerType(ElemTy),
          CI->getLocation());
      if (CI->capturesVariableByCopy() &&
          !CI->getCapturedVar()->getType()->isAnyPointerType()) {
        Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
                              CI->getLocation());
      }
      Args.emplace_back(Arg);
    }
  }

  emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args);
  CGF.FinishFunction();
  return Fn;
}

void CGOpenMPRuntimeNVPTX::emitFunctionProlog(CodeGenFunction &CGF,
                                              const Decl *D) {
  if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
    return;

  assert(D && "Expected function or captured|block decl.");
  assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
         "Function is registered already.");
  assert((!TeamAndReductions.first || TeamAndReductions.first == D) &&
         "Team is set but not processed.");
  const Stmt *Body = nullptr;
  bool NeedToDelayGlobalization = false;
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    Body = FD->getBody();
  } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
    Body = BD->getBody();
  } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
    Body = CD->getBody();
    NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
    if (NeedToDelayGlobalization &&
        getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
      return;
  }
  if (!Body)
    return;
  CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second);
  VarChecker.Visit(Body);
  const RecordDecl *GlobalizedVarsRecord =
      VarChecker.getGlobalizedRecord(IsInTTDRegion);
  TeamAndReductions.first = nullptr;
  TeamAndReductions.second.clear();
  ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
      VarChecker.getEscapedVariableLengthDecls();
  if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
    return;
  auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
  I->getSecond().MappedParams =
      std::make_unique<CodeGenFunction::OMPMapVars>();
  I->getSecond().GlobalRecord = GlobalizedVarsRecord;
  I->getSecond().EscapedParameters.insert(
      VarChecker.getEscapedParameters().begin(),
      VarChecker.getEscapedParameters().end());
  I->getSecond().EscapedVariableLengthDecls.append(
      EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
  DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
  for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
    assert(VD->isCanonicalDecl() && "Expected canonical declaration");
    const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
    Data.insert(std::make_pair(VD, MappedVarData(FD, IsInTTDRegion)));
  }
  if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
    CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
    VarChecker.Visit(Body);
    I->getSecond().SecondaryGlobalRecord =
        VarChecker.getGlobalizedRecord(/*IsInTTDRegion=*/true);
    I->getSecond().SecondaryLocalVarData.emplace();
    DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
    for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
      assert(VD->isCanonicalDecl() && "Expected canonical declaration");
      const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
      Data.insert(
          std::make_pair(VD, MappedVarData(FD, /*IsInTTDRegion=*/true)));
    }
  }
  if (!NeedToDelayGlobalization) {
    emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
    struct GlobalizationScope final : EHScopeStack::Cleanup {
      GlobalizationScope() = default;

      void Emit(CodeGenFunction &CGF, Flags flags) override {
        static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
            .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
      }
    };
    CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
  }
}
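
// For illustration (hypothetical user code): in
//   #pragma omp target teams
//   { int x; #pragma omp parallel { use(x); } }
// 'x' escapes the team master's stack, so the prolog above moves it into a
// globalized record (pushed via the data-sharing stack) and the cleanup
// pops it when the function exits.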

Address CGOpenMPRuntimeNVPTX::getAddressOfLocalVariable(CodeGenFunction &CGF,
                                                        const VarDecl *VD) {
  if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) {
    const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
    auto AS = LangAS::Default;
    switch (A->getAllocatorType()) {
    // Use the default allocator here as by default local vars are
    // threadlocal.
    case OMPAllocateDeclAttr::OMPNullMemAlloc:
    case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
    case OMPAllocateDeclAttr::OMPThreadMemAlloc:
    case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
    case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
      // Follow the user decision - use default allocation.
      return Address::invalid();
    case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
      // TODO: implement support for user-defined allocators.
      return Address::invalid();
    case OMPAllocateDeclAttr::OMPConstMemAlloc:
      AS = LangAS::cuda_constant;
      break;
    case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
      AS = LangAS::cuda_shared;
      break;
    case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
    case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
      break;
    }
    llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
    auto *GV = new llvm::GlobalVariable(
        CGM.getModule(), VarTy, /*isConstant=*/false,
        llvm::GlobalValue::InternalLinkage,
        llvm::Constant::getNullValue(VarTy), VD->getName(),
        /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
        CGM.getContext().getTargetAddressSpace(AS));
    CharUnits Align = CGM.getContext().getDeclAlign(VD);
    GV->setAlignment(Align.getAsAlign());
    return Address(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            GV, VarTy->getPointerTo(CGM.getContext().getTargetAddressSpace(
                    VD->getType().getAddressSpace()))),
        Align);
  }

  if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
    return Address::invalid();

  VD = VD->getCanonicalDecl();
  auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
  if (I == FunctionGlobalizedDecls.end())
    return Address::invalid();
  auto VDI = I->getSecond().LocalVarData.find(VD);
  if (VDI != I->getSecond().LocalVarData.end())
    return VDI->second.PrivateAddr;
  if (VD->hasAttrs()) {
    for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
         E(VD->attr_end());
         IT != E; ++IT) {
      auto VDI = I->getSecond().LocalVarData.find(
          cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
              ->getCanonicalDecl());
      if (VDI != I->getSecond().LocalVarData.end())
        return VDI->second.PrivateAddr;
    }
  }

  return Address::invalid();
}

void CGOpenMPRuntimeNVPTX::functionFinished(CodeGenFunction &CGF) {
  FunctionGlobalizedDecls.erase(CGF.CurFn);
  CGOpenMPRuntime::functionFinished(CGF);
}

void CGOpenMPRuntimeNVPTX::getDefaultDistScheduleAndChunk(
    CodeGenFunction &CGF, const OMPLoopDirective &S,
    OpenMPDistScheduleClauseKind &ScheduleKind,
    llvm::Value *&Chunk) const {
  if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD) {
    ScheduleKind = OMPC_DIST_SCHEDULE_static;
    Chunk = CGF.EmitScalarConversion(
        getNVPTXNumThreads(CGF),
        CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
        S.getIterationVariable()->getType(), S.getBeginLoc());
    return;
  }
  CGOpenMPRuntime::getDefaultDistScheduleAndChunk(
      CGF, S, ScheduleKind, Chunk);
}
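
// In SPMD mode the effect is as if the user had written (illustrative):
//   #pragma omp distribute dist_schedule(static, <threads-per-team>)
// paired with the schedule(static, 1) default chosen below; the intent is
// that consecutive threads in a team execute consecutive iterations, which
// keeps memory accesses coalesced.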

void CGOpenMPRuntimeNVPTX::getDefaultScheduleAndChunk(
    CodeGenFunction &CGF, const OMPLoopDirective &S,
    OpenMPScheduleClauseKind &ScheduleKind,
    const Expr *&ChunkExpr) const {
  ScheduleKind = OMPC_SCHEDULE_static;
  // Chunk size is 1 in this case.
  llvm::APInt ChunkSize(32, 1);
  ChunkExpr = IntegerLiteral::Create(
      CGF.getContext(), ChunkSize,
      CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
      SourceLocation());
}

void CGOpenMPRuntimeNVPTX::adjustTargetSpecificDataForLambdas(
    CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
  assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
         "Expected target-based directive.");
  const CapturedStmt *CS = D.getCapturedStmt(OMPD_target);
  for (const CapturedStmt::Capture &C : CS->captures()) {
    // Capture variables captured by reference in lambdas for target-based
    // directives.
    if (!C.capturesVariable())
      continue;
    const VarDecl *VD = C.getCapturedVar();
    const auto *RD = VD->getType()
                         .getCanonicalType()
                         .getNonReferenceType()
                         ->getAsCXXRecordDecl();
    if (!RD || !RD->isLambda())
      continue;
    Address VDAddr = CGF.GetAddrOfLocalVar(VD);
    LValue VDLVal;
    if (VD->getType().getCanonicalType()->isReferenceType())
      VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType());
    else
      VDLVal = CGF.MakeAddrLValue(
          VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
    llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
    FieldDecl *ThisCapture = nullptr;
    RD->getCaptureFields(Captures, ThisCapture);
    if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
      LValue ThisLVal =
          CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
      llvm::Value *CXXThis = CGF.LoadCXXThis();
      CGF.EmitStoreOfScalar(CXXThis, ThisLVal);
    }
    for (const LambdaCapture &LC : RD->captures()) {
      if (LC.getCaptureKind() != LCK_ByRef)
        continue;
      const VarDecl *VD = LC.getCapturedVar();
      if (!CS->capturesVariable(VD))
        continue;
      auto It = Captures.find(VD);
      assert(It != Captures.end() && "Found lambda capture without field.");
      LValue VarLVal =
          CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
      Address VDAddr = CGF.GetAddrOfLocalVar(VD);
      if (VD->getType().getCanonicalType()->isReferenceType())
        VDAddr = CGF.EmitLoadOfReferenceLValue(
                        VDAddr, VD->getType().getCanonicalType())
                     .getAddress(CGF);
      CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
    }
  }
}
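
// For illustration (hypothetical user code):
//   int x = 0;
//   auto l = [&x]() { x = 1; };
//   #pragma omp target
//   l();
// The lambda object is mapped to the device by value, so its by-reference
// capture of 'x' still holds a host address; the loop above rewrites that
// field to point at the device copy of 'x' before the region runs.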

unsigned CGOpenMPRuntimeNVPTX::getDefaultFirstprivateAddressSpace() const {
  return CGM.getContext().getTargetAddressSpace(LangAS::cuda_constant);
}

bool CGOpenMPRuntimeNVPTX::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
                                                            LangAS &AS) {
  if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
    return false;
  const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
  switch (A->getAllocatorType()) {
  case OMPAllocateDeclAttr::OMPNullMemAlloc:
  case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
    // Not supported, fallback to the default mem space.
  case OMPAllocateDeclAttr::OMPThreadMemAlloc:
  case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
  case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
  case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
  case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
    AS = LangAS::Default;
    return true;
  case OMPAllocateDeclAttr::OMPConstMemAlloc:
    AS = LangAS::cuda_constant;
    return true;
  case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
    AS = LangAS::cuda_shared;
    return true;
  case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
    llvm_unreachable("Expected predefined allocator for the variables with "
                     "the static storage.");
  }
  return false;
}
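
// For example (illustrative): a global declared with
//   int gv;
//   #pragma omp allocate(gv) allocator(omp_pteam_mem_alloc)
// reports LangAS::cuda_shared here and is emitted into NVPTX shared memory,
// while omp_const_mem_alloc would place it in constant memory.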

// Get current CudaArch and ignore any unknown values.
static CudaArch getCudaArch(CodeGenModule &CGM) {
  if (!CGM.getTarget().hasFeature("ptx"))
    return CudaArch::UNKNOWN;
  for (const auto &Feature : CGM.getTarget().getTargetOpts().FeatureMap) {
    if (Feature.getValue()) {
      CudaArch Arch = StringToCudaArch(Feature.getKey());
      if (Arch != CudaArch::UNKNOWN)
        return Arch;
    }
  }
  return CudaArch::UNKNOWN;
}

/// Check whether the target architecture supports unified addressing, which
/// is required by the OpenMP requires clause "unified_shared_memory".
void CGOpenMPRuntimeNVPTX::processRequiresDirective(
    const OMPRequiresDecl *D) {
  for (const OMPClause *Clause : D->clauselists()) {
    if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
      CudaArch Arch = getCudaArch(CGM);
      switch (Arch) {
      case CudaArch::SM_20:
      case CudaArch::SM_21:
      case CudaArch::SM_30:
      case CudaArch::SM_32:
      case CudaArch::SM_35:
      case CudaArch::SM_37:
      case CudaArch::SM_50:
      case CudaArch::SM_52:
      case CudaArch::SM_53:
      case CudaArch::SM_60:
      case CudaArch::SM_61:
      case CudaArch::SM_62: {
        SmallString<256> Buffer;
        llvm::raw_svector_ostream Out(Buffer);
        Out << "Target architecture " << CudaArchToString(Arch)
            << " does not support unified addressing";
        CGM.Error(Clause->getBeginLoc(), Out.str());
        return;
      }
      case CudaArch::SM_70:
      case CudaArch::SM_72:
      case CudaArch::SM_75:
      case CudaArch::SM_80:
      case CudaArch::GFX600:
      case CudaArch::GFX601:
      case CudaArch::GFX700:
      case CudaArch::GFX701:
      case CudaArch::GFX702:
      case CudaArch::GFX703:
      case CudaArch::GFX704:
      case CudaArch::GFX801:
      case CudaArch::GFX802:
      case CudaArch::GFX803:
      case CudaArch::GFX810:
      case CudaArch::GFX900:
      case CudaArch::GFX902:
      case CudaArch::GFX904:
      case CudaArch::GFX906:
      case CudaArch::GFX908:
      case CudaArch::GFX909:
      case CudaArch::GFX1010:
      case CudaArch::GFX1011:
      case CudaArch::GFX1012:
      case CudaArch::GFX1030:
      case CudaArch::UNKNOWN:
        break;
      case CudaArch::LAST:
        llvm_unreachable("Unexpected Cuda arch.");
      }
    }
  }
  CGOpenMPRuntime::processRequiresDirective(D);
}

/// Get number of SMs and number of blocks per SM.
static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
  std::pair<unsigned, unsigned> Data;
  if (CGM.getLangOpts().OpenMPCUDANumSMs)
    Data.first = CGM.getLangOpts().OpenMPCUDANumSMs;
  if (CGM.getLangOpts().OpenMPCUDABlocksPerSM)
    Data.second = CGM.getLangOpts().OpenMPCUDABlocksPerSM;
  if (Data.first && Data.second)
    return Data;
  switch (getCudaArch(CGM)) {
  case CudaArch::SM_20:
  case CudaArch::SM_21:
  case CudaArch::SM_30:
  case CudaArch::SM_32:
  case CudaArch::SM_35:
  case CudaArch::SM_37:
  case CudaArch::SM_50:
  case CudaArch::SM_52:
  case CudaArch::SM_53:
    return {16, 16};
  case CudaArch::SM_60:
  case CudaArch::SM_61:
  case CudaArch::SM_62:
    return {56, 32};
  case CudaArch::SM_70:
  case CudaArch::SM_72:
  case CudaArch::SM_75:
  case CudaArch::SM_80:
    return {84, 32};
  case CudaArch::GFX600:
  case CudaArch::GFX601:
  case CudaArch::GFX700:
  case CudaArch::GFX701:
  case CudaArch::GFX702:
  case CudaArch::GFX703:
  case CudaArch::GFX704:
  case CudaArch::GFX801:
  case CudaArch::GFX802:
  case CudaArch::GFX803:
  case CudaArch::GFX810:
  case CudaArch::GFX900:
  case CudaArch::GFX902:
  case CudaArch::GFX904:
  case CudaArch::GFX906:
  case CudaArch::GFX908:
  case CudaArch::GFX909:
  case CudaArch::GFX1010:
  case CudaArch::GFX1011:
  case CudaArch::GFX1012:
  case CudaArch::GFX1030:
  case CudaArch::UNKNOWN:
    break;
  case CudaArch::LAST:
    llvm_unreachable("Unexpected Cuda arch.");
  }
  llvm_unreachable("Unexpected NVPTX target without ptx feature.");
}
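
// Sizing sketch (illustrative): for sm_70 the table above yields
// {84 SMs, 32 blocks/SM}, so the fallback static buffer built in clear()
// below is laid out as
//   union _openmp_static_memory_type_$_ buffer[84][32];
// i.e. one record slot per team that can be resident on the device at once.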

void CGOpenMPRuntimeNVPTX::clear() {
  if (!GlobalizedRecords.empty() &&
      !CGM.getLangOpts().OpenMPCUDATargetParallel) {
    ASTContext &C = CGM.getContext();
    llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> GlobalRecs;
    llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> SharedRecs;
    RecordDecl *StaticRD = C.buildImplicitRecord(
        "_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
    StaticRD->startDefinition();
    RecordDecl *SharedStaticRD = C.buildImplicitRecord(
        "_shared_openmp_static_memory_type_$_",
        RecordDecl::TagKind::TTK_Union);
    SharedStaticRD->startDefinition();
    for (const GlobalPtrSizeRecsTy &Records : GlobalizedRecords) {
      if (Records.Records.empty())
        continue;
      unsigned Size = 0;
      unsigned RecAlignment = 0;
      for (const RecordDecl *RD : Records.Records) {
        QualType RDTy = C.getRecordType(RD);
        unsigned Alignment = C.getTypeAlignInChars(RDTy).getQuantity();
        RecAlignment = std::max(RecAlignment, Alignment);
        unsigned RecSize = C.getTypeSizeInChars(RDTy).getQuantity();
        Size =
            llvm::alignTo(llvm::alignTo(Size, Alignment) + RecSize,
                          Alignment);
      }
      Size = llvm::alignTo(Size, RecAlignment);
      llvm::APInt ArySize(/*numBits=*/64, Size);
      QualType SubTy = C.getConstantArrayType(
          C.CharTy, ArySize, nullptr, ArrayType::Normal,
          /*IndexTypeQuals=*/0);
      const bool UseSharedMemory = Size <= SharedMemorySize;
      auto *Field =
          FieldDecl::Create(C, UseSharedMemory ? SharedStaticRD : StaticRD,
                            SourceLocation(), SourceLocation(), nullptr,
                            SubTy,
                            C.getTrivialTypeSourceInfo(SubTy,
                                                       SourceLocation()),
                            /*BW=*/nullptr, /*Mutable=*/false,
                            /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      if (UseSharedMemory) {
        SharedStaticRD->addDecl(Field);
        SharedRecs.push_back(&Records);
      } else {
        StaticRD->addDecl(Field);
        GlobalRecs.push_back(&Records);
      }
      Records.RecSize->setInitializer(
          llvm::ConstantInt::get(CGM.SizeTy, Size));
      Records.UseSharedMemory->setInitializer(
          llvm::ConstantInt::get(CGM.Int16Ty, UseSharedMemory ? 1 : 0));
    }
    // Allocate SharedMemorySize buffer for the shared memory.
    // FIXME: nvlink does not handle weak linkage correctly (objects with
    // different sizes are reported as erroneous).
    // Restore this code as soon as nvlink is fixed.
    if (!SharedStaticRD->field_empty()) {
      llvm::APInt ArySize(/*numBits=*/64, SharedMemorySize);
      QualType SubTy = C.getConstantArrayType(
          C.CharTy, ArySize, nullptr, ArrayType::Normal,
          /*IndexTypeQuals=*/0);
      auto *Field = FieldDecl::Create(
          C, SharedStaticRD, SourceLocation(), SourceLocation(), nullptr,
          SubTy, C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      SharedStaticRD->addDecl(Field);
    }
    SharedStaticRD->completeDefinition();
    if (!SharedStaticRD->field_empty()) {
      QualType StaticTy = C.getRecordType(SharedStaticRD);
      llvm::Type *LLVMStaticTy = CGM.getTypes().ConvertTypeForMem(StaticTy);
      auto *GV = new llvm::GlobalVariable(
          CGM.getModule(), LLVMStaticTy,
          /*isConstant=*/false, llvm::GlobalValue::CommonLinkage,
          llvm::Constant::getNullValue(LLVMStaticTy),
          "_openmp_shared_static_glob_rd_$_", /*InsertBefore=*/nullptr,
          llvm::GlobalValue::NotThreadLocal,
          C.getTargetAddressSpace(LangAS::cuda_shared));
      auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
          GV, CGM.VoidPtrTy);
      for (const GlobalPtrSizeRecsTy *Rec : SharedRecs) {
        Rec->Buffer->replaceAllUsesWith(Replacement);
        Rec->Buffer->eraseFromParent();
      }
    }
    StaticRD->completeDefinition();
    if (!StaticRD->field_empty()) {
      QualType StaticTy = C.getRecordType(StaticRD);
      std::pair<unsigned, unsigned> SMsBlockPerSM = getSMsBlocksPerSM(CGM);
      llvm::APInt Size1(32, SMsBlockPerSM.second);
      QualType Arr1Ty =
          C.getConstantArrayType(StaticTy, Size1, nullptr, ArrayType::Normal,
                                 /*IndexTypeQuals=*/0);
      llvm::APInt Size2(32, SMsBlockPerSM.first);
      QualType Arr2Ty =
          C.getConstantArrayType(Arr1Ty, Size2, nullptr, ArrayType::Normal,
                                 /*IndexTypeQuals=*/0);
      llvm::Type *LLVMArr2Ty = CGM.getTypes().ConvertTypeForMem(Arr2Ty);
      // FIXME: nvlink does not handle weak linkage correctly (objects with
      // different sizes are reported as erroneous).
      // Restore CommonLinkage as soon as nvlink is fixed.
      auto *GV = new llvm::GlobalVariable(
          CGM.getModule(), LLVMArr2Ty,
          /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
          llvm::Constant::getNullValue(LLVMArr2Ty),
          "_openmp_static_glob_rd_$_");
      auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
          GV, CGM.VoidPtrTy);
      for (const GlobalPtrSizeRecsTy *Rec : GlobalRecs) {
        Rec->Buffer->replaceAllUsesWith(Replacement);
        Rec->Buffer->eraseFromParent();
      }
    }
  }
  if (!TeamsReductions.empty()) {
    ASTContext &C = CGM.getContext();
    RecordDecl *StaticRD = C.buildImplicitRecord(
        "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union);
    StaticRD->startDefinition();
    for (const RecordDecl *TeamReductionRec : TeamsReductions) {
      QualType RecTy = C.getRecordType(TeamReductionRec);
      auto *Field = FieldDecl::Create(
          C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
          C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
          /*BW=*/nullptr, /*Mutable=*/false,
          /*InitStyle=*/ICIS_NoInit);
      Field->setAccess(AS_public);
      StaticRD->addDecl(Field);
    }
    StaticRD->completeDefinition();
    QualType StaticTy = C.getRecordType(StaticRD);
    llvm::Type *LLVMReductionsBufferTy =
        CGM.getTypes().ConvertTypeForMem(StaticTy);
    // FIXME: nvlink does not handle weak linkage correctly (objects with
    // different sizes are reported as erroneous).
    // Restore CommonLinkage as soon as nvlink is fixed.
    auto *GV = new llvm::GlobalVariable(
        CGM.getModule(), LLVMReductionsBufferTy,
        /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
        llvm::Constant::getNullValue(LLVMReductionsBufferTy),
        "_openmp_teams_reductions_buffer_$_");
    KernelTeamsReductionPtr->setInitializer(
        llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
                                                             CGM.VoidPtrTy));
  }
  CGOpenMPRuntime::clear();
}
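
// Layout note (illustrative): the buffer above is a union with one record
// per teams reduction in the module, each record sized for
// OpenMPCUDAReductionBufNum in-flight entries. The kernel-side pointer
// _openmp_teams_reductions_buffer_$_$ptr is initialized to its address so
// __kmpc_nvptx_teams_reduce_nowait_v2 can index it at run time.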