/* Copyright (C) 2005-2022 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file contains data types and function declarations that are not
   part of the official OpenACC or OpenMP user interfaces.  There are
   declarations in here that are part of the GNU Offloading and Multi
   Processing ABI, in that the compiler is required to know about them
   and use them.

   The convention is that the all-caps prefix "GOMP" is used to group items
   that are part of the external ABI, and the lower-case prefix "gomp"
   is used to group items that are completely private to the library.  */

#ifndef LIBGOMP_H
#define LIBGOMP_H 1

#ifndef _LIBGOMP_CHECKING_
/* Define to 1 to perform internal sanity checks.  */
#define _LIBGOMP_CHECKING_ 0
#endif

#include "config.h"
#include <stdint.h>
#include "libgomp-plugin.h"
#include "gomp-constants.h"

#ifdef HAVE_PTHREAD_H
#include <pthread.h>
#endif
#include <stdbool.h>
#include <stdlib.h>
#include <stdarg.h>

/* Needed for memset in priority_queue.c.  */
#if _LIBGOMP_CHECKING_
# ifdef STRING_WITH_STRINGS
#  include <string.h>
#  include <strings.h>
# else
#  ifdef HAVE_STRING_H
#   include <string.h>
#  else
#   ifdef HAVE_STRINGS_H
#    include <strings.h>
#   endif
#  endif
# endif
#endif

#ifdef HAVE_ATTRIBUTE_VISIBILITY
# pragma GCC visibility push(hidden)
#endif

/* If we were a C++ library, we'd get this from <atomic>.  */
enum memmodel
{
  MEMMODEL_RELAXED = 0,
  MEMMODEL_CONSUME = 1,
  MEMMODEL_ACQUIRE = 2,
  MEMMODEL_RELEASE = 3,
  MEMMODEL_ACQ_REL = 4,
  MEMMODEL_SEQ_CST = 5
};
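
/* Illustrative note (not part of the ABI): these values deliberately match
   GCC's __ATOMIC_* constants (__ATOMIC_RELAXED == 0 ... __ATOMIC_SEQ_CST
   == 5), so a memmodel value can be passed straight to the __atomic
   builtins.  A minimal sketch:

     static inline unsigned
     gomp_load_flag_sketch (unsigned *flag)
     {
       return __atomic_load_n (flag, MEMMODEL_ACQUIRE);
     }
*/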
/* alloc.c */

#if defined(HAVE_ALIGNED_ALLOC) \
    || defined(HAVE_POSIX_MEMALIGN) \
    || defined(HAVE_MEMALIGN)
/* Defined if gomp_aligned_alloc doesn't use fallback version
   and free can be used instead of gomp_aligned_free.  */
#define GOMP_HAVE_EFFICIENT_ALIGNED_ALLOC 1
#endif

#if defined(GOMP_HAVE_EFFICIENT_ALIGNED_ALLOC) && !defined(__AMDGCN__)
#define GOMP_USE_ALIGNED_WORK_SHARES 1
#endif

extern void *gomp_malloc (size_t) __attribute__((malloc));
extern void *gomp_malloc_cleared (size_t) __attribute__((malloc));
extern void *gomp_realloc (void *, size_t);
extern void *gomp_aligned_alloc (size_t, size_t)
  __attribute__((malloc, alloc_size (2)));
extern void gomp_aligned_free (void *);

/* Avoid conflicting prototypes of alloca() in system headers by using
   GCC's builtin alloca().  */
#define gomp_alloca(x)  __builtin_alloca(x)

/* Optimized allocators for team-specific data that will die with the team.  */

#ifdef __AMDGCN__
/* The arena is initialized in config/gcn/team.c.  */
#define TEAM_ARENA_SIZE  64*1024  /* Must match the value in plugin-gcn.c.  */
#define TEAM_ARENA_START 16  /* LDS offset of start pointer.  */
#define TEAM_ARENA_FREE  24  /* LDS offset of free pointer.  */
#define TEAM_ARENA_END   32  /* LDS offset of end pointer.  */

static inline void * __attribute__((malloc))
team_malloc (size_t size)
{
  /* 4-byte align the size.  */
  size = (size + 3) & ~3;

  /* Allocate directly from the arena.
     The compiler does not support DS atomics, yet.  */
  void *result;
  asm ("ds_add_rtn_u64 %0, %1, %2\n\ts_waitcnt 0"
       : "=v"(result) : "v"(TEAM_ARENA_FREE), "v"(size), "e"(1L) : "memory");

  /* Handle OOM.  */
  if (result + size > *(void * __lds *)TEAM_ARENA_END)
    {
      /* While this is experimental, let's make sure we know when OOM
	 happens.  */
      const char msg[] = "GCN team arena exhausted\n";
      write (2, msg, sizeof(msg)-1);

      /* Fall back to using the heap (slowly).  */
      result = gomp_malloc (size);
    }
  return result;
}

static inline void * __attribute__((malloc))
team_malloc_cleared (size_t size)
{
  char *result = team_malloc (size);

  /* Clear the allocated memory.  */
  __builtin_memset (result, 0, size);

  return result;
}

static inline void
team_free (void *ptr)
{
  /* The whole arena is freed when the kernel exits.
     However, if we fell back to using the heap then we should free it.
     It would be better if this function could be a no-op, but at least
     LDS loads are cheap.  */
  if (ptr < *(void * __lds *)TEAM_ARENA_START
      || ptr >= *(void * __lds *)TEAM_ARENA_END)
    free (ptr);
}
#else
#define team_malloc(...) gomp_malloc (__VA_ARGS__)
#define team_malloc_cleared(...) gomp_malloc_cleared (__VA_ARGS__)
#define team_free(...) free (__VA_ARGS__)
#endif

/* error.c */

extern void gomp_vdebug (int, const char *, va_list);
extern void gomp_debug (int, const char *, ...)
	__attribute__ ((format (printf, 2, 3)));
#define gomp_vdebug(KIND, FMT, VALIST) \
  do { \
    if (__builtin_expect (gomp_debug_var, 0)) \
      (gomp_vdebug) ((KIND), (FMT), (VALIST)); \
  } while (0)
#define gomp_debug(KIND, ...) \
  do { \
    if (__builtin_expect (gomp_debug_var, 0)) \
      (gomp_debug) ((KIND), __VA_ARGS__); \
  } while (0)
extern void gomp_verror (const char *, va_list);
extern void gomp_error (const char *, ...)
	__attribute__ ((format (printf, 1, 2)));
extern void gomp_vfatal (const char *, va_list)
	__attribute__ ((noreturn));
extern void gomp_fatal (const char *, ...)
	__attribute__ ((noreturn, format (printf, 1, 2)));
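
/* Illustrative note: the macros above shadow the real functions so that the
   argument list is only evaluated when debugging is enabled; writing the
   callee as "(gomp_debug)" suppresses function-like macro expansion and
   calls the actual function.  A hedged usage sketch, with hypothetical
   variables mapnum and failed:

     gomp_debug (0, "mapping %zu variables\n", mapnum);
     if (failed)
       gomp_fatal ("device initialization failed");
*/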
struct gomp_task;
struct gomp_taskgroup;
struct htab;

#include "priority_queue.h"
#include "sem.h"
#include "mutex.h"
#include "bar.h"
#include "simple-bar.h"
#include "ptrlock.h"


/* This structure contains the data to control one work-sharing construct,
   either a LOOP (FOR/DO) or a SECTIONS.  */

enum gomp_schedule_type
{
  GFS_RUNTIME,
  GFS_STATIC,
  GFS_DYNAMIC,
  GFS_GUIDED,
  GFS_AUTO,
  GFS_MONOTONIC = 0x80000000U
};

struct gomp_doacross_work_share
{
  union {
    /* chunk_size copy, as ws->chunk_size is multiplied by incr for
       GFS_DYNAMIC.  */
    long chunk_size;
    /* Likewise, but for the ull implementation.  */
    unsigned long long chunk_size_ull;
    /* For schedule(static,0) this is the number
       of iterations assigned to the last thread, i.e. number of
       iterations / number of threads.  */
    long q;
    /* Likewise, but for the ull implementation.  */
    unsigned long long q_ull;
  };
  /* Size of each array entry (padded to cache line size).  */
  unsigned long elt_sz;
  /* Number of dimensions in sink vectors.  */
  unsigned int ncounts;
  /* True if the iterations can be flattened.  */
  bool flattened;
  /* Actual array (of elt_sz sized units), aligned to cache line size.
     This is indexed by team_id for GFS_STATIC and outermost iteration
     / chunk_size for other schedules.  */
  unsigned char *array;
  /* These two are only used for schedule(static,0).  */
  /* This one is number of iterations % number of threads.  */
  long t;
  union {
    /* And this one is cached t * (q + 1).  */
    long boundary;
    /* Likewise, but for the ull implementation.  */
    unsigned long long boundary_ull;
  };
  /* Pointer to extra memory if needed for lastprivate(conditional).  */
  void *extra;
  /* Array of shift counts for each dimension if they can be flattened.  */
  unsigned int shift_counts[];
};
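
/* Worked example (illustrative only): with 10 iterations over 4 threads
   under schedule(static,0), q = 10 / 4 = 2 and t = 10 % 4 = 2, so the
   first t threads get q + 1 = 3 iterations each and the rest get q = 2.
   The cached boundary = t * (q + 1) = 6 is the first iteration owned by a
   thread receiving only q iterations; under these assumptions the owner of
   iteration i is i / (q + 1) when i < boundary, and t + (i - boundary) / q
   otherwise.  */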
/* Like struct gomp_work_share, but only the first cache line of it plus
   a flexible array at the end.
   Keep in sync with struct gomp_work_share.  */
struct gomp_work_share_1st_cacheline
{
  enum gomp_schedule_type sched;
  int mode;
  union {
    struct {
      long chunk_size, end, incr;
    };
    struct {
      unsigned long long chunk_size_ull, end_ull, incr_ull;
    };
  };
  union {
    unsigned *ordered_team_ids;
    struct gomp_doacross_work_share *doacross;
  };
  unsigned ordered_num_used, ordered_owner, ordered_cur;
  struct gomp_work_share *next_alloc;
  char pad[];
};

struct gomp_work_share
{
  /* This member records the SCHEDULE clause to be used for this construct.
     The user specification of "runtime" will already have been resolved.
     If this is a SECTIONS construct, this value will always be DYNAMIC.  */
  enum gomp_schedule_type sched;

  int mode;

  union {
    struct {
      /* This is the chunk_size argument to the SCHEDULE clause.  */
      long chunk_size;

      /* This is the iteration end point.  If this is a SECTIONS construct,
	 this is the number of contained sections.  */
      long end;

      /* This is the iteration step.  If this is a SECTIONS construct, this
	 is always 1.  */
      long incr;
    };

    struct {
      /* The same as above, but for the unsigned long long loop variants.  */
      unsigned long long chunk_size_ull;
      unsigned long long end_ull;
      unsigned long long incr_ull;
    };
  };

  union {
    /* This is a circular queue that details which threads will be allowed
       into the ordered region and in which order.  When a thread allocates
       iterations on which it is going to work, it also registers itself at
       the end of the array.  When a thread reaches the ordered region, it
       checks to see if it is the one at the head of the queue.  If not, it
       blocks on its RELEASE semaphore.  */
    unsigned *ordered_team_ids;

    /* This is a pointer to DOACROSS work share data.  */
    struct gomp_doacross_work_share *doacross;
  };

  /* This is the number of threads that have registered themselves in
     the circular queue ordered_team_ids.  */
  unsigned ordered_num_used;

  /* This is the team_id of the currently acknowledged owner of the ordered
     section, or -1u if the ordered section has not been acknowledged by
     any thread.  This is distinguished from the thread that is *allowed*
     to take the section next.  */
  unsigned ordered_owner;

  /* This is the index into the circular queue ordered_team_ids of the
     current thread that's allowed into the ordered region.  */
  unsigned ordered_cur;

  /* This is a chain of allocated gomp_work_share blocks, valid only
     in the first gomp_work_share struct in the block.  */
  struct gomp_work_share *next_alloc;

  /* The above fields are written once during workshare initialization,
     or related to ordered worksharing.  Make sure the following fields
     are in a different cache line.  */

  /* This lock protects the update of the following members.  */
#ifdef GOMP_USE_ALIGNED_WORK_SHARES
  gomp_mutex_t lock __attribute__((aligned (64)));
#else
  char pad[64 - offsetof (struct gomp_work_share_1st_cacheline, pad)];
  gomp_mutex_t lock;
#endif

  /* This is the count of the number of threads that have exited the work
     share construct.  If the construct was marked nowait, they have moved on
     to other work; otherwise they're blocked on a barrier.  The last member
     of the team to exit the work share construct must deallocate it.  */
  unsigned threads_completed;

  union {
    /* This is the next iteration value to be allocated.  In the case of
       GFS_STATIC loops, this is the iteration start point and never
       changes.  */
    long next;

    /* The same, but with unsigned long long type.  */
    unsigned long long next_ull;

    /* This is the returned data structure for SINGLE COPYPRIVATE.  */
    void *copyprivate;
  };

  union {
    /* Link to gomp_work_share struct for next work sharing construct
       encountered after this one.  */
    gomp_ptrlock_t next_ws;

    /* gomp_work_share structs are chained in the free work share cache
       through this.  */
    struct gomp_work_share *next_free;
  };

  /* Task reductions for this work-sharing construct.  */
  uintptr_t *task_reductions;

  /* If only a few threads are in the team, ordered_team_ids can point
     to this array which fills the padding at the end of this struct.  */
  unsigned inline_ordered_team_ids[0];
};

extern char gomp_workshare_struct_check1
  [offsetof (struct gomp_work_share_1st_cacheline, next_alloc)
   == offsetof (struct gomp_work_share, next_alloc) ? 1 : -1];
extern char gomp_workshare_struct_check2
  [offsetof (struct gomp_work_share, lock) == 64 ? 1 : -1];
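
/* Illustrative note: the two extern char arrays above are a C89-compatible
   compile-time assertion -- if a condition is false, the array is given
   size -1 and compilation fails.  With C11 the second check could be
   spelled:

     _Static_assert (offsetof (struct gomp_work_share, lock) == 64,
		     "lock must start the second cache line");
*/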
/* This structure contains all of the thread-local data associated with
   a thread team.  This is the data that must be saved when a thread
   encounters a nested PARALLEL construct.  */

struct gomp_team_state
{
  /* This is the team of which the thread is currently a member.  */
  struct gomp_team *team;

  /* This is the work share construct which this thread is currently
     processing.  Recall that with NOWAIT, not all threads may be
     processing the same construct.  */
  struct gomp_work_share *work_share;

  /* This is the previous work share construct or NULL if there wasn't any.
     When all threads are done with the current work sharing construct,
     the previous one can be freed.  The current one can't, as its
     next_ws field is used.  */
  struct gomp_work_share *last_work_share;

  /* This is the ID of this thread within the team.  This value is
     guaranteed to be between 0 and N-1, where N is the number of
     threads in the team.  */
  unsigned team_id;

  /* Nesting level.  */
  unsigned level;

  /* Active nesting level.  Only active parallel regions are counted.  */
  unsigned active_level;

  /* Place-partition-var, offset and length into gomp_places_list array.  */
  unsigned place_partition_off;
  unsigned place_partition_len;

  /* Def-allocator-var ICV.  */
  uintptr_t def_allocator;

#ifdef HAVE_SYNC_BUILTINS
  /* Number of single stmts encountered.  */
  unsigned long single_count;
#endif

  /* For GFS_RUNTIME loops that resolved to GFS_STATIC, this is the
     trip number through the loop.  So first time a particular loop
     is encountered this number is 0, the second time through the loop
     is 1, etc.  This is unused when the compiler knows in advance that
     the loop is statically scheduled.  */
  unsigned long static_trip;
};

struct target_mem_desc;

/* These are the OpenMP 4.0 Internal Control Variables described in
   section 2.3.1.  Those described as having one copy per task are
   stored within the structure; those described as having one copy
   for the whole program are (naturally) global variables.  */

struct gomp_task_icv
{
  unsigned long nthreads_var;
  enum gomp_schedule_type run_sched_var;
  int run_sched_chunk_size;
  int default_device_var;
  unsigned int thread_limit_var;
  bool dyn_var;
  unsigned char max_active_levels_var;
  char bind_var;
  /* Internal ICV.  */
  struct target_mem_desc *target_data;
};

enum gomp_target_offload_t
{
  GOMP_TARGET_OFFLOAD_DEFAULT,
  GOMP_TARGET_OFFLOAD_MANDATORY,
  GOMP_TARGET_OFFLOAD_DISABLED
};

#define gomp_supported_active_levels UCHAR_MAX

extern struct gomp_task_icv gomp_global_icv;
#ifndef HAVE_SYNC_BUILTINS
extern gomp_mutex_t gomp_managed_threads_lock;
#endif
extern bool gomp_cancel_var;
extern enum gomp_target_offload_t gomp_target_offload_var;
extern int gomp_max_task_priority_var;
extern unsigned long long gomp_spin_count_var, gomp_throttled_spin_count_var;
extern unsigned long gomp_available_cpus, gomp_managed_threads;
extern unsigned long *gomp_nthreads_var_list, gomp_nthreads_var_list_len;
extern char *gomp_bind_var_list;
extern unsigned long gomp_bind_var_list_len;
extern void **gomp_places_list;
extern unsigned long gomp_places_list_len;
extern unsigned int gomp_num_teams_var;
extern int gomp_nteams_var;
extern int gomp_teams_thread_limit_var;
extern int gomp_debug_var;
extern bool gomp_display_affinity_var;
extern char *gomp_affinity_format_var;
extern size_t gomp_affinity_format_len;
extern uintptr_t gomp_def_allocator;
extern int goacc_device_num;
extern char *goacc_device_type;
extern int goacc_default_dims[GOMP_DIM_MAX];
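
/* Illustrative note (see env.c for the authoritative parsing): most of
   these ICVs and globals are seeded from environment variables at startup,
   for example:

     OMP_NUM_THREADS     -> gomp_global_icv.nthreads_var
     OMP_SCHEDULE        -> run_sched_var / run_sched_chunk_size
     OMP_DEFAULT_DEVICE  -> default_device_var
     OMP_DYNAMIC         -> dyn_var
     OMP_CANCELLATION    -> gomp_cancel_var
     OMP_TARGET_OFFLOAD  -> gomp_target_offload_var
     GOMP_DEBUG          -> gomp_debug_var
*/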
enum gomp_task_kind
{
  /* Implicit task.  */
  GOMP_TASK_IMPLICIT,
  /* Undeferred task.  */
  GOMP_TASK_UNDEFERRED,
  /* Task created by GOMP_task and waiting to be run.  */
  GOMP_TASK_WAITING,
  /* Task currently executing or scheduled and about to execute.  */
  GOMP_TASK_TIED,
  /* Used for target tasks that have vars mapped and async run started,
     but not yet completed.  Once that completes, they will be re-added
     into the queues as GOMP_TASK_WAITING in order to perform the var
     unmapping.  */
  GOMP_TASK_ASYNC_RUNNING,
  /* Task that has finished executing but is waiting for its
     completion event to be fulfilled.  */
  GOMP_TASK_DETACHED
};

struct gomp_task_depend_entry
{
  /* Address of dependency.  */
  void *addr;
  struct gomp_task_depend_entry *next;
  struct gomp_task_depend_entry *prev;
  /* Task that provides the dependency in ADDR.  */
  struct gomp_task *task;
  /* Depend entry is of type "IN".  */
  bool is_in;
  bool redundant;
  bool redundant_out;
};

struct gomp_dependers_vec
{
  size_t n_elem;
  size_t allocated;
  struct gomp_task *elem[];
};

/* Used when in GOMP_taskwait or in gomp_task_maybe_wait_for_dependencies.  */

struct gomp_taskwait
{
  bool in_taskwait;
  bool in_depend_wait;
  /* Number of tasks we are waiting for.  */
  size_t n_depend;
  gomp_sem_t taskwait_sem;
};

/* This structure describes a "task" to be run by a thread.  */

struct gomp_task
{
  /* Parent of this task.  */
  struct gomp_task *parent;
  /* Children of this task.  */
  struct priority_queue children_queue;
  /* Taskgroup this task belongs in.  */
  struct gomp_taskgroup *taskgroup;
  /* Tasks that depend on this task.  */
  struct gomp_dependers_vec *dependers;
  struct htab *depend_hash;
  struct gomp_taskwait *taskwait;
  /* Number of items in DEPEND.  */
  size_t depend_count;
  /* Number of tasks this task depends on.  Once this counter reaches
     0, we have no unsatisfied dependencies, and this task can be put
     into the various queues to be scheduled.  */
  size_t num_dependees;

  union {
    /* Valid only if deferred_p is false.  */
    gomp_sem_t *completion_sem;
    /* Valid only if deferred_p is true.  Set to the team that executes the
       task if the task is detached and the completion event has yet to be
       fulfilled.  */
    struct gomp_team *detach_team;
  };
  bool deferred_p;

  /* Priority of this task.  */
  int priority;
  /* The priority node for this task in each of the different queues.
     We put this here to avoid allocating space for each priority
     node.  Then we play offsetof() games to convert between pnode[]
     entries and the gomp_task in which they reside.  */
  struct priority_node pnode[3];

  struct gomp_task_icv icv;
  void (*fn) (void *);
  void *fn_data;
  enum gomp_task_kind kind;
  bool in_tied_task;
  bool final_task;
  bool copy_ctors_done;
  /* Set for undeferred tasks with unsatisfied dependencies which
     block further execution of their parent until the dependencies
     are satisfied.  */
  bool parent_depends_on;
  /* Dependencies provided and/or needed for this task.  DEPEND_COUNT
     is the number of items available.  */
  struct gomp_task_depend_entry depend[];
};
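
/* Illustrative sketch: depend[] is a C99 flexible array member, so a task
   with N dependence entries is carved out of a single allocation, roughly:

     struct gomp_task *task
       = gomp_malloc (sizeof (struct gomp_task)
		      + n * sizeof (struct gomp_task_depend_entry));
     task->depend_count = n;

   (task.c additionally co-allocates the task's data block; this shows only
   the shape of the idiom, not the exact call.)  */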
/* This structure describes a single #pragma omp taskgroup.  */

struct gomp_taskgroup
{
  struct gomp_taskgroup *prev;
  /* Queue of tasks that belong in this taskgroup.  */
  struct priority_queue taskgroup_queue;
  uintptr_t *reductions;
  bool in_taskgroup_wait;
  bool cancelled;
  bool workshare;
  gomp_sem_t taskgroup_sem;
  size_t num_children;
};

/* Various state of OpenMP async offloading tasks.  */
enum gomp_target_task_state
{
  GOMP_TARGET_TASK_DATA,
  GOMP_TARGET_TASK_BEFORE_MAP,
  GOMP_TARGET_TASK_FALLBACK,
  GOMP_TARGET_TASK_READY_TO_RUN,
  GOMP_TARGET_TASK_RUNNING,
  GOMP_TARGET_TASK_FINISHED
};

/* This structure describes a target task.  */

struct gomp_target_task
{
  struct gomp_device_descr *devicep;
  void (*fn) (void *);
  size_t mapnum;
  size_t *sizes;
  unsigned short *kinds;
  unsigned int flags;
  enum gomp_target_task_state state;
  struct target_mem_desc *tgt;
  struct gomp_task *task;
  struct gomp_team *team;
  /* Device-specific target arguments.  */
  void **args;
  void *hostaddrs[];
};

/* This structure describes a "team" of threads.  These are the threads
   that are spawned by a PARALLEL construct, as well as the work sharing
   constructs that the team encounters.  */

struct gomp_team
{
  /* This is the number of threads in the current team.  */
  unsigned nthreads;

  /* This is the number of gomp_work_share structs that have been allocated
     as a block last time.  */
  unsigned work_share_chunk;

  /* This is the saved team state that applied to a master thread before
     the current thread was created.  */
  struct gomp_team_state prev_ts;

  /* This semaphore should be used by the master thread instead of its
     "native" semaphore in the thread structure.  Required for nested
     parallels, as the master is a member of two teams.  */
  gomp_sem_t master_release;

  /* This points to an array with pointers to the release semaphore
     of the threads in the team.  */
  gomp_sem_t **ordered_release;

  /* List of work shares on which gomp_fini_work_share hasn't been
     called yet.  If the team hasn't been cancelled, this should be
     equal to each thr->ts.work_share, but otherwise it can be a possibly
     long list of workshares.  */
  struct gomp_work_share *work_shares_to_free;

  /* List of gomp_work_share structs chained through next_free fields.
     This is populated and taken off only by the first thread in the
     team encountering a new work sharing construct, in a critical
     section.  */
  struct gomp_work_share *work_share_list_alloc;

  /* List of gomp_work_share structs freed by free_work_share.  New
     entries are atomically added to the start of the list, and
     alloc_work_share can safely only move all but the first entry
     to work_share_list_alloc, as free_work_share can happen concurrently
     with alloc_work_share.  */
  struct gomp_work_share *work_share_list_free;

#ifdef HAVE_SYNC_BUILTINS
  /* Number of simple single regions encountered by threads in this
     team.  */
  unsigned long single_count;
#else
  /* Mutex protecting addition of workshares to work_share_list_free.  */
  gomp_mutex_t work_share_list_free_lock;
#endif

  /* This barrier is used for most synchronization of the team.  */
  gomp_barrier_t barrier;

  /* Initial work shares, to avoid allocating any gomp_work_share
     structs in the common case.  */
  struct gomp_work_share work_shares[8];

  gomp_mutex_t task_lock;
  /* Scheduled tasks.  */
  struct priority_queue task_queue;
  /* Number of all GOMP_TASK_{WAITING,TIED} tasks in the team.  */
  unsigned int task_count;
  /* Number of GOMP_TASK_WAITING tasks currently waiting to be scheduled.  */
  unsigned int task_queued_count;
  /* Number of GOMP_TASK_{WAITING,TIED} tasks currently running
     directly in gomp_barrier_handle_tasks; tasks spawned
     from e.g. GOMP_taskwait or GOMP_taskgroup_end don't count, even when
     that is called from a task run from gomp_barrier_handle_tasks.
     task_running_count should always be <= team->nthreads,
     and if the current task isn't in_tied_task, it will even be
     < team->nthreads.  */
  unsigned int task_running_count;
  int work_share_cancelled;
  int team_cancelled;

  /* Number of tasks waiting for their completion event to be fulfilled.  */
  unsigned int task_detach_count;

  /* This array contains structures for implicit tasks.  */
  struct gomp_task implicit_task[];
};
/* This structure contains all data that is private to libgomp and is
   allocated per thread.  */

struct gomp_thread
{
  /* This is the function that the thread should run upon launch.  */
  void (*fn) (void *data);
  void *data;

  /* This is the current team state for this thread.  The ts.team member
     is NULL only if the thread is idle.  */
  struct gomp_team_state ts;

  /* This is the task that the thread is currently executing.  */
  struct gomp_task *task;

  /* This semaphore is used for ordered loops.  */
  gomp_sem_t release;

  /* Place this thread is bound to plus one, or zero if not bound
     to any place.  */
  unsigned int place;

  /* User pthread thread pool.  */
  struct gomp_thread_pool *thread_pool;

#ifdef LIBGOMP_USE_PTHREADS
  /* omp_get_num_teams () - 1.  */
  unsigned int num_teams;

  /* omp_get_team_num ().  */
  unsigned int team_num;
#endif

#if defined(LIBGOMP_USE_PTHREADS) \
    && (!defined(HAVE_TLS) \
	|| !defined(__GLIBC__) \
	|| !defined(USING_INITIAL_EXEC_TLS))
  /* pthread_t of the thread containing this gomp_thread.
     On Linux when using initial-exec TLS,
     (typeof (pthread_t)) gomp_thread () - pthread_self ()
     is constant in all threads, so we can optimize and not
     store it.  */
#define GOMP_NEEDS_THREAD_HANDLE 1
  pthread_t handle;
#endif
};


struct gomp_thread_pool
{
  /* This array manages threads spawned from the top level, which will
     return to the idle loop once the current PARALLEL construct ends.  */
  struct gomp_thread **threads;
  unsigned threads_size;
  unsigned threads_used;
  /* The last team is used for non-nested teams to delay their destruction to
     make sure all the threads in the team move on to the pool's barrier before
     the team's barrier is destroyed.  */
  struct gomp_team *last_team;
  /* Number of threads running in this contention group.  */
  unsigned long threads_busy;

  /* This barrier holds and releases threads waiting in thread pools.  */
  gomp_simple_barrier_t threads_dock;
};

enum gomp_cancel_kind
{
  GOMP_CANCEL_PARALLEL = 1,
  GOMP_CANCEL_LOOP = 2,
  GOMP_CANCEL_FOR = GOMP_CANCEL_LOOP,
  GOMP_CANCEL_DO = GOMP_CANCEL_LOOP,
  GOMP_CANCEL_SECTIONS = 4,
  GOMP_CANCEL_TASKGROUP = 8
};
/* ... and here is that TLS data.  */

#if defined __nvptx__
extern struct gomp_thread *nvptx_thrs __attribute__((shared));
static inline struct gomp_thread *gomp_thread (void)
{
  int tid;
  asm ("mov.u32 %0, %%tid.y;" : "=r" (tid));
  return nvptx_thrs + tid;
}
#elif defined __AMDGCN__
static inline struct gomp_thread *gcn_thrs (void)
{
  /* The value is at the bottom of LDS.  */
  struct gomp_thread * __lds *thrs = (struct gomp_thread * __lds *)4;
  return *thrs;
}
static inline void set_gcn_thrs (struct gomp_thread *val)
{
  /* The value is at the bottom of LDS.  */
  struct gomp_thread * __lds *thrs = (struct gomp_thread * __lds *)4;
  *thrs = val;
}
static inline struct gomp_thread *gomp_thread (void)
{
  int tid = __builtin_gcn_dim_pos (1);
  return gcn_thrs () + tid;
}
#elif defined HAVE_TLS || defined USE_EMUTLS
extern __thread struct gomp_thread gomp_tls_data;
static inline struct gomp_thread *gomp_thread (void)
{
  return &gomp_tls_data;
}
#else
extern pthread_key_t gomp_tls_key;
static inline struct gomp_thread *gomp_thread (void)
{
  return pthread_getspecific (gomp_tls_key);
}
#endif

extern struct gomp_task_icv *gomp_new_icv (void);

/* Here's how to access the current copy of the ICVs.  */

static inline struct gomp_task_icv *gomp_icv (bool write)
{
  struct gomp_task *task = gomp_thread ()->task;
  if (task)
    return &task->icv;
  else if (write)
    return gomp_new_icv ();
  else
    return &gomp_global_icv;
}

#ifdef LIBGOMP_USE_PTHREADS
/* The attributes to be used during thread creation.  */
extern pthread_attr_t gomp_thread_attr;

extern pthread_key_t gomp_thread_destructor;
#endif
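
/* Illustrative usage: API entry points read ICVs through this accessor,
   passing write = true only when the value will be modified, e.g.

     void omp_set_dynamic_sketch (int val)
     {
       struct gomp_task_icv *icv = gomp_icv (true);
       icv->dyn_var = val;
     }

   (This mirrors the pattern used in icv.c; shown here only as a hedged
   sketch, hence the _sketch suffix.)  */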
/* Function prototypes.  */

/* affinity.c */

extern void gomp_init_affinity (void);
#ifdef LIBGOMP_USE_PTHREADS
extern void gomp_init_thread_affinity (pthread_attr_t *, unsigned int);
#endif
extern void **gomp_affinity_alloc (unsigned long, bool);
extern void gomp_affinity_init_place (void *);
extern bool gomp_affinity_add_cpus (void *, unsigned long, unsigned long,
				    long, bool);
extern bool gomp_affinity_remove_cpu (void *, unsigned long);
extern bool gomp_affinity_copy_place (void *, void *, long);
extern bool gomp_affinity_same_place (void *, void *);
extern bool gomp_affinity_finalize_place_list (bool);
extern bool gomp_affinity_init_level (int, unsigned long, bool);
extern void gomp_affinity_print_place (void *);
extern void gomp_get_place_proc_ids_8 (int, int64_t *);
extern void gomp_display_affinity_place (char *, size_t, size_t *, int);

/* affinity-fmt.c */

extern bool gomp_print_string (const char *str, size_t len);
extern void gomp_set_affinity_format (const char *, size_t);
extern void gomp_display_string (char *, size_t, size_t *, const char *,
				 size_t);
#ifdef LIBGOMP_USE_PTHREADS
typedef pthread_t gomp_thread_handle;
#else
typedef struct {} gomp_thread_handle;
#endif
extern size_t gomp_display_affinity (char *, size_t, const char *,
				     gomp_thread_handle,
				     struct gomp_team_state *, unsigned int);
extern void gomp_display_affinity_thread (gomp_thread_handle,
					  struct gomp_team_state *,
					  unsigned int) __attribute__((cold));

/* iter.c */

extern int gomp_iter_static_next (long *, long *);
extern bool gomp_iter_dynamic_next_locked (long *, long *);
extern bool gomp_iter_guided_next_locked (long *, long *);

#ifdef HAVE_SYNC_BUILTINS
extern bool gomp_iter_dynamic_next (long *, long *);
extern bool gomp_iter_guided_next (long *, long *);
#endif

/* iter_ull.c */

extern int gomp_iter_ull_static_next (unsigned long long *,
				      unsigned long long *);
extern bool gomp_iter_ull_dynamic_next_locked (unsigned long long *,
					       unsigned long long *);
extern bool gomp_iter_ull_guided_next_locked (unsigned long long *,
					      unsigned long long *);

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
extern bool gomp_iter_ull_dynamic_next (unsigned long long *,
					unsigned long long *);
extern bool gomp_iter_ull_guided_next (unsigned long long *,
				       unsigned long long *);
#endif

/* ordered.c */

extern void gomp_ordered_first (void);
extern void gomp_ordered_last (void);
extern void gomp_ordered_next (void);
extern void gomp_ordered_static_init (void);
extern void gomp_ordered_static_next (void);
extern void gomp_ordered_sync (void);
extern void gomp_doacross_init (unsigned, long *, long, size_t);
extern void gomp_doacross_ull_init (unsigned, unsigned long long *,
				    unsigned long long, size_t);

/* parallel.c */

extern unsigned gomp_resolve_num_threads (unsigned, unsigned);

/* proc.c (in config/) */

extern void gomp_init_num_threads (void);
extern unsigned gomp_dynamic_max_threads (void);

/* task.c */

extern void gomp_init_task (struct gomp_task *, struct gomp_task *,
			    struct gomp_task_icv *);
extern void gomp_end_task (void);
extern void gomp_barrier_handle_tasks (gomp_barrier_state_t);
extern void gomp_task_maybe_wait_for_dependencies (void **);
extern bool gomp_create_target_task (struct gomp_device_descr *,
				     void (*) (void *), size_t, void **,
				     size_t *, unsigned short *, unsigned int,
				     void **, void **,
				     enum gomp_target_task_state);
extern struct gomp_taskgroup *gomp_parallel_reduction_register (uintptr_t *,
								unsigned);
extern void gomp_workshare_taskgroup_start (void);
extern void gomp_workshare_task_reduction_register (uintptr_t *, uintptr_t *);

static inline void
gomp_finish_task (struct gomp_task *task)
{
  if (__builtin_expect (task->depend_hash != NULL, 0))
    free (task->depend_hash);
}

/* team.c */

extern struct gomp_team *gomp_new_team (unsigned);
extern void gomp_team_start (void (*) (void *), void *, unsigned,
			     unsigned, struct gomp_team *,
			     struct gomp_taskgroup *);
extern void gomp_team_end (void);
extern void gomp_free_thread (void *);
extern int gomp_pause_host (void);

/* target.c */

extern void gomp_init_targets_once (void);
extern int gomp_get_num_devices (void);
extern bool gomp_target_task_fn (void *);
/* Splay tree definitions.  */
typedef struct splay_tree_node_s *splay_tree_node;
typedef struct splay_tree_s *splay_tree;
typedef struct splay_tree_key_s *splay_tree_key;

struct target_var_desc {
  /* Splay key.  */
  splay_tree_key key;
  /* True if data should be copied from device to host at the end.  */
  bool copy_from;
  /* True if data always should be copied from device to host at the end.  */
  bool always_copy_from;
  /* True if this is for OpenACC 'attach'.  */
  bool is_attach;
  /* If GOMP_MAP_TO_PSET had a NULL pointer; used for Fortran descriptors,
     which were initially unallocated.  */
  bool has_null_ptr_assoc;
  /* Relative offset against key host_start.  */
  uintptr_t offset;
  /* Actual length.  */
  uintptr_t length;
};

struct target_mem_desc {
  /* Reference count.  */
  uintptr_t refcount;
  /* All the splay nodes allocated together.  */
  splay_tree_node array;
  /* Start of the target region.  */
  uintptr_t tgt_start;
  /* End of the target region.  */
  uintptr_t tgt_end;
  /* Handle to free.  */
  void *to_free;
  /* Previous target_mem_desc.  */
  struct target_mem_desc *prev;
  /* Number of items in following list.  */
  size_t list_count;

  /* Corresponding target device descriptor.  */
  struct gomp_device_descr *device_descr;

  /* List of target items to remove (or decrease refcount)
     at the end of region.  */
  struct target_var_desc list[];
};

/* Special value for refcount - mask to indicate existence of special
   values.  Right now we allocate 3 bits.  */
#define REFCOUNT_SPECIAL (~(uintptr_t) 0x7)

/* Special value for refcount - infinity.  */
#define REFCOUNT_INFINITY (REFCOUNT_SPECIAL | 0)
/* Special value for refcount - tgt_offset contains target address of the
   artificial pointer to "omp declare target link" object.  */
#define REFCOUNT_LINK (REFCOUNT_SPECIAL | 1)

/* Special value for refcount - structure element sibling list items.
   All such key refcounts have REFCOUNT_STRUCTELEM bits set, with _FLAG_FIRST
   and _FLAG_LAST indicating first and last in the created sibling
   sequence.  */
#define REFCOUNT_STRUCTELEM (REFCOUNT_SPECIAL | 4)
#define REFCOUNT_STRUCTELEM_P(V) \
  (((V) & REFCOUNT_STRUCTELEM) == REFCOUNT_STRUCTELEM)
/* The first leading key with _FLAG_FIRST set houses the actual reference
   count in the structelem_refcount field.  Other siblings point to this
   counter value through its structelem_refcount_ptr field.  */
#define REFCOUNT_STRUCTELEM_FLAG_FIRST (1)
/* The last key in the sibling sequence has this set.  This is required to
   indicate the sequence boundary, when we remove the structure sibling list
   from the map.  */
#define REFCOUNT_STRUCTELEM_FLAG_LAST (2)

#define REFCOUNT_STRUCTELEM_FIRST_P(V) \
  (REFCOUNT_STRUCTELEM_P (V) && ((V) & REFCOUNT_STRUCTELEM_FLAG_FIRST))
#define REFCOUNT_STRUCTELEM_LAST_P(V) \
  (REFCOUNT_STRUCTELEM_P (V) && ((V) & REFCOUNT_STRUCTELEM_FLAG_LAST))

/* Special offset values.  */
#define OFFSET_INLINED (~(uintptr_t) 0)
#define OFFSET_POINTER (~(uintptr_t) 1)
#define OFFSET_STRUCT (~(uintptr_t) 2)
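
/* Worked example (illustrative): REFCOUNT_SPECIAL is all-ones except the
   low three bits, so on a 64-bit target REFCOUNT_INFINITY is
   0xfffffffffffffff8, REFCOUNT_LINK is 0xfffffffffffffff9, and a
   structure-element sibling that is both first and last in its sequence
   has refcount (REFCOUNT_STRUCTELEM | 1 | 2) == 0xffffffffffffffff.
   Ordinary reference counts are small, so the predicates above cannot
   misfire on them.  */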
/* Auxiliary structure for infrequently-used or API-specific data.  */

struct splay_tree_aux {
  /* Pointer to the original mapping of "omp declare target link" object.  */
  splay_tree_key link_key;
  /* For a block with attached pointers, the attachment counters for each.
     Only used for OpenACC.  */
  uintptr_t *attach_count;
};

struct splay_tree_key_s {
  /* Address of the host object.  */
  uintptr_t host_start;
  /* Address immediately after the host object.  */
  uintptr_t host_end;
  /* Descriptor of the target memory.  */
  struct target_mem_desc *tgt;
  /* Offset from tgt->tgt_start to the start of the target object.  */
  uintptr_t tgt_offset;
  /* Reference count.  */
  uintptr_t refcount;
  union {
    /* Dynamic reference count.  */
    uintptr_t dynamic_refcount;

    /* Unified reference count for structure element siblings; used when
       REFCOUNT_STRUCTELEM_FIRST_P (k->refcount) is true, i.e. for the first
       sibling in a structure element sibling list sequence.  */
    uintptr_t structelem_refcount;

    /* When REFCOUNT_STRUCTELEM_P (k->refcount) is true, this field points
       into the (above) structelem_refcount field of the _FIRST splay_tree_key,
       the first key in the created sequence.  All structure element siblings
       share a single refcount in this manner.  Since these two fields won't be
       used at the same time, they are stashed in a union.  */
    uintptr_t *structelem_refcount_ptr;
  };
  struct splay_tree_aux *aux;
};

/* The comparison function.  */

static inline int
splay_compare (splay_tree_key x, splay_tree_key y)
{
  if (x->host_start == x->host_end
      && y->host_start == y->host_end)
    return 0;
  if (x->host_end <= y->host_start)
    return -1;
  if (x->host_start >= y->host_end)
    return 1;
  return 0;
}

#include "splay-tree.h"
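
/* Illustrative semantics: keys are host address ranges [host_start,
   host_end), and overlapping ranges compare as "equal", so looking up a
   subrange finds its enclosing mapping.  With a mapped key covering the
   hypothetical range [0x1000, 0x1100), a query for [0x1020, 0x1030)
   returns 0 and matches.  Two zero-size keys always compare equal, and a
   zero-size key [p, p) matches a range [a, b) only when a < p < b.  */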
typedef struct acc_dispatch_t
{
  /* Execute.  */
  __typeof (GOMP_OFFLOAD_openacc_exec) *exec_func;

  /* Create/destroy TLS data.  */
  __typeof (GOMP_OFFLOAD_openacc_create_thread_data) *create_thread_data_func;
  __typeof (GOMP_OFFLOAD_openacc_destroy_thread_data)
    *destroy_thread_data_func;

  struct {
    /* Once created and put into the "active" list, asyncqueues are then never
       destructed and removed from the "active" list, other than if the TODO
       device is shut down.  */
    gomp_mutex_t lock;
    int nasyncqueue;
    struct goacc_asyncqueue **asyncqueue;
    struct goacc_asyncqueue_list *active;

    __typeof (GOMP_OFFLOAD_openacc_async_construct) *construct_func;
    __typeof (GOMP_OFFLOAD_openacc_async_destruct) *destruct_func;
    __typeof (GOMP_OFFLOAD_openacc_async_test) *test_func;
    __typeof (GOMP_OFFLOAD_openacc_async_synchronize) *synchronize_func;
    __typeof (GOMP_OFFLOAD_openacc_async_serialize) *serialize_func;
    __typeof (GOMP_OFFLOAD_openacc_async_queue_callback) *queue_callback_func;

    __typeof (GOMP_OFFLOAD_openacc_async_exec) *exec_func;
    __typeof (GOMP_OFFLOAD_openacc_async_dev2host) *dev2host_func;
    __typeof (GOMP_OFFLOAD_openacc_async_host2dev) *host2dev_func;
  } async;

  __typeof (GOMP_OFFLOAD_openacc_get_property) *get_property_func;

  /* NVIDIA target specific routines.  */
  struct {
    __typeof (GOMP_OFFLOAD_openacc_cuda_get_current_device)
      *get_current_device_func;
    __typeof (GOMP_OFFLOAD_openacc_cuda_get_current_context)
      *get_current_context_func;
    __typeof (GOMP_OFFLOAD_openacc_cuda_get_stream) *get_stream_func;
    __typeof (GOMP_OFFLOAD_openacc_cuda_set_stream) *set_stream_func;
  } cuda;
} acc_dispatch_t;

/* Various state of the accelerator device.  */
enum gomp_device_state
{
  GOMP_DEVICE_UNINITIALIZED,
  GOMP_DEVICE_INITIALIZED,
  GOMP_DEVICE_FINALIZED
};

/* This structure describes an accelerator device.
   It contains the name of the corresponding libgomp plugin, function
   handlers for interaction with the device, the ID number of the device,
   and information about mapped memory.  */
struct gomp_device_descr
{
  /* Immutable data, which is only set during initialization, and which is not
     guarded by the lock.  */

  /* The name of the device.  */
  const char *name;

  /* Capabilities of device (supports OpenACC, OpenMP).  */
  unsigned int capabilities;

  /* This is the ID number of device among devices of the same type.  */
  int target_id;

  /* This is the TYPE of device.  */
  enum offload_target_type type;

  /* Function handlers.  */
  __typeof (GOMP_OFFLOAD_get_name) *get_name_func;
  __typeof (GOMP_OFFLOAD_get_caps) *get_caps_func;
  __typeof (GOMP_OFFLOAD_get_type) *get_type_func;
  __typeof (GOMP_OFFLOAD_get_num_devices) *get_num_devices_func;
  __typeof (GOMP_OFFLOAD_init_device) *init_device_func;
  __typeof (GOMP_OFFLOAD_fini_device) *fini_device_func;
  __typeof (GOMP_OFFLOAD_version) *version_func;
  __typeof (GOMP_OFFLOAD_load_image) *load_image_func;
  __typeof (GOMP_OFFLOAD_unload_image) *unload_image_func;
  __typeof (GOMP_OFFLOAD_alloc) *alloc_func;
  __typeof (GOMP_OFFLOAD_free) *free_func;
  __typeof (GOMP_OFFLOAD_dev2host) *dev2host_func;
  __typeof (GOMP_OFFLOAD_host2dev) *host2dev_func;
  __typeof (GOMP_OFFLOAD_dev2dev) *dev2dev_func;
  __typeof (GOMP_OFFLOAD_can_run) *can_run_func;
  __typeof (GOMP_OFFLOAD_run) *run_func;
  __typeof (GOMP_OFFLOAD_async_run) *async_run_func;

  /* Splay tree containing information about mapped memory regions.  */
  struct splay_tree_s mem_map;

  /* Mutex for the mutable data.  */
  gomp_mutex_t lock;

  /* Current state of the device.  OpenACC allows moving from the INITIALIZED
     state back to UNINITIALIZED; OpenMP only allows moving from INITIALIZED
     to FINALIZED (at program shutdown).  */
  enum gomp_device_state state;

  /* OpenACC-specific data and functions.  */
  /* This is mutable because of its mutable target_data member.  */
  acc_dispatch_t openacc;
};
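
/* Illustrative sketch (a pattern along the lines of target.c, not a copy):
   the mutable state field is only inspected or changed with the lock held.
   device_is_ready_sketch below is hypothetical:

     static bool
     device_is_ready_sketch (struct gomp_device_descr *devicep)
     {
       gomp_mutex_lock (&devicep->lock);
       bool ready = devicep->state == GOMP_DEVICE_INITIALIZED;
       gomp_mutex_unlock (&devicep->lock);
       return ready;
     }
*/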
/* Kind of the pragma, for which gomp_map_vars () is called.  */
enum gomp_map_vars_kind
{
  GOMP_MAP_VARS_OPENACC    = 1,
  GOMP_MAP_VARS_TARGET     = 2,
  GOMP_MAP_VARS_DATA       = 4,
  GOMP_MAP_VARS_ENTER_DATA = 8
};

extern void gomp_acc_declare_allocate (bool, size_t, void **, size_t *,
				       unsigned short *);
struct gomp_coalesce_buf;
extern void gomp_copy_host2dev (struct gomp_device_descr *,
				struct goacc_asyncqueue *, void *, const void *,
				size_t, bool, struct gomp_coalesce_buf *);
extern void gomp_copy_dev2host (struct gomp_device_descr *,
				struct goacc_asyncqueue *, void *, const void *,
				size_t);
extern uintptr_t gomp_map_val (struct target_mem_desc *, void **, size_t);
extern void gomp_attach_pointer (struct gomp_device_descr *,
				 struct goacc_asyncqueue *, splay_tree,
				 splay_tree_key, uintptr_t, size_t,
				 struct gomp_coalesce_buf *, bool);
extern void gomp_detach_pointer (struct gomp_device_descr *,
				 struct goacc_asyncqueue *, splay_tree_key,
				 uintptr_t, bool, struct gomp_coalesce_buf *);
extern struct target_mem_desc *goacc_map_vars (struct gomp_device_descr *,
					       struct goacc_asyncqueue *,
					       size_t, void **, void **,
					       size_t *, void *, bool,
					       enum gomp_map_vars_kind);
extern void goacc_unmap_vars (struct target_mem_desc *, bool,
			      struct goacc_asyncqueue *);
extern void gomp_init_device (struct gomp_device_descr *);
extern bool gomp_fini_device (struct gomp_device_descr *);
extern void gomp_unload_device (struct gomp_device_descr *);
extern bool gomp_remove_var (struct gomp_device_descr *, splay_tree_key);
extern void gomp_remove_var_async (struct gomp_device_descr *, splay_tree_key,
				   struct goacc_asyncqueue *);

/* work.c */

extern void gomp_init_work_share (struct gomp_work_share *, size_t, unsigned);
extern void gomp_fini_work_share (struct gomp_work_share *);
extern bool gomp_work_share_start (size_t);
extern void gomp_work_share_end (void);
extern bool gomp_work_share_end_cancel (void);
extern void gomp_work_share_end_nowait (void);

static inline void
gomp_work_share_init_done (void)
{
  struct gomp_thread *thr = gomp_thread ();
  if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
    gomp_ptrlock_set (&thr->ts.last_work_share->next_ws, thr->ts.work_share);
}
#ifdef HAVE_ATTRIBUTE_VISIBILITY
# pragma GCC visibility pop
#endif

/* Now that we're back to default visibility, include the globals.  */
#include "libgomp_g.h"

/* Include omp.h by parts.  */
#include "omp-lock.h"
#define _LIBGOMP_OMP_LOCK_DEFINED 1
#include "omp.h.in"

#if !defined (HAVE_ATTRIBUTE_VISIBILITY) \
    || !defined (HAVE_ATTRIBUTE_ALIAS) \
    || !defined (HAVE_AS_SYMVER_DIRECTIVE) \
    || !defined (PIC) \
    || !defined (HAVE_SYMVER_SYMBOL_RENAMING_RUNTIME_SUPPORT)
# undef LIBGOMP_GNU_SYMBOL_VERSIONING
#endif

#ifdef LIBGOMP_GNU_SYMBOL_VERSIONING
extern void gomp_init_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;

extern void gomp_init_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;

# define omp_lock_symver(fn) \
  __asm (".symver g" #fn "_30, " #fn "@@OMP_3.0"); \
  __asm (".symver g" #fn "_25, " #fn "@OMP_1.0");
#else
# define gomp_init_lock_30 omp_init_lock
# define gomp_destroy_lock_30 omp_destroy_lock
# define gomp_set_lock_30 omp_set_lock
# define gomp_unset_lock_30 omp_unset_lock
# define gomp_test_lock_30 omp_test_lock
# define gomp_init_nest_lock_30 omp_init_nest_lock
# define gomp_destroy_nest_lock_30 omp_destroy_nest_lock
# define gomp_set_nest_lock_30 omp_set_nest_lock
# define gomp_unset_nest_lock_30 omp_unset_nest_lock
# define gomp_test_nest_lock_30 omp_test_nest_lock
#endif

#ifdef HAVE_ATTRIBUTE_VISIBILITY
# define attribute_hidden __attribute__ ((visibility ("hidden")))
#else
# define attribute_hidden
#endif

#if __GNUC__ >= 9
# define HAVE_ATTRIBUTE_COPY
#endif

#ifdef HAVE_ATTRIBUTE_COPY
# define attribute_copy(arg) __attribute__ ((copy (arg)))
#else
# define attribute_copy(arg)
#endif

#ifdef HAVE_ATTRIBUTE_ALIAS
# define strong_alias(fn, al) \
  extern __typeof (fn) al __attribute__ ((alias (#fn))) attribute_copy (fn);

# define ialias_ulp	ialias_str1(__USER_LABEL_PREFIX__)
# define ialias_str1(x)	ialias_str2(x)
# define ialias_str2(x)	#x
# define ialias(fn) \
  extern __typeof (fn) gomp_ialias_##fn \
    __attribute__ ((alias (#fn))) attribute_hidden attribute_copy (fn);
# define ialias_redirect(fn) \
  extern __typeof (fn) fn __asm__ (ialias_ulp "gomp_ialias_" #fn) attribute_hidden;
# define ialias_call(fn) gomp_ialias_ ## fn
#else
# define ialias(fn)
# define ialias_redirect(fn)
# define ialias_call(fn) fn
#endif

/* Helper function for priority_node_to_task() and
   task_to_priority_node().

   Return the offset from a task to its priority_node entry.  The
   priority_node entry has type TYPE.  */

static inline size_t
priority_queue_offset (enum priority_queue_type type)
{
  return offsetof (struct gomp_task, pnode[(int) type]);
}

/* Return the task associated with a priority NODE of type TYPE.  */

static inline struct gomp_task *
priority_node_to_task (enum priority_queue_type type,
		       struct priority_node *node)
{
  return (struct gomp_task *) ((char *) node - priority_queue_offset (type));
}

/* Return the priority node of type TYPE for a given TASK.  */

static inline struct priority_node *
task_to_priority_node (enum priority_queue_type type,
		       struct gomp_task *task)
{
  return (struct priority_node *) ((char *) task
				   + priority_queue_offset (type));
}
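
/* Illustrative note: this is the classic "container_of" idiom -- pnode[]
   lives inside struct gomp_task, so subtracting the member offset recovers
   the enclosing task.  The two conversions are exact inverses; assuming the
   PQ_CHILDREN queue type from priority_queue.h:

     struct priority_node *n = task_to_priority_node (PQ_CHILDREN, task);
     // priority_node_to_task (PQ_CHILDREN, n) == task
*/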
#ifdef LIBGOMP_USE_PTHREADS
static inline gomp_thread_handle
gomp_thread_self (void)
{
  return pthread_self ();
}

static inline gomp_thread_handle
gomp_thread_to_pthread_t (struct gomp_thread *thr)
{
  struct gomp_thread *this_thr = gomp_thread ();
  if (thr == this_thr)
    return pthread_self ();
#ifdef GOMP_NEEDS_THREAD_HANDLE
  return thr->handle;
#else
  /* On Linux with initial-exec TLS, the pthread_t of the thread containing
     thr can be computed from thr, this_thr and pthread_self (),
     as the distance between this_thr and pthread_self () is constant.  */
  return pthread_self () + ((uintptr_t) thr - (uintptr_t) this_thr);
#endif
}
#else
static inline gomp_thread_handle
gomp_thread_self (void)
{
  return (gomp_thread_handle) {};
}

static inline gomp_thread_handle
gomp_thread_to_pthread_t (struct gomp_thread *thr)
{
  (void) thr;
  return gomp_thread_self ();
}
#endif

#endif /* LIBGOMP_H */