Lines Matching refs:team in /freebsd-13-stable/contrib/llvm-project/openmp/runtime/src/

71 static void __kmp_initialize_team(kmp_team_t *team, int new_nproc,
75 static void __kmp_partition_places(kmp_team_t *team,
81 void __kmp_setup_icv_copy(kmp_team_t *team, int new_nproc,
484 /* Print out the storage map for the major kmp_team_t team data structures
487 static void __kmp_print_team_storage_map(const char *header, kmp_team_t *team,
489 int num_disp_buff = team->t.t_max_nproc > 1 ? __kmp_dispatch_num_buffers : 2;
490 __kmp_print_storage_map_gtid(-1, team, team + 1, sizeof(kmp_team_t), "%s_%d",
493 __kmp_print_storage_map_gtid(-1, &team->t.t_bar[0],
494 &team->t.t_bar[bs_last_barrier],
498 __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_plain_barrier],
499 &team->t.t_bar[bs_plain_barrier + 1],
503 __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_forkjoin_barrier],
504 &team->t.t_bar[bs_forkjoin_barrier + 1],
509 __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_reduction_barrier],
510 &team->t.t_bar[bs_reduction_barrier + 1],
516 -1, &team->t.t_dispatch[0], &team->t.t_dispatch[num_thr],
520 -1, &team->t.t_threads[0], &team->t.t_threads[num_thr],
523 __kmp_print_storage_map_gtid(-1, &team->t.t_disp_buffer[0],
524 &team->t.t_disp_buffer[num_disp_buff],
659 kmp_team_t *team = __kmp_team_from_gtid(gtid);
671 if (!team->t.t_serialized) {
673 KMP_WAIT(&team->t.t_ordered.dt.t_value, __kmp_tid_from_gtid(gtid), KMP_EQ,
685 kmp_team_t *team = __kmp_team_from_gtid(gtid);
693 if (!team->t.t_serialized) {
696 /* use the tid of the next thread in this team */
698 team->t.t_ordered.dt.t_value = ((tid + 1) % team->t.t_nproc);
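
For orientation: lines 673-698 above show how the runtime hands the "ordered" token around the team; each thread spins until the shared value equals its own tid and, on leaving the ordered section, passes it to (tid + 1) % nproc. A minimal stand-alone sketch of that hand-off, with illustrative names (ordered_token, enter_ordered) rather than the runtime's:

    #include <atomic>

    // Index of the thread currently allowed into the ordered region
    // (the role played by team->t.t_ordered.dt.t_value).
    std::atomic<int> ordered_token{0};

    void enter_ordered(int my_tid) {
      // Spin until it is this thread's turn; the real runtime waits via KMP_WAIT
      // with back-off rather than a raw busy loop.
      while (ordered_token.load(std::memory_order_acquire) != my_tid) {
      }
    }

    void exit_ordered(int my_tid, int num_threads) {
      // Pass the token to the next thread in the team, wrapping around.
      ordered_token.store((my_tid + 1) % num_threads, std::memory_order_release);
    }
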
711 kmp_team_t *team;
718 team = th->th.th_team;
723 if (team->t.t_serialized) {
729 /* try to set team count to thread count--success means thread got the
732 if (team->t.t_construct == old_this) {
733 status = __kmp_atomic_compare_store_acq(&team->t.t_construct, old_this,
739 team->t.t_active_level ==
740 1) { // Only report metadata by master of active team at level 1
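
For context: lines 729-733 above decide which thread executes a "single" construct with an atomic compare-and-swap on the team's construct counter; the one thread whose CAS succeeds wins the construct. A simplified sketch of that race, using illustrative names (construct_count, claim_single):

    #include <atomic>

    // Per-team counter, incremented once per single/sections construct encountered.
    std::atomic<int> construct_count{0};

    // Returns true for exactly one thread per construct: the one whose CAS succeeds.
    // old_this is the construct count this thread last observed (its local copy).
    bool claim_single(int old_this) {
      int expected = old_this;
      return construct_count.compare_exchange_strong(expected, old_this + 1,
                                                     std::memory_order_acq_rel);
    }
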
771 * set_nproc is the number of threads requested for the team
942 ("__kmp_reserve_threads: T#%d serializing team after reclaiming "
954 /* Allocate threads from the thread pool and assign them to the new team. We are
957 static void __kmp_fork_team_threads(kmp_root_t *root, kmp_team_t *team,
962 KA_TRACE(10, ("__kmp_fork_team_threads: new_nprocs = %d\n", team->t.t_nproc));
968 master_th->th.th_team = team;
969 master_th->th.th_team_nproc = team->t.t_nproc;
972 master_th->th.th_dispatch = &team->t.t_dispatch[0];
974 /* make sure we are not the optimized hot team */
980 int level = team->t.t_active_level - 1; // index in array of hot teams
986 if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
987 master_th->th.th_teams_level == team->t.t_level) {
990 } // team->t.t_level will be increased inside parallel
994 // hot team has already been allocated for given level
995 KMP_DEBUG_ASSERT(hot_teams[level].hot_team == team);
996 use_hot_team = 1; // the team is ready to use
999 hot_teams[level].hot_team = team; // remember new hot team
1000 hot_teams[level].hot_team_nth = team->t.t_nproc;
1007 use_hot_team = team == root->r.r_hot_team;
1012 team->t.t_threads[0] = master_th;
1013 __kmp_initialize_info(master_th, team, 0, master_gtid);
1016 for (i = 1; i < team->t.t_nproc; i++) {
1018 /* fork or reallocate a new thread and install it in team */
1019 kmp_info_t *thr = __kmp_allocate_thread(root, team, i);
1020 team->t.t_threads[i] = thr;
1022 KMP_DEBUG_ASSERT(thr->th.th_team == team);
1023 /* align team and thread arrived states */
1026 __kmp_gtid_from_tid(0, team), team->t.t_id, 0,
1027 __kmp_gtid_from_tid(i, team), team->t.t_id, i,
1028 team->t.t_bar[bs_forkjoin_barrier].b_arrived,
1029 team->t.t_bar[bs_plain_barrier].b_arrived));
1035 kmp_balign_t *balign = team->t.t_threads[i]->th.th_bar;
1037 balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
1040 balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
1047 __kmp_partition_places(team);
1051 if (__kmp_display_affinity && team->t.t_display_affinity != 1) {
1052 for (i = 0; i < team->t.t_nproc; i++) {
1053 kmp_info_t *thr = team->t.t_threads[i];
1054 if (thr->th.th_prev_num_threads != team->t.t_nproc ||
1055 thr->th.th_prev_level != team->t.t_level) {
1056 team->t.t_display_affinity = 1;
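
As a reading aid: lines 1023-1040 above copy the team's per-barrier "arrived" counters into each newly installed worker, so a thread pulled from the pool starts in step with the team at the next barrier. A small sketch of that alignment step (struct and field names are illustrative, not the runtime's):

    enum { bs_plain_barrier, bs_forkjoin_barrier, bs_reduction_barrier, bs_last_barrier };

    struct team_bar   { unsigned long long b_arrived; };  // team-side arrival counter
    struct thread_bar { unsigned long long b_arrived; };  // thread's cached copy

    // A thread joining the team must see the team's current arrival counts,
    // otherwise the next barrier would count it as ahead of or behind the team.
    void align_barrier_state(thread_bar (&thr)[bs_last_barrier],
                             const team_bar (&tm)[bs_last_barrier]) {
      for (int b = 0; b < bs_last_barrier; ++b)
        thr[b].b_arrived = tm[b].b_arrived;
    }
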
1066 // Propagate any changes to the floating point control registers out to the team
1067 // We try to avoid unnecessary writes to the relevant cache line in the team
1069 inline static void propagateFPControl(kmp_team_t *team) {
1082 // our objective is the same. We have to ensure that the values in the team
1087 // threads in the team to have to read it again.
1088 KMP_CHECK_UPDATE(team->t.t_x87_fpu_control_word, x87_fpu_control_word);
1089 KMP_CHECK_UPDATE(team->t.t_mxcsr, mxcsr);
1092 KMP_CHECK_UPDATE(team->t.t_fp_control_saved, TRUE);
1094 // Similarly here. Don't write to this cache-line in the team structure
1096 KMP_CHECK_UPDATE(team->t.t_fp_control_saved, FALSE);
1101 // the team.
1102 inline static void updateHWFPControl(kmp_team_t *team) {
1103 if (__kmp_inherit_fp_control && team->t.t_fp_control_saved) {
1104 // Only reset the fp control regs if they have been changed in the team.
1112 if (team->t.t_x87_fpu_control_word != x87_fpu_control_word) {
1114 __kmp_load_x87_fpu_control_word(&team->t.t_x87_fpu_control_word);
1117 if (team->t.t_mxcsr != mxcsr) {
1118 __kmp_load_mxcsr(&team->t.t_mxcsr);
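
For context: lines 1088-1096 and 1103-1118 avoid dirtying a team cache line that every worker reads by writing the FP-control fields only when the value actually changes (the KMP_CHECK_UPDATE pattern), and by reloading the hardware registers only when they differ from the saved team values. A sketch of that conditional-write idiom, with illustrative names:

    #include <cstdint>

    // Store only if different, so an unchanged value never invalidates the
    // cache line holding the team's FP-control state.
    template <typename T>
    inline void check_update(T &dst, const T &src) {
      if (dst != src)
        dst = src;
    }

    struct team_fp_state {
      std::uint16_t x87_fpu_control_word;
      std::uint32_t mxcsr;
      bool fp_control_saved;
    };

    void propagate_fp_control_sketch(team_fp_state &tm,
                                     std::uint16_t cw, std::uint32_t csr) {
      check_update(tm.x87_fpu_control_word, cw);
      check_update(tm.mxcsr, csr);
      check_update(tm.fp_control_saved, true);
    }
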
1127 static void __kmp_alloc_argv_entries(int argc, kmp_team_t *team,
1130 /* Run a parallel region that has been serialized, so runs only in a team of the
1150 /* utilize the serialized team held by this thread */
1161 "team %p, new task_team = NULL\n",
1204 /* this serial team was already used
1220 /* setup new serialized team and install it */
1228 ("__kmpc_serialized_parallel: T#%d allocated new serial team %p\n",
1237 ("__kmpc_serialized_parallel: T#%d reusing cached serial team %p\n",
1241 /* we have to initialize this serial team */
1262 team->t.t_serialized? */
1307 /* this serialized team is already being used,
1325 "of serial team %p to %d\n",
1397 kmp_team_t *team;
1495 // The team is actual (hot), all workers are ready at the fork barrier.
1496 // No lock needed to initialize the team a bit, then free workers.
1594 /* Change number of threads in the team if requested */
1603 // Keep extra threads hot in the team for possible next parallels
1617 KF_TRACE(10, ("__kmp_fork_call: before internal fork: root=%p, team=%p, "
1621 KF_TRACE(10, ("__kmp_fork_call: after internal fork: root=%p, team=%p, "
1668 KC_TRACE(10, ("__kmp_fork_call: T#%d serializing team; requested %d"
1685 // execution it will be freed later after team of threads created
1718 // Get args from parent team for teams construct
1788 team = master_th->th.th_team;
1789 // team->t.t_pkfn = microtask;
1790 team->t.t_invoke = invoker;
1791 __kmp_alloc_argv_entries(argc, team, TRUE);
1792 team->t.t_argc = argc;
1793 argv = (void **)team->t.t_argv;
1799 // Get args from parent team for teams construct
1804 team->t.t_level--;
1946 // Figure out the proc_bind_policy for the new team.
1983 /* allocate a new parallel team */
1985 team = __kmp_allocate_team(root, nthreads, nthreads,
1992 /* allocate a new parallel team */
1994 team = __kmp_allocate_team(root, nthreads, nthreads,
2003 10, ("__kmp_fork_call: after __kmp_allocate_team - team = %p\n", team));
2005 /* setup the new team */
2006 KMP_CHECK_UPDATE(team->t.t_master_tid, master_tid);
2007 KMP_CHECK_UPDATE(team->t.t_master_this_cons, master_this_cons);
2008 KMP_CHECK_UPDATE(team->t.t_ident, loc);
2009 KMP_CHECK_UPDATE(team->t.t_parent, parent_team);
2010 KMP_CHECK_UPDATE_SYNC(team->t.t_pkfn, microtask);
2012 KMP_CHECK_UPDATE_SYNC(team->t.ompt_team_info.master_return_address,
2015 KMP_CHECK_UPDATE(team->t.t_invoke, invoker); // TODO move to root, maybe
2019 KMP_CHECK_UPDATE(team->t.t_level, new_level);
2021 KMP_CHECK_UPDATE(team->t.t_active_level, new_level);
2025 KMP_CHECK_UPDATE(team->t.t_level, new_level);
2027 KMP_CHECK_UPDATE(team->t.t_active_level, new_level);
2031 KMP_CHECK_UPDATE(team->t.t_sched.sched, new_sched.sched);
2033 KMP_CHECK_UPDATE(team->t.t_cancel_request, cancel_noreq);
2034 KMP_CHECK_UPDATE(team->t.t_def_allocator, master_th->th.th_def_allocator);
2036 // Update the floating point rounding in the team if required.
2037 propagateFPControl(team);
2040 // Set master's task team to team's task team. Unless this is hot team, it
2044 KA_TRACE(20, ("__kmp_fork_call: Master T#%d pushing task_team %p / team "
2045 "%p, new task_team %p / team %p\n",
2048 team->t.t_task_team[master_th->th.th_task_state], team));
2079 team == master_th->th.th_hot_teams[active_level].hot_team) {
2080 // Restore master's nested state if nested hot team
2093 (team == root->r.r_hot_team));
2099 ("__kmp_fork_call: T#%d(%d:%d)->(%d:0) created a team of %d threads\n",
2100 gtid, parent_team->t.t_id, team->t.t_master_tid, team->t.t_id,
2101 team->t.t_nproc));
2102 KMP_DEBUG_ASSERT(team != root->r.r_hot_team ||
2103 (team->t.t_master_tid == 0 &&
2104 (team->t.t_parent == root->r.r_root_team ||
2105 team->t.t_parent->t.t_serialized)));
2109 argv = (void **)team->t.t_argv;
2118 // Get args from parent team for teams construct
2119 KMP_CHECK_UPDATE(argv[i], team->t.t_parent->t.t_argv[i]);
2124 KMP_CHECK_UPDATE(team->t.t_master_active, master_active);
2128 __kmp_fork_team_threads(root, team, master_th, gtid);
2129 __kmp_setup_icv_copy(team, nthreads,
2139 if (team->t.t_active_level == 1 // only report frames at level 1
2151 team->t.t_region_time = tmp_time;
2158 __kmp_itt_region_forking(gtid, team->t.t_nproc, 0);
2164 KMP_DEBUG_ASSERT(team == __kmp_threads[gtid]->th.th_team);
2167 ("__kmp_internal_fork : root=%p, team=%p, master_th=%p, gtid=%d\n",
2168 root, team, master_th, gtid));
2172 team->t.t_stack_id =
2181 __kmp_internal_fork(loc, gtid, team);
2182 KF_TRACE(10, ("__kmp_internal_fork : after : root=%p, team=%p, "
2184 root, team, master_th, gtid));
2194 team->t.t_id, team->t.t_pkfn));
2205 if (!team->t.t_invoke(gtid)) {
2217 team->t.t_id, team->t.t_pkfn));
2233 kmp_team_t *team) {
2236 ((team->t.t_serialized) ? ompt_state_work_serial
2241 kmp_team_t *team, ompt_data_t *parallel_data,
2250 __kmp_join_restore_state(thread, team);
2262 kmp_team_t *team;
2273 team = master_th->th.th_team;
2274 parent_team = team->t.t_parent;
2279 void *team_microtask = (void *)team->t.t_pkfn;
2287 KA_TRACE(20, ("__kmp_join_call: T#%d, old team = %p old task_team = %p, "
2289 __kmp_gtid_from_thread(master_th), team,
2290 team->t.t_task_team[master_th->th.th_task_state],
2293 team->t.t_task_team[master_th->th.th_task_state]);
2297 if (team->t.t_serialized) {
2300 int level = team->t.t_level;
2305 team->t.t_level++;
2310 team->t.t_serialized++;
2324 master_active = team->t.t_master_active;
2328 // But there is barrier for external team (league).
2329 __kmp_internal_join(loc, gtid, team);
2338 ompt_data_t *parallel_data = &(team->t.ompt_team_info.parallel_data);
2339 void *codeptr = team->t.ompt_team_info.master_return_address;
2345 (__itt_caller)team->t
2350 if (team->t.t_active_level == 1 &&
2357 __kmp_itt_frame_submit(gtid, team->t.t_region_time,
2367 team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
2368 team->t.t_level == master_th->th.th_teams_level + 1) {
2369 // AC: We need to leave the team structure intact at the end of parallel
2370 // inside the teams construct, so that at the next parallel same (hot) team
2377 int ompt_team_size = team->t.t_nproc;
2389 team->t.t_level--;
2390 team->t.t_active_level--;
2393 // Restore number of threads in the team if needed. This code relies on
2400 kmp_info_t **other_threads = team->t.t_threads;
2401 team->t.t_nproc = new_num;
2405 // Adjust states of non-used threads of the team
2411 balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
2414 balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
2434 /* do cleanup and restore the parent team */
2435 master_th->th.th_info.ds.ds_tid = team->t.t_master_tid;
2436 master_th->th.th_local.this_construct = team->t.t_master_this_cons;
2438 master_th->th.th_dispatch = &parent_team->t.t_dispatch[team->t.t_master_tid];
2446 team->t.t_level > master_th->th.th_teams_level) {
2459 int ompt_team_size = (flags == ompt_task_initial) ? 0 : team->t.t_nproc;
2469 KF_TRACE(10, ("__kmp_join_call1: T#%d, this_thread=%p team=%p\n", 0,
2470 master_th, team));
2475 master_th->th.th_first_place = team->t.t_first_place;
2476 master_th->th.th_last_place = team->t.t_last_place;
2478 master_th->th.th_def_allocator = team->t.t_def_allocator;
2480 updateHWFPControl(team);
2485 __kmp_free_team(root, team USE_NESTED_HOT_ARG(
2489 region otherwise assertions may fail occasionally since the old team may be
2498 /* restore serialized team, if need be */
2511 // Remember master's state if we re-use this nested hot team
2520 // Copy the task team from the parent team to the master thread
2524 ("__kmp_join_call: Master T#%d restoring task_team %p / team %p\n",
2552 serial team stack. If so, do it. */
2569 if (push) { /* push a record on the serial team's stack */
2606 // If this omp_set_num_threads() call will cause the hot team size to be
2625 // When decreasing team size, threads no longer in the team should unref
2626 // task team.
2720 // kmp_team_t *team;
2825 kmp_team_t *team;
2837 team = thr->th.th_team;
2838 ii = team->t.t_level;
2861 dd = team->t.t_serialized;
2864 for (dd = team->t.t_serialized; (dd > 0) && (ii > level); dd--, ii--) {
2866 if ((team->t.t_serialized) && (!dd)) {
2867 team = team->t.t_parent;
2871 team = team->t.t_parent;
2872 dd = team->t.t_serialized;
2877 return (dd > 1) ? (0) : (team->t.t_master_tid);
2883 kmp_team_t *team;
2895 team = thr->th.th_team;
2896 ii = team->t.t_level;
2917 for (dd = team->t.t_serialized; (dd > 0) && (ii > level); dd--, ii--) {
2919 if (team->t.t_serialized && (!dd)) {
2920 team = team->t.t_parent;
2924 team = team->t.t_parent;
2929 return team->t.t_nproc;
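
For context: lines 2825-2929 answer the ancestor-level queries (omp_get_ancestor_thread_num / omp_get_team_size) by walking from the current team up the t_parent chain, using t_level and t_serialized to find the team at the requested nesting level. A simplified sketch of the upward walk, ignoring serialized levels (field names illustrative):

    struct team_rec {
      int level = 0;            // nesting level of this team
      int nproc = 1;
      team_rec *parent = nullptr;
    };

    // Walk toward the root until the team's nesting level matches the request;
    // return its size, or -1 if the level is out of range.
    int get_team_size_sketch(const team_rec *t, int level) {
      if (t == nullptr || level < 0 || level > t->level)
        return -1;
      while (t->level > level && t->parent != nullptr)
        t = t->parent;
      return t->nproc;
    }
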
2967 at least argc number of *t_argv entries for the requested team. */
2968 static void __kmp_alloc_argv_entries(int argc, kmp_team_t *team, int realloc) {
2970 KMP_DEBUG_ASSERT(team);
2971 if (!realloc || argc > team->t.t_max_argc) {
2973 KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: needed entries=%d, "
2975 team->t.t_id, argc, (realloc) ? team->t.t_max_argc : 0));
2977 if (realloc && team->t.t_argv != &team->t.t_inline_argv[0])
2978 __kmp_free((void *)team->t.t_argv);
2982 team->t.t_max_argc = KMP_INLINE_ARGV_ENTRIES;
2983 KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: inline allocate %d "
2985 team->t.t_id, team->t.t_max_argc));
2986 team->t.t_argv = &team->t.t_inline_argv[0];
2989 -1, &team->t.t_inline_argv[0],
2990 &team->t.t_inline_argv[KMP_INLINE_ARGV_ENTRIES],
2992 team->t.t_id);
2996 team->t.t_max_argc = (argc <= (KMP_MIN_MALLOC_ARGV_ENTRIES >> 1))
2999 KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: dynamic allocate %d "
3001 team->t.t_id, team->t.t_max_argc));
3002 team->t.t_argv =
3003 (void **)__kmp_page_allocate(sizeof(void *) * team->t.t_max_argc);
3005 __kmp_print_storage_map_gtid(-1, &team->t.t_argv[0],
3006 &team->t.t_argv[team->t.t_max_argc],
3007 sizeof(void *) * team->t.t_max_argc,
3008 "team_%d.t_argv", team->t.t_id);
3014 static void __kmp_allocate_team_arrays(kmp_team_t *team, int max_nth) {
3017 team->t.t_threads =
3019 team->t.t_disp_buffer = (dispatch_shared_info_t *)__kmp_allocate(
3021 team->t.t_dispatch =
3023 team->t.t_implicit_task_taskdata =
3025 team->t.t_max_nproc = max_nth;
3029 team->t.t_disp_buffer[i].buffer_index = i;
3030 team->t.t_disp_buffer[i].doacross_buf_idx = i;
3034 static void __kmp_free_team_arrays(kmp_team_t *team) {
3037 for (i = 0; i < team->t.t_max_nproc; ++i) {
3038 if (team->t.t_dispatch[i].th_disp_buffer != NULL) {
3039 __kmp_free(team->t.t_dispatch[i].th_disp_buffer);
3040 team->t.t_dispatch[i].th_disp_buffer = NULL;
3044 __kmp_dispatch_free_hierarchies(team);
3046 __kmp_free(team->t.t_threads);
3047 __kmp_free(team->t.t_disp_buffer);
3048 __kmp_free(team->t.t_dispatch);
3049 __kmp_free(team->t.t_implicit_task_taskdata);
3050 team->t.t_threads = NULL;
3051 team->t.t_disp_buffer = NULL;
3052 team->t.t_dispatch = NULL;
3053 team->t.t_implicit_task_taskdata = 0;
3056 static void __kmp_reallocate_team_arrays(kmp_team_t *team, int max_nth) {
3057 kmp_info_t **oldThreads = team->t.t_threads;
3059 __kmp_free(team->t.t_disp_buffer);
3060 __kmp_free(team->t.t_dispatch);
3061 __kmp_free(team->t.t_implicit_task_taskdata);
3062 __kmp_allocate_team_arrays(team, max_nth);
3064 KMP_MEMCPY(team->t.t_threads, oldThreads,
3065 team->t.t_nproc * sizeof(kmp_info_t *));
3104 static kmp_internal_control_t __kmp_get_x_global_icvs(const kmp_team_t *team) {
3108 0; // probably =team->t.t_serial like in save_inter_controls
3109 copy_icvs(&gx_icvs, &team->t.t_threads[0]->th.th_current_task->td_icvs);
3133 /* setup the root team for this task */
3134 /* allocate the root team structure */
3150 // team.
3159 /* initialize root team */
3167 ("__kmp_initialize_root: init root team %d arrived: join=%u, plain=%u\n",
3170 /* setup the hot team for this task */
3171 /* allocate the hot team structure */
3193 /* initialize hot team */
3212 static void __kmp_print_structure_team_accum( // Add team to list of teams.
3214 kmp_team_p const *team // Team to add.
3219 // List is sorted in ascending order by team id.
3225 if (team == NULL) {
3229 __kmp_print_structure_team_accum(list, team->t.t_parent);
3230 __kmp_print_structure_team_accum(list, team->t.t_next_pool);
3232 // Search list for the team.
3234 while (l->next != NULL && l->entry != team) {
3243 while (l->next != NULL && l->entry->t.t_id <= team->t.t_id) {
3247 // Insert team.
3252 l->entry = team;
3257 static void __kmp_print_structure_team(char const *title, kmp_team_p const *team
3261 if (team != NULL) {
3262 __kmp_printf("%2x %p\n", team->t.t_id, team);
3362 kmp_team_p const *team = list->entry;
3364 __kmp_printf("Team %2x %p:\n", team->t.t_id, team);
3365 __kmp_print_structure_team(" Parent Team: ", team->t.t_parent);
3366 __kmp_printf(" Master TID: %2d\n", team->t.t_master_tid);
3367 __kmp_printf(" Max threads: %2d\n", team->t.t_max_nproc);
3368 __kmp_printf(" Levels of serial: %2d\n", team->t.t_serialized);
3369 __kmp_printf(" Number threads: %2d\n", team->t.t_nproc);
3370 for (i = 0; i < team->t.t_nproc; ++i) {
3372 __kmp_print_structure_thread("", team->t.t_threads[i]);
3374 __kmp_print_structure_team(" Next in pool: ", team->t.t_next_pool);
3388 // Free team list.
3684 /* setup the serial team held in reserve by the root thread */
3705 // AC: the team created in reserve, not for execution (it is unused for now).
3806 kmp_team_t *team = hot_teams[level].hot_team;
3811 kmp_info_t *th = team->t.t_threads[i];
3819 __kmp_free_team(root, team, NULL);
3936 kmp_team_t *team = thread->th.th_team;
3945 __kmp_task_team_wait(thread, team USE_ITT_BUILD_ARG(NULL));
3991 kmp_team_t *team = this_thr->th.th_team;
3994 "__kmp_task_info: gtid=%d tid=%d t_thread=%p team=%p steam=%p curtask=%p "
3996 gtid, tid, this_thr, team, steam, this_thr->th.th_current_task,
3997 team->t.t_implicit_task_taskdata[tid].td_parent);
4004 static void __kmp_initialize_info(kmp_info_t *this_thr, kmp_team_t *team,
4009 kmp_info_t *master = team->t.t_threads[0];
4012 KMP_DEBUG_ASSERT(team);
4013 KMP_DEBUG_ASSERT(team->t.t_threads);
4014 KMP_DEBUG_ASSERT(team->t.t_dispatch);
4020 TCW_SYNC_PTR(this_thr->th.th_team, team);
4036 /* setup the thread's cache of the team structure */
4037 this_thr->th.th_team_nproc = team->t.t_nproc;
4039 this_thr->th.th_team_serialized = team->t.t_serialized;
4042 KMP_DEBUG_ASSERT(team->t.t_implicit_task_taskdata);
4048 team, tid, TRUE);
4056 this_thr->th.th_dispatch = &team->t.t_dispatch[tid];
4101 // Use team max_nproc since this will never change for the team.
4104 (team->t.t_max_nproc == 1 ? 1 : __kmp_dispatch_num_buffers);
4106 team->t.t_max_nproc));
4108 KMP_DEBUG_ASSERT(team->t.t_dispatch);
4109 KMP_DEBUG_ASSERT(dispatch == &team->t.t_dispatch[tid]);
4120 &dispatch->th_disp_buffer[team->t.t_max_nproc == 1
4125 gtid, team->t.t_id, gtid);
4157 /* allocate a new thread for the requesting team. this is only called from
4162 kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
4169 KMP_DEBUG_ASSERT(root && team);
4198 __kmp_initialize_info(new_thr, team, new_tid,
4282 // add the reserve serialized team, initialized from the team's master thread
4284 kmp_internal_control_t r_icvs = __kmp_get_x_global_icvs(team);
4295 serial_team->t.t_serialized = 0; // AC: the team created in reserve, not for
4303 __kmp_initialize_info(new_thr, team, new_tid, new_gtid);
4316 /* Initialize these only once when thread is grabbed for a team allocation */
4325 balign[b].bb.team = NULL;
4391 /* Reinitialize team for reuse.
4392 The hot team code calls this case at every fork barrier, so EPCC barrier
4393 tests are extremely sensitive to changes in it, esp. writes to the team
4396 static void __kmp_reinitialize_team(kmp_team_t *team,
4399 KF_TRACE(10, ("__kmp_reinitialize_team: enter this_thread=%p team=%p\n",
4400 team->t.t_threads[0], team));
4401 KMP_DEBUG_ASSERT(team && new_icvs);
4403 KMP_CHECK_UPDATE(team->t.t_ident, loc);
4405 KMP_CHECK_UPDATE(team->t.t_id, KMP_GEN_TEAM_ID());
4407 __kmp_init_implicit_task(loc, team->t.t_threads[0], team, 0, FALSE);
4408 copy_icvs(&team->t.t_implicit_task_taskdata[0].td_icvs, new_icvs);
4410 KF_TRACE(10, ("__kmp_reinitialize_team: exit this_thread=%p team=%p\n",
4411 team->t.t_threads[0], team));
4414 /* Initialize the team data structure.
4417 static void __kmp_initialize_team(kmp_team_t *team, int new_nproc,
4420 KF_TRACE(10, ("__kmp_initialize_team: enter: team=%p\n", team));
4423 KMP_DEBUG_ASSERT(team);
4424 KMP_DEBUG_ASSERT(new_nproc <= team->t.t_max_nproc);
4425 KMP_DEBUG_ASSERT(team->t.t_threads);
4428 team->t.t_master_tid = 0; /* not needed */
4429 /* team->t.t_master_bar; not needed */
4430 team->t.t_serialized = new_nproc > 1 ? 0 : 1;
4431 team->t.t_nproc = new_nproc;
4433 /* team->t.t_parent = NULL; TODO not needed & would mess up hot team */
4434 team->t.t_next_pool = NULL;
4435 /* memset( team->t.t_threads, 0, sizeof(kmp_info_t*)*new_nproc ); would mess
4436 * up hot team */
4438 TCW_SYNC_PTR(team->t.t_pkfn, NULL); /* not needed */
4439 team->t.t_invoke = NULL; /* not needed */
4441 // TODO???: team->t.t_max_active_levels = new_max_active_levels;
4442 team->t.t_sched.sched = new_icvs->sched.sched;
4445 team->t.t_fp_control_saved = FALSE; /* not needed */
4446 team->t.t_x87_fpu_control_word = 0; /* not needed */
4447 team->t.t_mxcsr = 0; /* not needed */
4450 team->t.t_construct = 0;
4452 team->t.t_ordered.dt.t_value = 0;
4453 team->t.t_master_active = FALSE;
4456 team->t.t_copypriv_data = NULL; /* not necessary, but nice for debugging */
4459 team->t.t_copyin_counter = 0; /* for barrier-free copyin implementation */
4462 team->t.t_control_stack_top = NULL;
4464 __kmp_reinitialize_team(team, new_icvs, loc);
4467 KF_TRACE(10, ("__kmp_initialize_team: exit: team=%p\n", team));
4495 static void __kmp_partition_places(kmp_team_t *team, int update_master_only) {
4496 // Copy the master thread's place partition to the team struct
4497 kmp_info_t *master_th = team->t.t_threads[0];
4499 kmp_proc_bind_t proc_bind = team->t.t_proc_bind;
4503 team->t.t_first_place = first_place;
4504 team->t.t_last_place = last_place;
4508 proc_bind, __kmp_gtid_from_thread(team->t.t_threads[0]),
4509 team->t.t_id, masters_place, first_place, last_place));
4516 KMP_DEBUG_ASSERT(team->t.t_nproc == 1);
4521 int n_th = team->t.t_nproc;
4523 kmp_info_t *th = team->t.t_threads[f];
4529 team->t.t_display_affinity != 1) {
4530 team->t.t_display_affinity = 1;
4535 __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id,
4542 int n_th = team->t.t_nproc;
4552 kmp_info_t *th = team->t.t_threads[f];
4566 team->t.t_display_affinity != 1) {
4567 team->t.t_display_affinity = 1;
4572 __kmp_gtid_from_thread(team->t.t_threads[f]),
4573 team->t.t_id, f, place, first_place, last_place));
4584 kmp_info_t *th = team->t.t_threads[f];
4591 team->t.t_display_affinity != 1) {
4592 team->t.t_display_affinity = 1;
4625 __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id, f,
4634 int n_th = team->t.t_nproc;
4657 kmp_info_t *th = team->t.t_threads[f];
4663 team->t.t_display_affinity != 1) {
4664 team->t.t_display_affinity = 1;
4702 __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id,
4752 th = team->t.t_threads[f];
4758 team->t.t_display_affinity != 1) {
4759 team->t.t_display_affinity = 1;
4764 __kmp_gtid_from_thread(team->t.t_threads[f]),
4765 team->t.t_id, f, th->th.th_new_place,
4783 kmp_info_t *th = team->t.t_threads[f];
4790 team->t.t_display_affinity != 1) {
4791 team->t.t_display_affinity = 1;
4823 __kmp_gtid_from_thread(team->t.t_threads[f]),
4824 team->t.t_id, f, th->th.th_new_place,
4835 KA_TRACE(20, ("__kmp_partition_places: exit T#%d\n", team->t.t_id));
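
For context: __kmp_partition_places (lines 4495-4835) distributes the master's place partition [first_place, last_place] over the team's threads according to the proc_bind policy. The simplest case, an even split of a contiguous place interval, looks roughly like the sketch below (a deliberate simplification of the close/spread handling; names illustrative, assumes n_th >= 1):

    #include <vector>

    // Assign each of n_th threads a place in [first_place, last_place]:
    // even chunks when there are enough places, round-robin wrap-around otherwise.
    std::vector<int> partition_places_sketch(int first_place, int last_place, int n_th) {
      int n_places = last_place - first_place + 1;
      std::vector<int> place_of_thread(n_th);
      if (n_th <= n_places) {
        int chunk = n_places / n_th, rem = n_places % n_th;
        int place = first_place;
        for (int f = 0; f < n_th; ++f) {
          place_of_thread[f] = place;             // thread f starts its own chunk
          place += chunk + (f < rem ? 1 : 0);
        }
      } else {
        for (int f = 0; f < n_th; ++f)            // more threads than places: share them
          place_of_thread[f] = first_place + f % n_places;
      }
      return place_of_thread;
    }
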
4840 /* allocate a new team data structure to use. take one off of the free pool if
4852 kmp_team_t *team;
4864 team = master->th.th_team;
4865 level = team->t.t_active_level;
4869 team->t.t_pkfn ==
4872 team->t.t_level)) { // or nested parallel inside the teams
4880 .hot_team) { // hot team has already been allocated for given level
4887 // Optimization to use a "hot" team
4891 team = hot_teams[level].hot_team;
4893 team = root->r.r_hot_team;
4897 KA_TRACE(20, ("__kmp_allocate_team: hot team task_team[0] = %p "
4899 team->t.t_task_team[0], team->t.t_task_team[1]));
4906 if (team->t.t_nproc == new_nproc) { // Check changes in number of threads
4907 KA_TRACE(20, ("__kmp_allocate_team: reusing hot team\n"));
4909 // team size was already reduced, so we check the special flag
4910 if (team->t.t_size_changed == -1) {
4911 team->t.t_size_changed = 1;
4913 KMP_CHECK_UPDATE(team->t.t_size_changed, 0);
4916 // TODO???: team->t.t_max_active_levels = new_max_active_levels;
4919 KMP_CHECK_UPDATE(team->t.t_sched.sched, new_sched.sched);
4921 __kmp_reinitialize_team(team, new_icvs,
4924 KF_TRACE(10, ("__kmp_allocate_team2: T#%d, this_thread=%p team=%p\n", 0,
4925 team->t.t_threads[0], team));
4926 __kmp_push_current_task_to_thread(team->t.t_threads[0], team, 0);
4929 if ((team->t.t_size_changed == 0) &&
4930 (team->t.t_proc_bind == new_proc_bind)) {
4933 team, 1); // add flag to update only master for spread
4935 KA_TRACE(200, ("__kmp_allocate_team: reusing hot team #%d bindings: "
4937 team->t.t_id, new_proc_bind, team->t.t_first_place,
4938 team->t.t_last_place));
4940 KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
4941 __kmp_partition_places(team);
4944 KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
4946 } else if (team->t.t_nproc > new_nproc) {
4948 ("__kmp_allocate_team: decreasing hot team thread count to %d\n",
4951 team->t.t_size_changed = 1;
4954 // AC: saved number of threads should correspond to team's value in this
4955 // mode, can be bigger in mode 1, when hot team has threads in reserve
4956 KMP_DEBUG_ASSERT(hot_teams[level].hot_team_nth == team->t.t_nproc);
4960 for (f = new_nproc; f < team->t.t_nproc; f++) {
4961 KMP_DEBUG_ASSERT(team->t.t_threads[f]);
4963 // When decreasing team size, threads no longer in the team should
4964 // unref task team.
4965 team->t.t_threads[f]->th.th_task_team = NULL;
4967 __kmp_free_thread(team->t.t_threads[f]);
4968 team->t.t_threads[f] = NULL;
4973 // When keeping extra threads in team, switch threads to wait on own
4975 for (f = new_nproc; f < team->t.t_nproc; ++f) {
4976 KMP_DEBUG_ASSERT(team->t.t_threads[f]);
4977 kmp_balign_t *balign = team->t.t_threads[f]->th.th_bar;
4987 team->t.t_nproc = new_nproc;
4988 // TODO???: team->t.t_max_active_levels = new_max_active_levels;
4989 KMP_CHECK_UPDATE(team->t.t_sched.sched, new_icvs->sched.sched);
4990 __kmp_reinitialize_team(team, new_icvs,
4995 team->t.t_threads[f]->th.th_team_nproc = new_nproc;
5000 KF_TRACE(10, ("__kmp_allocate_team: T#%d, this_thread=%p team=%p\n", 0,
5001 team->t.t_threads[0], team));
5003 __kmp_push_current_task_to_thread(team->t.t_threads[0], team, 0);
5006 for (f = 0; f < team->t.t_nproc; f++) {
5007 KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
5008 team->t.t_threads[f]->th.th_team_nproc ==
5009 team->t.t_nproc);
5013 KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
5015 __kmp_partition_places(team);
5017 } else { // team->t.t_nproc < new_nproc
5026 ("__kmp_allocate_team: increasing hot team thread count to %d\n",
5029 team->t.t_size_changed = 1;
5035 kmp_info_t **other_threads = team->t.t_threads;
5036 for (f = team->t.t_nproc; f < avail_threads; ++f) {
5037 // Adjust barrier data of reserved threads (if any) of the team
5042 balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5045 balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5053 team->t.t_nproc = new_nproc; // just get reserved threads involved
5056 team->t.t_nproc =
5059 hot_teams[level].hot_team_nth = new_nproc; // adjust hot team max size
5061 if (team->t.t_max_nproc < new_nproc) {
5063 __kmp_reallocate_team_arrays(team, new_nproc);
5064 __kmp_reinitialize_team(team, new_icvs, NULL);
5075 /* allocate new threads for the hot team */
5076 for (f = team->t.t_nproc; f < new_nproc; f++) {
5077 kmp_info_t *new_worker = __kmp_allocate_thread(root, team, f);
5079 team->t.t_threads[f] = new_worker;
5082 ("__kmp_allocate_team: team %d init T#%d arrived: "
5084 team->t.t_id, __kmp_gtid_from_tid(f, team), team->t.t_id, f,
5085 team->t.t_bar[bs_forkjoin_barrier].b_arrived,
5086 team->t.t_bar[bs_plain_barrier].b_arrived));
5092 balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5096 balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5113 int old_nproc = team->t.t_nproc; // save old value and use to update only
5115 __kmp_initialize_team(team, new_nproc, new_icvs,
5119 KMP_DEBUG_ASSERT(team->t.t_nproc == new_nproc);
5120 for (f = 0; f < team->t.t_nproc; ++f)
5121 __kmp_initialize_info(team->t.t_threads[f], team, f,
5122 __kmp_gtid_from_tid(f, team));
5124 if (level) { // set th_task_state for new threads in nested hot team
5130 for (f = old_nproc; f < team->t.t_nproc; ++f)
5131 team->t.t_threads[f]->th.th_task_state =
5132 team->t.t_threads[0]->th.th_task_state_memo_stack[level];
5133 } else { // set th_task_state for new threads in non-nested hot team
5135 team->t.t_threads[0]->th.th_task_state; // copy master's state
5136 for (f = old_nproc; f < team->t.t_nproc; ++f)
5137 team->t.t_threads[f]->th.th_task_state = old_state;
5141 for (f = 0; f < team->t.t_nproc; ++f) {
5142 KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
5143 team->t.t_threads[f]->th.th_team_nproc ==
5144 team->t.t_nproc);
5148 KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
5150 __kmp_partition_places(team);
5154 kmp_info_t *master = team->t.t_threads[0];
5158 kmp_info_t *thr = team->t.t_threads[f];
5167 // team.
5169 kmp_info_t *thr = team->t.t_threads[f];
5173 balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5176 balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5184 __kmp_alloc_argv_entries(argc, team, TRUE);
5185 KMP_CHECK_UPDATE(team->t.t_argc, argc);
5186 // The hot team re-uses the previous task team,
5189 KF_TRACE(10, (" hot_team = %p\n", team));
5193 KA_TRACE(20, ("__kmp_allocate_team: hot team task_team[0] = %p "
5195 team->t.t_task_team[0], team->t.t_task_team[1]));
5200 __ompt_team_assign_id(team, ompt_parallel_data);
5205 return team;
5208 /* next, let's try to take one from the team pool */
5210 for (team = CCAST(kmp_team_t *, __kmp_team_pool); (team);) {
5213 if (team->t.t_max_nproc >= max_nproc) {
5214 /* take this team from the team pool */
5215 __kmp_team_pool = team->t.t_next_pool;
5217 /* setup the team for fresh use */
5218 __kmp_initialize_team(team, new_nproc, new_icvs, NULL);
5222 &team->t.t_task_team[0], &team->t.t_task_team[1]));
5223 team->t.t_task_team[0] = NULL;
5224 team->t.t_task_team[1] = NULL;
5227 __kmp_alloc_argv_entries(argc, team, TRUE);
5228 KMP_CHECK_UPDATE(team->t.t_argc, argc);
5231 20, ("__kmp_allocate_team: team %d init arrived: join=%u, plain=%u\n",
5232 team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
5236 team->t.t_bar[b].b_arrived = KMP_INIT_BARRIER_STATE;
5238 team->t.t_bar[b].b_master_arrived = 0;
5239 team->t.t_bar[b].b_team_arrived = 0;
5244 team->t.t_proc_bind = new_proc_bind;
5246 KA_TRACE(20, ("__kmp_allocate_team: using team from pool %d.\n",
5247 team->t.t_id));
5250 __ompt_team_assign_id(team, ompt_parallel_data);
5255 return team;
5258 /* reap team if it is too small, then loop back and check the next one */
5261 /* TODO: Use technique to find the right size hot-team, don't reap them */
5262 team = __kmp_reap_team(team);
5263 __kmp_team_pool = team;
5266 /* nothing available in the pool, no matter, make a new team! */
5268 team = (kmp_team_t *)__kmp_allocate(sizeof(kmp_team_t));
5271 team->t.t_max_nproc = max_nproc;
5274 __kmp_allocate_team_arrays(team, max_nproc);
5276 KA_TRACE(20, ("__kmp_allocate_team: making a new team\n"));
5277 __kmp_initialize_team(team, new_nproc, new_icvs, NULL);
5281 &team->t.t_task_team[0], &team->t.t_task_team[1]));
5282 team->t.t_task_team[0] = NULL; // to be removed, as __kmp_allocate zeroes
5284 team->t.t_task_team[1] = NULL; // to be removed, as __kmp_allocate zeroes
5288 __kmp_print_team_storage_map("team", team, team->t.t_id, new_nproc);
5292 __kmp_alloc_argv_entries(argc, team, FALSE);
5293 team->t.t_argc = argc;
5296 ("__kmp_allocate_team: team %d init arrived: join=%u, plain=%u\n",
5297 team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
5301 team->t.t_bar[b].b_arrived = KMP_INIT_BARRIER_STATE;
5303 team->t.t_bar[b].b_master_arrived = 0;
5304 team->t.t_bar[b].b_team_arrived = 0;
5309 team->t.t_proc_bind = new_proc_bind;
5312 __ompt_team_assign_id(team, ompt_parallel_data);
5313 team->t.ompt_serialized_team_info = NULL;
5318 KA_TRACE(20, ("__kmp_allocate_team: done creating a new team %d.\n",
5319 team->t.t_id));
5321 return team;
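
For orientation: __kmp_allocate_team (lines 4840-5321) obtains a team from three sources in order: the hot team for this root/level (resized in place when the thread count changes), then the global team pool (reaping pooled teams that are too small), and only then a freshly allocated team. A structural sketch of that order, with all of the resizing, ICV, and barrier details elided (types and globals are illustrative):

    #include <vector>

    struct team_t {
      int max_nproc;
      int nproc;
    };

    static team_t *hot_team = nullptr;            // kept warm between parallel regions
    static std::vector<team_t *> team_pool;       // retired teams awaiting reuse

    team_t *allocate_team_sketch(int new_nproc, int max_nproc) {
      if (hot_team != nullptr) {                  // 1) reuse the hot team, resizing in place
        hot_team->nproc = new_nproc;
        return hot_team;
      }
      while (!team_pool.empty()) {                // 2) recycle a pooled team that is big enough
        team_t *t = team_pool.back();
        team_pool.pop_back();
        if (t->max_nproc >= max_nproc) {
          t->nproc = new_nproc;
          return t;
        }
        delete t;                                 // too small: reap it and keep looking
      }
      return new team_t{max_nproc, new_nproc};    // 3) nothing reusable: make a new team
    }
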
5327 /* free the team. return it to the team pool. release all the threads
5330 kmp_team_t *team USE_NESTED_HOT_ARG(kmp_info_t *master)) {
5332 KA_TRACE(20, ("__kmp_free_team: T#%d freeing team %d\n", __kmp_get_gtid(),
5333 team->t.t_id));
5337 KMP_DEBUG_ASSERT(team);
5338 KMP_DEBUG_ASSERT(team->t.t_nproc <= team->t.t_max_nproc);
5339 KMP_DEBUG_ASSERT(team->t.t_threads);
5341 int use_hot_team = team == root->r.r_hot_team;
5346 level = team->t.t_active_level - 1;
5352 if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
5353 master->th.th_teams_level == team->t.t_level) {
5356 } // team->t.t_level will be increased inside parallel
5360 KMP_DEBUG_ASSERT(team == hot_teams[level].hot_team);
5366 /* team is done working */
5367 TCW_SYNC_PTR(team->t.t_pkfn,
5370 team->t.t_copyin_counter = 0; // init counter for possible reuse
5372 // Do not reset pointer to parent team to NULL for hot teams.
5374 /* if we are non-hot team, release our threads */
5378 for (f = 1; f < team->t.t_nproc; ++f) {
5379 KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5380 kmp_info_t *th = team->t.t_threads[f];
5402 kmp_task_team_t *task_team = team->t.t_task_team[tt_idx];
5404 for (f = 0; f < team->t.t_nproc; ++f) { // threads unref task teams
5405 KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5406 team->t.t_threads[f]->th.th_task_team = NULL;
5410 ("__kmp_free_team: T#%d deactivating task_team %p on team %d\n",
5411 __kmp_get_gtid(), task_team, team->t.t_id));
5415 team->t.t_task_team[tt_idx] = NULL;
5420 // Reset pointer to parent team only for non-hot teams.
5421 team->t.t_parent = NULL;
5422 team->t.t_level = 0;
5423 team->t.t_active_level = 0;
5426 for (f = 1; f < team->t.t_nproc; ++f) {
5427 KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5428 __kmp_free_thread(team->t.t_threads[f]);
5429 team->t.t_threads[f] = NULL;
5432 /* put the team back in the team pool */
5433 /* TODO limit size of team pool, call reap_team if pool too large */
5434 team->t.t_next_pool = CCAST(kmp_team_t *, __kmp_team_pool);
5435 __kmp_team_pool = (volatile kmp_team_t *)team;
5436 } else { // Check if team was created for the masters in a teams construct
5438 KMP_DEBUG_ASSERT(team->t.t_threads[1] &&
5439 team->t.t_threads[1]->th.th_cg_roots);
5440 if (team->t.t_threads[1]->th.th_cg_roots->cg_root == team->t.t_threads[1]) {
5441 // Clean up the CG root nodes on workers so that this team can be re-used
5442 for (f = 1; f < team->t.t_nproc; ++f) {
5443 kmp_info_t *thr = team->t.t_threads[f];
5467 /* reap the team. destroy it, reclaim all its resources and free its memory */
5468 kmp_team_t *__kmp_reap_team(kmp_team_t *team) {
5469 kmp_team_t *next_pool = team->t.t_next_pool;
5471 KMP_DEBUG_ASSERT(team);
5472 KMP_DEBUG_ASSERT(team->t.t_dispatch);
5473 KMP_DEBUG_ASSERT(team->t.t_disp_buffer);
5474 KMP_DEBUG_ASSERT(team->t.t_threads);
5475 KMP_DEBUG_ASSERT(team->t.t_argv);
5480 __kmp_free_team_arrays(team);
5481 if (team->t.t_argv != &team->t.t_inline_argv[0])
5482 __kmp_free((void *)team->t.t_argv);
5483 __kmp_free(team);
5506 // applications. Previously, as threads were freed from the hot team, they
5507 // would be placed back on the free list in inverse order. If the hot team
5509 // back on the hot team in reverse order. This could cause bad cache
5510 // locality problems on programs where the size of the hot team regularly
5524 // uninitialized (NULL team).
5530 balign[b].bb.team = NULL;
5567 * with higher probability when hot team is disabled but can occur even when
5568 * the hot team is enabled */
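
For context: the comment at lines 5506-5530 explains why freed threads are inserted into the thread pool in gtid order instead of being pushed LIFO; a plain stack would hand the threads back in reverse order the next time the hot team grows, hurting cache locality. A sketch of that ordered insertion (node and field names illustrative):

    struct thread_node {
      int gtid = 0;
      thread_node *next_in_pool = nullptr;
    };

    static thread_node *thread_pool = nullptr;    // free list kept sorted by gtid

    // Insert at the position that keeps the pool ordered by gtid, so a team that
    // grows back gets the same threads in the same order as before.
    void free_thread_sketch(thread_node *thr) {
      thread_node **scan = &thread_pool;
      while (*scan != nullptr && (*scan)->gtid < thr->gtid)
        scan = &(*scan)->next_in_pool;
      thr->next_in_pool = *scan;
      *scan = thr;
    }
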
5673 /* No tid yet since not part of a team */
6002 // Get the next team from the pool.
6003 kmp_team_t *team = CCAST(kmp_team_t *, __kmp_team_pool);
6004 __kmp_team_pool = team->t.t_next_pool;
6006 team->t.t_next_pool = NULL;
6007 __kmp_reap_team(team);
6933 kmp_team_t *team) {
6945 KMP_DEBUG_ASSERT(team->t.t_dispatch);
6946 // KMP_DEBUG_ASSERT( this_thr->th.th_dispatch == &team->t.t_dispatch[
6952 __kmp_push_parallel(gtid, team->t.t_ident);
6958 kmp_team_t *team) {
6960 __kmp_pop_parallel(gtid, team->t.t_ident);
6969 kmp_team_t *team = this_thr->th.th_team;
6971 __kmp_run_before_invoked_task(gtid, tid, this_thr, team);
6976 team->t.t_stack_id); // inform ittnotify about entering user's code
6992 team->t.t_implicit_task_taskdata[tid].ompt_task_info.frame.exit_frame.ptr);
6998 &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data);
6999 my_parallel_data = &(team->t.ompt_team_info.parallel_data);
7001 ompt_team_size = team->t.t_nproc;
7019 rc = __kmp_invoke_microtask((microtask_t)TCR_SYNC_PTR(team->t.t_pkfn), gtid,
7020 tid, (int)team->t.t_argc, (void **)team->t.t_argv
7042 team->t.t_stack_id); // inform ittnotify about leaving user's code
7045 __kmp_run_after_invoked_task(gtid, tid, this_thr, team);
7053 kmp_team_t *team = thr->th.th_team;
7054 ident_t *loc = team->t.t_ident;
7078 __kmp_fork_call(loc, gtid, fork_context_intel, team->t.t_argc,
7084 // If the team size was reduced from the limit, set it to the new size
7100 kmp_team_t *team = this_thr->th.th_team;
7106 __kmp_run_before_invoked_task(gtid, 0, this_thr, team);
7110 &team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data;
7111 ompt_data_t *parallel_data = &team->t.ompt_team_info.parallel_data;
7114 ompt_scope_begin, parallel_data, task_data, team->t.t_nproc, tid,
7123 __kmp_run_after_invoked_task(gtid, 0, this_thr, team);
7128 encountered by this team. since this should be enclosed in the forkjoin
7177 } // prevent team size to exceed thread-limit-var
7211 void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team) {
7218 KMP_DEBUG_ASSERT(team);
7219 KMP_DEBUG_ASSERT(this_thr->th.th_team == team);
7223 team->t.t_construct = 0; /* no single directives seen yet */
7224 team->t.t_ordered.dt.t_value =
7228 KMP_DEBUG_ASSERT(team->t.t_disp_buffer);
7229 if (team->t.t_max_nproc > 1) {
7232 team->t.t_disp_buffer[i].buffer_index = i;
7233 team->t.t_disp_buffer[i].doacross_buf_idx = i;
7236 team->t.t_disp_buffer[0].buffer_index = 0;
7237 team->t.t_disp_buffer[0].doacross_buf_idx = 0;
7241 KMP_ASSERT(this_thr->th.th_team == team);
7244 for (f = 0; f < team->t.t_nproc; f++) {
7245 KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
7246 team->t.t_threads[f]->th.th_team_nproc == team->t.t_nproc);
7254 void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team) {
7257 KMP_DEBUG_ASSERT(team);
7258 KMP_DEBUG_ASSERT(this_thr->th.th_team == team);
7266 __kmp_threads[gtid]->th.th_team_nproc != team->t.t_nproc) {
7270 "team->t.t_nproc=%d\n",
7271 gtid, __kmp_threads[gtid]->th.th_team_nproc, team,
7272 team->t.t_nproc);
7276 __kmp_threads[gtid]->th.th_team_nproc == team->t.t_nproc);
7312 KMP_ASSERT(this_thr->th.th_team == team);
7319 // Return the worker threads actively spinning in the hot team, if we
7365 // Threads that are active in the thread pool, active in the hot team for this
7368 // team, but are currently contributing to the system load, and must be
7377 "hot team active = %d\n",
7404 // large as the #active omp threads that are available to add to the team.
7652 /* Getting team information common for all team API */
7658 kmp_team_t *team = thr->th.th_team;
7660 int ii = team->t.t_level;
7661 teams_serialized = team->t.t_serialized;
7665 for (teams_serialized = team->t.t_serialized;
7668 if (team->t.t_serialized && (!teams_serialized)) {
7669 team = team->t.t_parent;
7673 team = team->t.t_parent;
7677 return team;
7684 kmp_team_t *team = __kmp_aux_get_team_info(serialized);
7685 if (team) {
7687 return 0; // teams region is serialized ( 1 team of 1 thread ).
7689 return team->t.t_master_tid;
7697 kmp_team_t *team = __kmp_aux_get_team_info(serialized);
7698 if (team) {
7702 return team->t.t_parent->t.t_nproc;
8071 // another choice of getting a team size (with 1 dynamic deference) is slower
8146 // If the team is serialized (team_size == 1), ignore the forced reduction