Lines matching refs:root in /freebsd-13-stable/contrib/llvm-project/openmp/runtime/src/

85 static int __kmp_load_balance_nproc(kmp_root_t *root, int set_nproc);
233 ("__kmp_get_global_thread_id_reg: Encountered new root thread. "
775 static int __kmp_reserve_threads(kmp_root_t *root, kmp_team_t *parent_team,
781 KMP_DEBUG_ASSERT(root && parent_team);
792 new_nthreads = __kmp_load_balance_nproc(root, set_nthreads);
808 (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
844 (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
847 (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
875 (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
878 (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
910 (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
914 (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) -
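The ternary that recurs above, (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc), adjusts the global thread count for threads the forked team will reuse rather than create anew: a serial root can hand over its entire hot team, while an already-active root contributes only the encountering master. A hedged sketch of one such capacity check in __kmp_reserve_threads, with live_threads, requested and thread_limit standing in for __kmp_nth, set_nthreads and whichever limit is being enforced:

    // Sketch only; the real function interleaves several such checks
    // (thread-limit, max-threads, load-balance) with warning messages.
    static int clip_to_limit(kmp_root_t *root, int live_threads,
                             int requested, int thread_limit) {
      // Threads reused instead of newly allocated: the whole hot team
      // if the root is serial, otherwise just the master thread.
      int reused = root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc;
      if (live_threads + requested - reused > thread_limit) {
        requested = thread_limit - live_threads + reused;
        if (requested < 1)
          requested = 1; // serialize rather than fail outright
      }
      return requested;
    }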
957 static void __kmp_fork_team_threads(kmp_root_t *root, kmp_team_t *team,
1007 use_hot_team = team == root->r.r_hot_team;
1019 kmp_info_t *thr = __kmp_allocate_thread(root, team, i);
1400 kmp_root_t *root;
1416 /* Some systems prefer the stack for the root thread(s) to start with */
1437 root = master_th->th.th_root;
1438 master_active = root->r.r_active;
1466 (*p_hot_teams)[0].hot_team = root->r.r_hot_team;
1580 KMP_ATOMIC_INC(&root->r.r_in_parallel);
1617 KF_TRACE(10, ("__kmp_fork_call: before internal fork: root=%p, team=%p, "
1619 root, parent_team, master_th, gtid));
1621 KF_TRACE(10, ("__kmp_fork_call: after internal fork: root=%p, team=%p, "
1623 root, parent_team, master_th, gtid));
1666 (root->r.r_in_parallel && !enter_teams)) ||
1681 nthreads = __kmp_reserve_threads(root, parent_team, master_tid,
1927 // TODO: GEH - cannot do this assertion because root thread not set up as
1934 KMP_ATOMIC_INC(&root->r.r_in_parallel);
1985 team = __kmp_allocate_team(root, nthreads, nthreads,
1994 team = __kmp_allocate_team(root, nthreads, nthreads,
2015 KMP_CHECK_UPDATE(team->t.t_invoke, invoker); // TODO move to root, maybe
2093 (team == root->r.r_hot_team));
2102 KMP_DEBUG_ASSERT(team != root->r.r_hot_team ||
2104 (team->t.t_parent == root->r.r_root_team ||
2125 if (!root->r.r_active) // Only do assignment if it prevents cache ping-pong
2126 root->r.r_active = TRUE;
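The guard above is deliberate: many roots can fork concurrently, and an unconditional store to r_active would dirty the shared cache line on every fork even when the value is already TRUE. Reading before writing keeps the line in shared state, which is the cache ping-pong the comment refers to.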
2128 __kmp_fork_team_threads(root, team, master_th, gtid);
2167 ("__kmp_internal_fork : root=%p, team=%p, master_th=%p, gtid=%d\n",
2168 root, team, master_th, gtid));
2182 KF_TRACE(10, ("__kmp_internal_fork : after : root=%p, team=%p, "
2184 root, team, master_th, gtid));
2265 kmp_root_t *root;
2272 root = master_th->th.th_root;
2391 KMP_ATOMIC_DEC(&root->r.r_in_parallel);
2448 KMP_ATOMIC_DEC(&root->r.r_in_parallel);
2450 KMP_DEBUG_ASSERT(root->r.r_in_parallel >= 0);
2482 if (root->r.r_active != master_active)
2483 root->r.r_active = master_active;
2485 __kmp_free_team(root, team USE_NESTED_HOT_ARG(
2501 parent_team != root->r.r_root_team) {
2502 __kmp_free_team(root,
2529 // TODO: GEH - cannot do this assertion because root thread not set up as
2587 kmp_root_t *root;
2609 root = thread->th.th_root;
2610 if (__kmp_init_parallel && (!root->r.r_active) &&
2611 (root->r.r_hot_team->t.t_nproc > new_nth)
2616 kmp_team_t *hot_team = root->r.r_hot_team;
3115 static void __kmp_initialize_root(kmp_root_t *root) {
3123 KMP_DEBUG_ASSERT(root);
3124 KMP_ASSERT(!root->r.r_begin);
3126 /* setup the root state structure */
3127 __kmp_init_lock(&root->r.r_begin_lock);
3128 root->r.r_begin = FALSE;
3129 root->r.r_active = FALSE;
3130 root->r.r_in_parallel = 0;
3131 root->r.r_blocktime = __kmp_dflt_blocktime;
3133 /* setup the root team for this task */
3134 /* allocate the root team structure */
3138 __kmp_allocate_team(root,
3142 ompt_data_none, // root parallel id
3149 // Non-NULL value should be assigned to make the debugger display the root
3156 root->r.r_root_team = root_team;
3159 /* initialize root team */
3167 ("__kmp_initialize_root: init root team %d arrived: join=%u, plain=%u\n",
3175 __kmp_allocate_team(root,
3179 ompt_data_none, // root parallel id
3187 root->r.r_hot_team = hot_team;
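Read in order, the __kmp_initialize_root fragments show a three-step setup: reset the per-root flags under a freshly initialized begin lock, allocate a one-thread root team, then allocate the hot team that persists across parallel regions. A condensed, hedged outline (the real __kmp_allocate_team calls take more arguments, including proc-bind policy, ICVs and the OMPT parallel id):

    // Hedged outline of __kmp_initialize_root's ordering; the
    // __kmp_allocate_team argument lists are elided, not empty.
    static void initialize_root_outline(kmp_root_t *root) {
      __kmp_init_lock(&root->r.r_begin_lock); // one-time begin lock
      root->r.r_begin = FALSE;
      root->r.r_active = FALSE;               // not inside a parallel
      root->r.r_in_parallel = 0;
      root->r.r_blocktime = __kmp_dflt_blocktime;
      // Step 2: one-thread root team (nproc == max_nproc == 1).
      root->r.r_root_team = __kmp_allocate_team(root, 1, 1 /*, ... */);
      // Step 3: hot team, kept alive between parallel regions
      // (the real call passes a larger max_nproc).
      root->r.r_hot_team = __kmp_allocate_team(root, 1, 1 /*, ... */);
    }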
3340 kmp_root_t const *root = __kmp_root[gtid];
3341 if (root != NULL) {
3342 __kmp_printf("GTID %2d %p:\n", gtid, root);
3343 __kmp_print_structure_team(" Root Team: ", root->r.r_root_team);
3344 __kmp_print_structure_team(" Hot Team: ", root->r.r_hot_team);
3346 root->r.r_uber_thread);
3347 __kmp_printf(" Active?: %2d\n", root->r.r_active);
3349 KMP_ATOMIC_LD_RLX(&root->r.r_in_parallel));
3351 __kmp_print_structure_team_accum(list, root->r.r_root_team);
3352 __kmp_print_structure_team_accum(list, root->r.r_hot_team);
3440 /* reclaim array entries for root threads that are already dead, returns number
3461 entries for root threads that are already dead.
3469 After any dead root reclamation, if the clipping value allows array expansion
3471 that expansion. If not, nothing is done beyond the possible initial root
3488 /* reclaim array entries for root threads that are already dead */
3509 // 2) New foreign root(s) are encountered. We always register new foreign
3561 /* Register the current thread as a root thread and obtain our gtid. We must
3566 kmp_root_t *root;
3643 if (!(root = __kmp_root[gtid])) {
3644 root = __kmp_root[gtid] = (kmp_root_t *)__kmp_allocate(sizeof(kmp_root_t));
3645 KMP_DEBUG_ASSERT(!root->r.r_root_team);
3655 __kmp_initialize_root(root);
3657 /* setup new root thread structure */
3658 if (root->r.r_uber_thread) {
3659 root_thread = root->r.r_uber_thread;
3669 root_thread->th.th_root = root;
3684 /* setup the serial team held in reserve by the root thread */
3689 root, 1, 1,
3691 ompt_data_none, // root parallel id
3702 root->r.r_root_team->t.t_threads[0] = root_thread;
3703 root->r.r_hot_team->t.t_threads[0] = root_thread;
3707 root->r.r_uber_thread = root_thread;
3710 __kmp_initialize_info(root_thread, root->r.r_root_team, 0, gtid);
3728 gtid, __kmp_gtid_from_tid(0, root->r.r_hot_team),
3729 root->r.r_hot_team->t.t_id, 0, KMP_INIT_BARRIER_STATE,
3740 KMP_DEBUG_ASSERT(root->r.r_hot_team->t.t_bar[bs_forkjoin_barrier].b_arrived ==
3798 static int __kmp_free_hot_teams(kmp_root_t *root, kmp_info_t *thr, int level,
3812 n += __kmp_free_hot_teams(root, th, level + 1, max_level);
3819 __kmp_free_team(root, team, NULL);
3824 // Resets a root thread and clear its root and hot teams.
3826 static int __kmp_reset_root(int gtid, kmp_root_t *root) {
3827 kmp_team_t *root_team = root->r.r_root_team;
3828 kmp_team_t *hot_team = root->r.r_hot_team;
3832 KMP_DEBUG_ASSERT(!root->r.r_active);
3834 root->r.r_root_team = NULL;
3835 root->r.r_hot_team = NULL;
3838 __kmp_free_team(root, root_team USE_NESTED_HOT_ARG(NULL));
3845 n += __kmp_free_hot_teams(root, th, 1, __kmp_hot_teams_max_level);
3854 __kmp_free_team(root, hot_team USE_NESTED_HOT_ARG(NULL));
3857 // threads in the teams that had this root as ancestor have stopped trying to
3864 /* Close Handle of root duplicated in __kmp_create_worker (tr #62919) */
3868 (LPVOID) & (root->r.r_uber_thread->th),
3869 root->r.r_uber_thread->th.th_info.ds.ds_thread));
3870 __kmp_free_handle(root->r.r_uber_thread->th.th_info.ds.ds_thread);
3883 &(root->r.r_uber_thread->th.ompt_thread_info.thread_data));
3889 i = root->r.r_uber_thread->th.th_cg_roots->cg_nthreads--;
3892 root->r.r_uber_thread, root->r.r_uber_thread->th.th_cg_roots,
3893 root->r.r_uber_thread->th.th_cg_roots->cg_nthreads));
3896 KMP_DEBUG_ASSERT(root->r.r_uber_thread ==
3897 root->r.r_uber_thread->th.th_cg_roots->cg_root);
3898 KMP_DEBUG_ASSERT(root->r.r_uber_thread->th.th_cg_roots->up == NULL);
3899 __kmp_free(root->r.r_uber_thread->th.th_cg_roots);
3900 root->r.r_uber_thread->th.th_cg_roots = NULL;
3902 __kmp_reap_thread(root->r.r_uber_thread, 1);
3904 // We cannot put the root thread into __kmp_thread_pool, so we have to reap it
3906 root->r.r_uber_thread = NULL;
3907 /* mark root as no longer in use */
3908 root->r.r_begin = FALSE;
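Combined, the __kmp_reset_root fragments show teardown as the mirror image of initialization: detach and free the root team, walk and free any nested hot teams, free the hot team itself, release the uber thread's CG root node, then reap the uber thread directly (per the comment at 3904, a root thread cannot be returned to __kmp_thread_pool) and clear r_begin so the slot can be reused. A hedged ordering sketch, with OMPT callbacks, Windows handle cleanup and the thread-count bookkeeping omitted:

    // Ordering sketch for __kmp_reset_root; not the verbatim code.
    static void reset_root_outline(kmp_root_t *root) {
      kmp_team_t *root_team = root->r.r_root_team;
      kmp_team_t *hot_team = root->r.r_hot_team;
      root->r.r_root_team = NULL; // detach before freeing
      root->r.r_hot_team = NULL;
      __kmp_free_team(root, root_team USE_NESTED_HOT_ARG(NULL));
      __kmp_free_team(root, hot_team USE_NESTED_HOT_ARG(NULL));
      __kmp_free(root->r.r_uber_thread->th.th_cg_roots);
      root->r.r_uber_thread->th.th_cg_roots = NULL;
      __kmp_reap_thread(root->r.r_uber_thread, 1); // can't be pooled
      root->r.r_uber_thread = NULL;
      root->r.r_begin = FALSE; // mark the root slot no longer in use
    }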
3926 kmp_root_t *root = __kmp_root[gtid];
3930 KMP_ASSERT(root == __kmp_threads[gtid]->th.th_root);
3931 KMP_ASSERT(root->r.r_active == FALSE);
3948 __kmp_reset_root(gtid, root);
3965 Unregisters a root thread that is not the current thread. Returns the number
3968 kmp_root_t *root = __kmp_root[gtid];
3974 KMP_ASSERT(root == __kmp_threads[gtid]->th.th_root);
3975 KMP_ASSERT(root->r.r_active == FALSE);
3977 r = __kmp_reset_root(gtid, root);
4071 if (this_thr != master && // Master's CG root is initialized elsewhere
4072 this_thr->th.th_cg_roots != master->th.th_cg_roots) { // CG root not set
4073 // Make new thread's CG root same as master's
4087 // Increment new thread's CG root's counter to add the new thread
4162 kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
4169 KMP_DEBUG_ASSERT(root && team);
4287 (kmp_team_t *)__kmp_allocate_team(root, 1, 1,
4289 ompt_data_none, // root parallel id
4843 __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
4853 int use_hot_team = !root->r.r_active;
4893 team = root->r.r_hot_team;
4922 root->r.r_uber_thread->th.th_ident);
4991 root->r.r_uber_thread->th.th_ident);
5077 kmp_info_t *new_worker = __kmp_allocate_thread(root, team, f);
5116 root->r.r_uber_thread->th.th_ident);
5329 void __kmp_free_team(kmp_root_t *root,
5336 KMP_DEBUG_ASSERT(root);
5341 int use_hot_team = team == root->r.r_hot_team;
5437 // See if first worker is a CG root
5441 // Clean up the CG root nodes on workers so that this team can be re-used
5446 // Pop current CG root off list
5456 // Restore current task's thread_limit from CG root
5796 tries to clean up a dead root thread's data structures, resulting in GVS
5936 /* In Win static library, we can't tell when a root actually dies, so we
5937 reclaim the data structures for any root threads that have died but not
6101 ("__kmp_internal_end_library: root still active, abort T#%d\n",
6210 ("__kmp_internal_end_thread: root still active, abort T#%d\n",
6760 // for each root thread that is currently registered with the RTL.
6811 // root thread that is currently registered with the RTL (which has not
6859 int gtid = __kmp_entry_gtid(); // this might be a new root
7061 // This thread is a new CG root. Set up the proper variables.
7063 tmp->cg_root = thr; // Make thr the CG root
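Line 7063 is the push of a new contention-group (CG) root node; lines 5437-5456 are the matching pop when a team is freed. Assuming the kmp_cg_root_t layout from kmp.h (cg_root, cg_thread_limit, cg_nthreads, up), the push looks roughly like:

    // Hedged sketch of entering a new CG with thr as its root.
    kmp_cg_root_t *tmp =
        (kmp_cg_root_t *)__kmp_allocate(sizeof(kmp_cg_root_t));
    tmp->cg_root = thr;            // thr becomes the CG root
    tmp->cg_thread_limit = __kmp_cg_max_nth; // assumed default limit
    tmp->cg_nthreads = 1;          // the CG root counts itself
    tmp->up = thr->th.th_cg_roots; // link to the enclosing CG
    thr->th.th_cg_roots = tmp;     // push onto the per-thread list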
7321 static int __kmp_active_hot_team_nproc(kmp_root_t *root) {
7326 if (root->r.r_active) {
7329 hot_team = root->r.r_hot_team;
7346 static int __kmp_load_balance_nproc(kmp_root_t *root, int set_nproc) {
7353 KB_TRACE(20, ("__kmp_load_balance_nproc: called root:%p set_nproc:%d\n", root,
7355 KMP_DEBUG_ASSERT(root);
7356 KMP_DEBUG_ASSERT(root->r.r_root_team->t.t_threads[0]
7366 // particular root (if we are at the outer par level), and the currently
7371 hot_team_active = __kmp_active_hot_team_nproc(root);
7389 (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
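The load-balance pair above works in two stages: __kmp_active_hot_team_nproc counts how many hot-team threads this root already has busy, and __kmp_load_balance_nproc then sizes the new team against the machine's measured free capacity, falling back to the familiar active/hot-team ternary at 7389 when the dynamic estimate is unavailable. A simplified, hedged version of the counting stage (the serialized and nested hot-team cases are omitted):

    // Simplified take on __kmp_active_hot_team_nproc.
    static int active_hot_team_nproc_sketch(kmp_root_t *root) {
      if (!root->r.r_active)
        return 0;                            // serial root: hot team idle
      return root->r.r_hot_team->t.t_nproc;  // threads busy in hot team
    }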
7535 kmp_root_t *root;
7540 root = __kmp_threads[gtid]->th.th_root;
7543 if (root->r.r_begin)
7545 __kmp_acquire_lock(&root->r.r_begin_lock, gtid);
7546 if (root->r.r_begin) {
7547 __kmp_release_lock(&root->r.r_begin_lock, gtid);
7551 root->r.r_begin = TRUE;
7553 __kmp_release_lock(&root->r.r_begin_lock, gtid);
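Lines 7543-7553 form a double-checked once-only pattern on r_begin: an unlocked fast-path read, then a re-check under r_begin_lock, so exactly one thread performs the one-time begin work even when several roots race. The shape in isolation:

    // The r_begin double-checked pattern, extracted for clarity.
    static void begin_once(kmp_root_t *root, int gtid) {
      if (root->r.r_begin)
        return;                             // fast path, no lock taken
      __kmp_acquire_lock(&root->r.r_begin_lock, gtid);
      if (root->r.r_begin) {                // someone else got here first
        __kmp_release_lock(&root->r.r_begin_lock, gtid);
        return;
      }
      root->r.r_begin = TRUE;               // winner performs begin work
      __kmp_release_lock(&root->r.r_begin_lock, gtid);
    }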
7560 kmp_root_t *root;
7568 root = thread->th.th_root;
7572 if (root->r.r_in_parallel) { /* Must be called in serial section of top-level