Lines Matching refs:nthreads

1204           1); // 1 indicates: set up the current team regardless of nthreads
2793 kmp_int32 nthreads, victim_tid = -2, use_own_tasks = 1, new_victim = 0,
2810 nthreads = task_team->tt.tt_nproc;
2812 KMP_DEBUG_ASSERT(nthreads > 1 || task_team->tt.tt_found_proxy_tasks);
2822 if ((task == NULL) && (nthreads > 1)) { // Steal a task
2840 victim_tid = __kmp_get_random(thread) % (nthreads - 1);
2979 if (nthreads == 1)
3022 int nthreads, i, is_init_thread;
3030 nthreads = task_team->tt.tt_nproc;
3031 KMP_DEBUG_ASSERT(nthreads > 0);
3032 KMP_DEBUG_ASSERT(nthreads == this_thr->th.th_team->t.t_nproc);
3053 for (i = 0; i < nthreads; i++) {
3179 kmp_int32 nthreads, maxthreads;
3188 nthreads = task_team->tt.tt_nproc;
3202 if (maxthreads < nthreads) {
3212 __kmp_gtid_from_thread(thread), task_team, nthreads, maxthreads));
3218 nthreads * sizeof(kmp_thread_data_t));
3220 KMP_MEMCPY_S((void *)new_data, nthreads * sizeof(kmp_thread_data_t),
3225 for (i = maxthreads; i < nthreads; i++) {
3236 __kmp_gtid_from_thread(thread), task_team, nthreads));
3242 nthreads * sizeof(kmp_thread_data_t));
3246 for (i = 0; i < nthreads; i++) {
3252 task_team->tt.tt_max_threads = nthreads;
3259 for (i = 0; i < nthreads; i++) {
3263 if (thread_data->td.td_deque_last_stolen >= nthreads) {
3302 int nthreads;
3335 task_team->tt.tt_nproc = nthreads = team->t.t_nproc;
3337 KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads, nthreads);
3804 kmp_int32 nthreads = team->t.t_nproc;
3807 // This should be similar to start_k = __kmp_get_random( thread ) % nthreads
3816 k = (k + 1) % nthreads;
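
The matches at lines 2840, 3807, and 3816 show the two victim-selection idioms that use nthreads in this file (by the identifiers, this looks like the LLVM OpenMP runtime's tasking code): a random pick over the other nthreads - 1 threads, and a round-robin walk k = (k + 1) % nthreads. Below is a minimal standalone sketch of just that arithmetic; the function names (pick_random_victim, next_victim_round_robin) and the skip-self shift are assumptions for illustration, not the runtime's API.

// Standalone sketch (not the runtime's code): the arithmetic of the two
// victim-selection idioms seen in the matches above. Function names and the
// skip-self adjustment are illustrative assumptions.
#include <cstdio>
#include <cstdlib>

// Random victim over the other (nthreads - 1) threads, mirroring the
// "__kmp_get_random(thread) % (nthreads - 1)" match at line 2840; shifting
// past our own id is an assumption consistent with that modulus.
static int pick_random_victim(int self_tid, int nthreads) {
  int victim = std::rand() % (nthreads - 1); // value in [0, nthreads - 2]
  if (victim >= self_tid)
    ++victim; // skip self
  return victim;
}

// Round-robin walk over the team, mirroring the "k = (k + 1) % nthreads"
// match at line 3816.
static int next_victim_round_robin(int k, int nthreads) {
  return (k + 1) % nthreads;
}

int main() {
  const int nthreads = 4;
  const int self_tid = 2;

  int victim = pick_random_victim(self_tid, nthreads);
  std::printf("random victim for tid %d: %d\n", self_tid, victim);

  // Probe the remaining threads round-robin (a real stealer would also
  // skip self_tid and stop as soon as it finds work).
  int k = victim;
  for (int i = 0; i < nthreads - 1; ++i) {
    k = next_victim_round_robin(k, nthreads);
    std::printf("round-robin probe %d: tid %d\n", i, k);
  }
  return 0;
}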