Lines Matching defs:thread_data

30                                    kmp_thread_data_t *thread_data);
41 // thread_data: thread data for task team thread containing stack
45 kmp_thread_data_t *thread_data,
47 kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
93 // after a thread_data structure is created.
97 // thread_data: thread data for task team thread containing stack
99 kmp_thread_data_t *thread_data) {
100 kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
115 // __kmp_free_task_stack: free the task stack when thread_data is destroyed.
118 // thread_data: thread info for thread containing stack
120 kmp_thread_data_t *thread_data) {
121 kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
151 kmp_thread_data_t *thread_data =
153 kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
210 kmp_thread_data_t *thread_data =
212 kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
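The matches from 30 through 212 all touch the same field: every kmp_thread_data_t owns a td.td_susp_tied_tasks stack of suspended tied tasks, and __kmp_init_task_stack, __kmp_push_task_stack, and __kmp_free_task_stack only ever reach it through that thread's own thread_data. A minimal sketch of that per-thread ownership pattern, with hypothetical names (the real kmp_task_stack_t is block-allocated, not a std::vector):

// Hypothetical per-thread stack of suspended tied tasks; stand-ins for
// kmp_taskdata_t / kmp_task_stack_t / kmp_thread_data_t.
#include <cassert>
#include <cstdio>
#include <vector>

struct TaskData { int id; };

struct SuspTiedStack {
  std::vector<TaskData *> entries;     // rough analogue of ts_entries
};

struct ThreadData {
  SuspTiedStack susp_tied_tasks;       // analogue of td.td_susp_tied_tasks
};

// The runtime's stack is per thread, so this sketch takes no lock.
static void push_suspended(ThreadData *thread_data, TaskData *tied_task) {
  thread_data->susp_tied_tasks.entries.push_back(tied_task);
}

static TaskData *pop_suspended(ThreadData *thread_data) {
  SuspTiedStack *task_stack = &thread_data->susp_tied_tasks;
  assert(!task_stack->entries.empty());
  TaskData *top = task_stack->entries.back();
  task_stack->entries.pop_back();
  return top;
}

int main() {
  ThreadData td;
  TaskData t{1};
  push_suspended(&td, &t);
  std::printf("popped task %d\n", pop_suspended(&td)->id);
}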
299 kmp_thread_data_t *thread_data) {
300 kmp_int32 size = TASK_DEQUE_SIZE(thread_data->td);
304 "%d] for thread_data %p\n",
305 __kmp_gtid_from_thread(thread), size, new_size, thread_data));
311 for (i = thread_data->td.td_deque_head, j = 0; j < size;
312 i = (i + 1) & TASK_DEQUE_MASK(thread_data->td), j++)
313 new_deque[j] = thread_data->td.td_deque[i];
315 __kmp_free(thread_data->td.td_deque);
317 thread_data->td.td_deque_head = 0;
318 thread_data->td.td_deque_tail = size;
319 thread_data->td.td_deque = new_deque;
320 thread_data->td.td_deque_size = new_size;
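Lines 299-320 are the body of __kmp_realloc_task_deque: the deque is a power-of-two circular buffer, so growing it copies the live entries starting at td_deque_head (wrapping with TASK_DEQUE_MASK) into slots 0..size-1 of a larger buffer, then rebases the indices with head = 0 and tail = old size. A minimal sketch of that relinearizing copy, with hypothetical names and new_size assumed to be double the old size:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

struct Deque {
  void **buf;        // td_deque analogue
  int32_t size;      // td_deque_size analogue (always a power of two)
  int32_t head;      // td_deque_head analogue
  int32_t tail;      // td_deque_tail analogue
  int32_t ntasks;    // td_deque_ntasks analogue
};

// Double the capacity and relinearize: live entries end up at [0, ntasks),
// head becomes 0 and tail becomes the old element count. The runtime only
// grows a full deque, so there ntasks equals the old size.
static void grow_deque(Deque *d) {
  int32_t old_size = d->size;
  int32_t new_size = 2 * old_size;
  void **new_buf = static_cast<void **>(std::calloc(new_size, sizeof(void *)));
  int32_t mask = old_size - 1;         // TASK_DEQUE_MASK analogue (old size)
  for (int32_t i = d->head, j = 0; j < d->ntasks; i = (i + 1) & mask, j++)
    new_buf[j] = d->buf[i];
  std::free(d->buf);
  d->head = 0;
  d->tail = d->ntasks;
  d->buf = new_buf;
  d->size = new_size;
}

int main() {
  Deque d{static_cast<void **>(std::calloc(2, sizeof(void *))), 2, 0, 0, 0};
  int a = 1, b = 2;
  d.buf[d.tail] = &a; d.tail = (d.tail + 1) & 1; d.ntasks++;
  d.buf[d.tail] = &b; d.tail = (d.tail + 1) & 1; d.ntasks++;
  grow_deque(&d);                      // now size 4, head 0, tail 2
  std::printf("size=%d head=%d tail=%d\n", d.size, d.head, d.tail);
  std::free(d.buf);
}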
329 kmp_thread_data_t *thread_data;
363 thread_data = &task_team->tt.tt_threads_data[tid];
366 if (thread_data->td.td_deque == NULL) {
367 __kmp_alloc_task_deque(thread, thread_data);
372 if (TCR_4(thread_data->td.td_deque_ntasks) >=
373 TASK_DEQUE_SIZE(thread_data->td)) {
382 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
385 __kmp_realloc_task_deque(thread, thread_data);
390 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
392 if (TCR_4(thread_data->td.td_deque_ntasks) >=
393 TASK_DEQUE_SIZE(thread_data->td)) {
397 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
404 __kmp_realloc_task_deque(thread, thread_data);
409 KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) <
410 TASK_DEQUE_SIZE(thread_data->td));
412 thread_data->td.td_deque[thread_data->td.td_deque_tail] =
415 thread_data->td.td_deque_tail =
416 (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
417 TCW_4(thread_data->td.td_deque_ntasks,
418 TCR_4(thread_data->td.td_deque_ntasks) + 1); // Adjust task count
422 gtid, taskdata, thread_data->td.td_deque_ntasks,
423 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
425 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
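Lines 329-425 are the enqueue path of __kmp_push_task: thread_data is looked up from task_team->tt.tt_threads_data[tid], the deque is allocated lazily, fullness is checked both before and after td_deque_lock is taken (growing via __kmp_realloc_task_deque when allowed), and the task is stored at td_deque_tail, which advances with the power-of-two mask before td_deque_ntasks is bumped and the lock released. A much-simplified sketch of the locked store-and-advance step, with hypothetical names and std::mutex standing in for the bootstrap lock:

#include <cstdint>
#include <cstdio>
#include <mutex>
#include <vector>

struct TaskData { int id; };

struct ThreadDeque {
  std::mutex lock;                     // td_deque_lock analogue
  std::vector<TaskData *> buf;         // td_deque analogue; power-of-two size
  int32_t head = 0, tail = 0;
  int32_t ntasks = 0;
};

// Push at the tail; returns false when the deque is full so the caller can
// grow it first (or fall back to executing the task immediately).
static bool push_task(ThreadDeque &dq, TaskData *task) {
  std::lock_guard<std::mutex> guard(dq.lock);
  int32_t size = static_cast<int32_t>(dq.buf.size());
  if (dq.ntasks >= size)
    return false;                      // still full under the lock: bail out
  dq.buf[dq.tail] = task;
  dq.tail = (dq.tail + 1) & (size - 1); // TASK_DEQUE_MASK analogue
  dq.ntasks++;                         // adjust task count
  return true;
}

int main() {
  ThreadDeque dq;
  dq.buf.resize(4);                    // must stay a power of two
  TaskData t{42};
  std::printf("pushed: %s\n", push_task(dq, &t) ? "yes" : "no");
}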
1216 kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
1218 if (thread_data->td.td_deque == NULL) {
1219 __kmp_alloc_task_deque(thread, thread_data);
2577 kmp_thread_data_t *thread_data;
2584 thread_data = &task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
2587 gtid, thread_data->td.td_deque_ntasks,
2588 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2590 if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
2594 gtid, thread_data->td.td_deque_ntasks,
2595 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2599 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
2601 if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
2602 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2606 gtid, thread_data->td.td_deque_ntasks,
2607 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2611 tail = (thread_data->td.td_deque_tail - 1) &
2612 TASK_DEQUE_MASK(thread_data->td); // Wrap index.
2613 taskdata = thread_data->td.td_deque[tail];
2618 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2622 gtid, thread_data->td.td_deque_ntasks,
2623 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2627 thread_data->td.td_deque_tail = tail;
2628 TCW_4(thread_data->td.td_deque_ntasks, thread_data->td.td_deque_ntasks - 1);
2630 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2634 gtid, taskdata, thread_data->td.td_deque_ntasks,
2635 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
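Lines 2577-2635 are __kmp_remove_my_task, the owner-side dequeue: an unlocked td_deque_ntasks == 0 fast path, a re-check under td_deque_lock, then the tail index is decremented with the mask and the candidate task inspected; only if it may actually run are td_deque_tail and td_deque_ntasks updated, otherwise the lock is released and NULL returned. A hedged sketch of that LIFO pop (hypothetical names; the runtime's eligibility test at 2613-2618, which involves task scheduling constraints, is reduced here to a simple ready flag):

#include <cstdint>
#include <cstdio>
#include <mutex>
#include <vector>

struct TaskData { int id; bool ready; };

struct ThreadDeque {
  std::mutex lock;
  std::vector<TaskData *> buf;         // power-of-two capacity
  int32_t head = 0, tail = 0;
  int32_t ntasks = 0;
};

// LIFO pop from the owner's end of the deque (thieves take from the head).
static TaskData *pop_own_task(ThreadDeque &dq) {
  if (dq.ntasks == 0)
    return nullptr;                    // unlocked fast-path check
  std::lock_guard<std::mutex> guard(dq.lock);
  if (dq.ntasks == 0)
    return nullptr;                    // re-check under the lock
  int32_t mask = static_cast<int32_t>(dq.buf.size()) - 1;
  int32_t tail = (dq.tail - 1) & mask; // wrap index
  TaskData *task = dq.buf[tail];
  if (!task->ready)
    return nullptr;                    // not eligible: leave the deque as-is
  dq.tail = tail;                      // commit the pop
  dq.ntasks--;
  return task;
}

int main() {
  ThreadDeque dq;
  dq.buf.resize(4);
  TaskData t{7, true};
  dq.buf[dq.tail] = &t;
  dq.tail = (dq.tail + 1) & 3;
  dq.ntasks = 1;
  TaskData *got = pop_own_task(dq);
  std::printf("popped id=%d\n", got ? got->id : -1);
}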
2643 // task_team thread_data before calling this routine.
3126 kmp_thread_data_t *thread_data) {
3127 __kmp_init_bootstrap_lock(&thread_data->td.td_deque_lock);
3128 KMP_DEBUG_ASSERT(thread_data->td.td_deque == NULL);
3131 thread_data->td.td_deque_last_stolen = -1;
3133 KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) == 0);
3134 KMP_DEBUG_ASSERT(thread_data->td.td_deque_head == 0);
3135 KMP_DEBUG_ASSERT(thread_data->td.td_deque_tail == 0);
3139 ("__kmp_alloc_task_deque: T#%d allocating deque[%d] for thread_data %p\n",
3140 __kmp_gtid_from_thread(thread), INITIAL_TASK_DEQUE_SIZE, thread_data));
3144 thread_data->td.td_deque = (kmp_taskdata_t **)__kmp_allocate(
3146 thread_data->td.td_deque_size = INITIAL_TASK_DEQUE_SIZE;
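Lines 3126-3146 are __kmp_alloc_task_deque: the per-thread deque is created lazily, so the routine initializes td_deque_lock, asserts the deque and its counters are still empty, resets td_deque_last_stolen to -1, and allocates INITIAL_TASK_DEQUE_SIZE pointer slots. Because TASK_DEQUE_MASK is derived from the size, the size has to stay a power of two. A small sketch of that invariant, with hypothetical names and an assumed initial size:

#include <cassert>
#include <cstdint>
#include <cstdlib>

enum { kInitialDequeSize = 256 };      // assumed stand-in for INITIAL_TASK_DEQUE_SIZE

// Hypothetical mirror of the lazily-allocated per-thread deque fields.
struct ThreadDeque {
  void **deque = nullptr;
  int32_t deque_size = 0;
  int32_t head = 0, tail = 0, ntasks = 0;
  int32_t last_stolen = -1;            // td_deque_last_stolen analogue
};

static void alloc_deque(ThreadDeque *td) {
  assert(td->deque == nullptr);        // allocated exactly once
  assert(td->ntasks == 0 && td->head == 0 && td->tail == 0);
  td->last_stolen = -1;                // no steal victim remembered yet
  // Power-of-two size, so the wrap mask can simply be size - 1.
  static_assert((kInitialDequeSize & (kInitialDequeSize - 1)) == 0,
                "deque size must be a power of two");
  td->deque = static_cast<void **>(
      std::calloc(kInitialDequeSize, sizeof(void *)));
  td->deque_size = kInitialDequeSize;
}

int main() {
  ThreadDeque td;
  alloc_deque(&td);
  std::free(td.deque);
}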
3152 static void __kmp_free_task_deque(kmp_thread_data_t *thread_data) {
3153 if (thread_data->td.td_deque != NULL) {
3154 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3155 TCW_4(thread_data->td.td_deque_ntasks, 0);
3156 __kmp_free(thread_data->td.td_deque);
3157 thread_data->td.td_deque = NULL;
3158 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3163 if (thread_data->td.td_susp_tied_tasks.ts_entries != TASK_STACK_EMPTY) {
3164 __kmp_free_task_stack(__kmp_thread_from_gtid(gtid), thread_data);
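Lines 3152-3164 are the teardown side: __kmp_free_task_deque frees td_deque under td_deque_lock, zeroing td_deque_ntasks before the free and nulling the pointer afterwards, and the suspended tied task stack is released separately when its ts_entries is not TASK_STACK_EMPTY. A brief sketch of that lock-guarded cleanup (hypothetical names; std::mutex stands in for the bootstrap lock):

#include <cstdint>
#include <cstdlib>
#include <mutex>

struct ThreadDeque {
  std::mutex lock;
  void **deque = nullptr;
  int32_t ntasks = 0;
};

// Clear the count before freeing, mirroring the TCW_4(..., 0) at 3155, so the
// structure never advertises tasks in a buffer that is about to go away.
static void free_deque(ThreadDeque *td) {
  if (td->deque != nullptr) {
    std::lock_guard<std::mutex> guard(td->lock);
    td->ntasks = 0;
    std::free(td->deque);
    td->deque = nullptr;
  }
}

int main() {
  ThreadDeque td;
  td.deque = static_cast<void **>(std::calloc(8, sizeof(void *)));
  free_deque(&td);
}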
3226 kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3227 __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
3247 kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3248 __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
3260 kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3261 thread_data->td.td_thr = team->t.t_threads[i];
3263 if (thread_data->td.td_deque_last_stolen >= nthreads) {
3267 thread_data->td.td_deque_last_stolen = -1;
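Lines 3226-3267 run when the threads_data array is (re)built: each slot gets its task stack initialized, is bound to team->t.t_threads[i], and any td_deque_last_stolen that now points past the team is dropped back to -1. That field remembers the last victim a steal succeeded from, so a stale index inherited from a larger team must be discarded; a tiny sketch of that reset, with hypothetical names:

#include <cstdio>
#include <vector>

// Hypothetical per-thread record: remembers the last victim a steal succeeded
// from so the next steal attempt can try it first.
struct ThreadData {
  int last_stolen = -1;                // td_deque_last_stolen analogue
};

// After the team is resized, forget any victim index that no longer names a
// valid thread.
static void reset_stale_victims(std::vector<ThreadData> &threads, int nthreads) {
  for (ThreadData &td : threads)
    if (td.last_stolen >= nthreads)
      td.last_stolen = -1;
}

int main() {
  std::vector<ThreadData> threads(4);
  threads[2].last_stolen = 7;          // stale: the team shrank to 4 threads
  reset_stale_victims(threads, 4);
  std::printf("thread 2 victim = %d\n", threads[2].last_stolen);
}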
3630 kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
3632 if (thread_data->td.td_deque == NULL) {
3641 if (TCR_4(thread_data->td.td_deque_ntasks) >=
3642 TASK_DEQUE_SIZE(thread_data->td)) {
3650 if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
3653 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3654 __kmp_realloc_task_deque(thread, thread_data);
3658 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3660 if (TCR_4(thread_data->td.td_deque_ntasks) >=
3661 TASK_DEQUE_SIZE(thread_data->td)) {
3668 if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
3671 __kmp_realloc_task_deque(thread, thread_data);
3677 thread_data->td.td_deque[thread_data->td.td_deque_tail] = taskdata;
3679 thread_data->td.td_deque_tail =
3680 (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
3681 TCW_4(thread_data->td.td_deque_ntasks,
3682 TCR_4(thread_data->td.td_deque_ntasks) + 1);
3689 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
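Lines 3630-3689 appear to be the push used when a task is handed to another thread's deque (the __kmp_give_task-style path): the same lazy allocation, locking, and tail/mask/ntasks update as at 329-425, except that growth of a full target deque is rationed by the caller's pass counter through the TASK_DEQUE_SIZE(td) / INITIAL_TASK_DEQUE_SIZE >= pass checks at 3650 and 3668, i.e. the deque may only grow while it is still smaller than pass times the initial size. A tiny sketch of that cap, with hypothetical names and an assumed initial size:

#include <cstdint>
#include <cstdio>

enum { kInitialDequeSize = 256 };      // assumed stand-in for INITIAL_TASK_DEQUE_SIZE

// A full target deque may only grow while its size, measured in units of the
// initial size, is still below the caller's pass counter.
static bool may_grow(int32_t deque_size, int32_t pass) {
  return (deque_size / kInitialDequeSize) < pass;
}

int main() {
  std::printf("%d %d %d\n",
              may_grow(256, 1) ? 1 : 0,   // 256/256 = 1, not < 1: refuse
              may_grow(256, 2) ? 1 : 0,   // a later pass permits the doubling
              may_grow(512, 2) ? 1 : 0);  // 512/256 = 2, not < 2: refuse
}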