Lines matching refs:thread_data (LLVM OpenMP runtime tasking code, apparently kmp_tasking.cpp; the numbers below are the source file's own line numbers)

36                                    kmp_thread_data_t *thread_data);
51 // thread_data: thread data for task team thread containing stack
55 kmp_thread_data_t *thread_data,
57 kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
103 // after a thread_data structure is created.
107 // thread_data: thread data for task team thread containing stack
109 kmp_thread_data_t *thread_data) {
110 kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
125 // __kmp_free_task_stack: free the task stack when thread_data is destroyed.
128 // thread_data: thread info for thread containing stack
130 kmp_thread_data_t *thread_data) {
131 kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
161 kmp_thread_data_t *thread_data =
163 kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
220 kmp_thread_data_t *thread_data =
222 kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
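
The fragments above (lines 36-222) all touch td_susp_tied_tasks, the per-thread stack that records suspended tied tasks; in the runtime this code is compiled only when the tied-task stack is enabled (BUILD_TIED_TASK_STACK). A minimal sketch of the push/pop discipline, with deliberately simplified, hypothetical types; the real stack grows in linked blocks rather than using a fixed array:

    #include <cassert>
    #include <cstddef>

    struct task_t { int id; };

    // Hypothetical fixed-capacity stand-in for kmp_task_stack_t.
    struct task_stack_t {
      static const std::size_t kCap = 64;
      task_t *entries[kCap];
      std::size_t top = 0;            // next free slot

      void push(task_t *t) {          // tied task being suspended
        assert(top < kCap);
        entries[top++] = t;
      }
      task_t *pop() {                 // tied task resuming
        assert(top > 0);
        return entries[--top];
      }
    };
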
313 kmp_thread_data_t *thread_data) {
314 kmp_int32 size = TASK_DEQUE_SIZE(thread_data->td);
315 KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) == size);
319 "%d] for thread_data %p\n",
320 __kmp_gtid_from_thread(thread), size, new_size, thread_data));
326 for (i = thread_data->td.td_deque_head, j = 0; j < size;
327 i = (i + 1) & TASK_DEQUE_MASK(thread_data->td), j++)
328 new_deque[j] = thread_data->td.td_deque[i];
330 __kmp_free(thread_data->td.td_deque);
332 thread_data->td.td_deque_head = 0;
333 thread_data->td.td_deque_tail = size;
334 thread_data->td.td_deque = new_deque;
335 thread_data->td.td_deque_size = new_size;
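
__kmp_realloc_task_deque (lines 313-335) doubles the deque and linearizes it in the same pass: elements are read from the old buffer starting at td_deque_head with mask-wrapping and written contiguously from slot 0, after which head is 0 and tail equals the old element count. A standalone sketch, with hypothetical names (deque_t, grow) and plain malloc in place of __kmp_allocate:

    #include <cstdlib>

    struct deque_t {
      void **buf;
      unsigned size;    // capacity; always a power of two
      unsigned head;    // oldest element
      unsigned tail;    // one past the newest element
      unsigned ntasks;  // current element count
    };

    static void grow(deque_t *d) {
      unsigned new_size = 2 * d->size;
      void **nb = (void **)malloc(new_size * sizeof(void *));
      unsigned mask = d->size - 1;
      // Copy in logical order, unwrapping the ring into the new buffer.
      for (unsigned i = d->head, j = 0; j < d->ntasks; i = (i + 1) & mask, j++)
        nb[j] = d->buf[i];
      free(d->buf);
      d->buf = nb;
      d->head = 0;
      d->tail = d->ntasks; // the runtime grows only when full, so tail == old size
      d->size = new_size;
    }
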
340 kmp_thread_data_t *thread_data = &l->td;
341 __kmp_init_bootstrap_lock(&thread_data->td.td_deque_lock);
342 thread_data->td.td_deque_last_stolen = -1;
344 "for thread_data %p\n",
345 __kmp_get_gtid(), INITIAL_TASK_DEQUE_SIZE, thread_data));
346 thread_data->td.td_deque = (kmp_taskdata_t **)__kmp_allocate(
348 thread_data->td.td_deque_size = INITIAL_TASK_DEQUE_SIZE;
359 kmp_thread_data_t *thread_data;
363 thread_data = &lst->td;
368 thread_data = &list->td;
382 thread_data = &list->td;
388 thread_data = &next_queue->td;
392 thread_data = &list->td;
398 return thread_data;
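
Lines 359-398 appear to come from the lookup over the task team's priority-deque list: one node per priority value, kept sorted so higher priorities come first, with the matching node's embedded thread_data returned. A sketch of that walk over a hypothetical singly linked list (pri_node_t and find_priority_node are illustrative names):

    struct pri_node_t {
      int priority;
      pri_node_t *next;
      // the per-priority deque lives in the node in the real structure
    };

    // Returns the node for `pri`, or nullptr if the caller must insert one.
    static pri_node_t *find_priority_node(pri_node_t *head, int pri) {
      for (pri_node_t *n = head; n != nullptr; n = n->next) {
        if (n->priority == pri)
          return n;   // existing deque for this priority
        if (n->priority < pri)
          break;      // sorted descending, so `pri` is not in the list
      }
      return nullptr;
    }
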
406 kmp_thread_data_t *thread_data = NULL;
418 thread_data = &list->td;
423 // Another thread initialized a queue. Check if it fits and get thread_data.
424 thread_data = __kmp_get_priority_deque_data(task_team, pri);
430 thread_data = &lst->td;
433 thread_data = __kmp_get_priority_deque_data(task_team, pri);
437 KMP_DEBUG_ASSERT(thread_data);
439 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
441 if (TCR_4(thread_data->td.td_deque_ntasks) >=
442 TASK_DEQUE_SIZE(thread_data->td)) {
446 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
453 __kmp_realloc_task_deque(thread, thread_data);
456 KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) <
457 TASK_DEQUE_SIZE(thread_data->td));
459 thread_data->td.td_deque[thread_data->td.td_deque_tail] = taskdata;
461 thread_data->td.td_deque_tail =
462 (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
463 TCW_4(thread_data->td.td_deque_ntasks,
464 TCR_4(thread_data->td.td_deque_ntasks) + 1); // Adjust task count
469 gtid, taskdata, thread_data->td.td_deque_ntasks,
470 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
471 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
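
The push at lines 459-464 is a masked ring-buffer append performed under td_deque_lock: store at tail, advance tail with TASK_DEQUE_MASK (size - 1 for a power-of-two size), then bump ntasks. In isolation, reusing the deque_t sketch above (locking elided):

    // Caller holds the deque lock and has ensured there is room.
    static void push_tail(deque_t *d, void *task) {
      unsigned mask = d->size - 1;
      d->buf[d->tail] = task;
      d->tail = (d->tail + 1) & mask;
      d->ntasks += 1; // TCR_4/TCW_4 in the runtime; a plain store here
    }
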
495 kmp_thread_data_t *thread_data;
535 thread_data = &task_team->tt.tt_threads_data[tid];
540 if (UNLIKELY(thread_data->td.td_deque == NULL)) {
541 __kmp_alloc_task_deque(thread, thread_data);
546 if (TCR_4(thread_data->td.td_deque_ntasks) >=
547 TASK_DEQUE_SIZE(thread_data->td)) {
556 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
558 if (TCR_4(thread_data->td.td_deque_ntasks) >=
559 TASK_DEQUE_SIZE(thread_data->td)) {
561 __kmp_realloc_task_deque(thread, thread_data);
567 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
569 if (TCR_4(thread_data->td.td_deque_ntasks) >=
570 TASK_DEQUE_SIZE(thread_data->td)) {
574 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
581 __kmp_realloc_task_deque(thread, thread_data);
586 KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) <
587 TASK_DEQUE_SIZE(thread_data->td));
589 thread_data->td.td_deque[thread_data->td.td_deque_tail] =
592 thread_data->td.td_deque_tail =
593 (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
594 TCW_4(thread_data->td.td_deque_ntasks,
595 TCR_4(thread_data->td.td_deque_ntasks) + 1); // Adjust task count
600 gtid, taskdata, thread_data->td.td_deque_ntasks,
601 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
603 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
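
The task-push path at lines 495-603 wraps the same append in a double-checked capacity test: ntasks is read without the lock as a cheap rejection, re-checked under td_deque_lock, and the deque is grown while the lock is held so reallocation cannot race with concurrent stealers. The shape of that pattern, sketched with std::atomic and std::mutex standing in for the runtime's TCR_4/TCW_4 macros and bootstrap lock:

    #include <atomic>
    #include <mutex>

    struct locked_deque_t {
      std::mutex lock;
      std::atomic<unsigned> ntasks{0};
      unsigned size = 256;
      // buffer, head, tail as in the earlier sketch
    };

    // Returns false when the caller should run the task itself instead.
    static bool try_push(locked_deque_t *d, bool may_grow) {
      if (d->ntasks.load(std::memory_order_relaxed) >= d->size && !may_grow)
        return false;                   // unlocked fast-path rejection
      std::lock_guard<std::mutex> g(d->lock);
      if (d->ntasks.load(std::memory_order_relaxed) >= d->size) {
        if (!may_grow)
          return false;
        d->size *= 2;                   // grow under the lock (plus buffer copy)
      }
      // ... masked tail insert as in push_tail() ...
      d->ntasks.fetch_add(1, std::memory_order_relaxed);
      return true;
    }
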
1527 kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
1529 if (thread_data->td.td_deque == NULL) {
1530 __kmp_alloc_task_deque(thread, thread_data);
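
The NULL test at line 1529 is the lazy-allocation idiom used throughout the file: a thread's deque is only materialized the first time something is pushed to it. Schematically (alloc_deque is sketched below, beside the __kmp_alloc_task_deque fragments):

    if (d->buf == nullptr)  // first task ever pushed to this thread
      alloc_deque(d);
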
3055 kmp_thread_data_t *thread_data;
3079 thread_data = &list->td;
3080 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3081 deque_ntasks = thread_data->td.td_deque_ntasks;
3083 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3085 __kmp_get_gtid(), thread_data));
3090 int target = thread_data->td.td_deque_head;
3092 taskdata = thread_data->td.td_deque[target];
3095 thread_data->td.td_deque_head =
3096 (target + 1) & TASK_DEQUE_MASK(thread_data->td);
3100 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3103 gtid, thread_data, task_team, deque_ntasks, target,
3104 thread_data->td.td_deque_tail));
3112 target = (target + 1) & TASK_DEQUE_MASK(thread_data->td);
3113 taskdata = thread_data->td.td_deque[target];
3122 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3126 gtid, thread_data, task_team, deque_ntasks,
3127 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
3134 target = (target + 1) & TASK_DEQUE_MASK(thread_data->td);
3135 thread_data->td.td_deque[prev] = thread_data->td.td_deque[target];
3139 thread_data->td.td_deque_tail ==
3140 (kmp_uint32)((target + 1) & TASK_DEQUE_MASK(thread_data->td)));
3141 thread_data->td.td_deque_tail = target; // tail -= 1 (wrapped)
3143 thread_data->td.td_deque_ntasks = deque_ntasks - 1;
3144 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
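
The priority-task retrieval at lines 3055-3144 pops from the head when the front task is runnable; otherwise it scans forward and, having taken a task from the middle, shifts each later element one slot toward the head so tail can step back by one (the "tail -= 1 (wrapped)" at line 3141). A sketch of the middle removal, reusing deque_t (the runnability test is elided):

    static void *remove_at(deque_t *d, unsigned target) {
      unsigned mask = d->size - 1;
      void *task = d->buf[target];
      // Close the gap: drag every later element one slot toward the head.
      while (target != ((d->tail - 1) & mask)) {
        unsigned next = (target + 1) & mask;
        d->buf[target] = d->buf[next];
        target = next;
      }
      d->tail = target; // tail -= 1 (wrapped)
      d->ntasks -= 1;
      return task;
    }
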
3155 kmp_thread_data_t *thread_data;
3162 thread_data = &task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
3165 gtid, thread_data->td.td_deque_ntasks,
3166 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
3168 if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
3172 gtid, thread_data->td.td_deque_ntasks,
3173 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
3177 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3179 if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
3180 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3184 gtid, thread_data->td.td_deque_ntasks,
3185 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
3189 tail = (thread_data->td.td_deque_tail - 1) &
3190 TASK_DEQUE_MASK(thread_data->td); // Wrap index.
3191 taskdata = thread_data->td.td_deque[tail];
3196 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3200 gtid, thread_data->td.td_deque_ntasks,
3201 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
3205 thread_data->td.td_deque_tail = tail;
3206 TCW_4(thread_data->td.td_deque_ntasks, thread_data->td.td_deque_ntasks - 1);
3208 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3212 gtid, taskdata, thread_data->td.td_deque_ntasks,
3213 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
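
The owner-side pop at lines 3155-3213 takes from the tail (LIFO for the owning thread), again with an unlocked ntasks check both before and after taking the lock. The tail decrement at line 3189 must wrap through the mask because td_deque_tail may be 0:

    // Caller holds the lock and has verified ntasks != 0.
    static void *pop_tail(deque_t *d) {
      unsigned mask = d->size - 1;
      unsigned tail = (d->tail - 1) & mask; // wraps 0 to size - 1
      void *task = d->buf[tail];
      d->tail = tail;
      d->ntasks -= 1;
      return task;
    }
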
3221 // task_team thread_data before calling this routine.
3758 kmp_thread_data_t *thread_data) {
3759 __kmp_init_bootstrap_lock(&thread_data->td.td_deque_lock);
3760 KMP_DEBUG_ASSERT(thread_data->td.td_deque == NULL);
3763 thread_data->td.td_deque_last_stolen = -1;
3765 KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) == 0);
3766 KMP_DEBUG_ASSERT(thread_data->td.td_deque_head == 0);
3767 KMP_DEBUG_ASSERT(thread_data->td.td_deque_tail == 0);
3771 ("__kmp_alloc_task_deque: T#%d allocating deque[%d] for thread_data %p\n",
3772 __kmp_gtid_from_thread(thread), INITIAL_TASK_DEQUE_SIZE, thread_data));
3776 thread_data->td.td_deque = (kmp_taskdata_t **)__kmp_allocate(
3778 thread_data->td.td_deque_size = INITIAL_TASK_DEQUE_SIZE;
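
__kmp_alloc_task_deque (lines 3758-3778) installs a fresh deque of INITIAL_TASK_DEQUE_SIZE entries after asserting the slot is pristine. The size is kept a power of two so TASK_DEQUE_MASK can be size - 1 and every index update is a single AND. A sketch of the invariant (the 256 is illustrative, not necessarily the runtime's constant):

    #include <cassert>
    #include <cstdlib>

    static const unsigned kInitialDequeSize = 256; // must be a power of two

    static void alloc_deque(deque_t *d) {
      assert(d->buf == nullptr && d->ntasks == 0);
      assert(d->head == 0 && d->tail == 0);
      assert((kInitialDequeSize & (kInitialDequeSize - 1)) == 0);
      d->buf = (void **)calloc(kInitialDequeSize, sizeof(void *));
      d->size = kInitialDequeSize; // mask == size - 1 from here on
    }
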
3784 static void __kmp_free_task_deque(kmp_thread_data_t *thread_data) {
3785 if (thread_data->td.td_deque != NULL) {
3786 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3787 TCW_4(thread_data->td.td_deque_ntasks, 0);
3788 __kmp_free(thread_data->td.td_deque);
3789 thread_data->td.td_deque = NULL;
3790 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3795 if (thread_data->td.td_susp_tied_tasks.ts_entries != TASK_STACK_EMPTY) {
3796 __kmp_free_task_stack(__kmp_thread_from_gtid(gtid), thread_data);
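
__kmp_free_task_deque (lines 3784-3790) zeroes ntasks and nulls the pointer while holding the lock, so a thread racing with teardown observes either a valid deque or NULL, never a freed buffer. Sketched with the same hypothetical types:

    #include <cstdlib>
    #include <mutex>

    static void free_deque(deque_t *d, std::mutex *lock) {
      if (d->buf != nullptr) {
        std::lock_guard<std::mutex> g(*lock);
        d->ntasks = 0;    // advertise "empty" before the buffer goes away
        free(d->buf);
        d->buf = nullptr; // racing lookups see NULL and bail out
      }
    }
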
3858 kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3859 __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
3877 kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3878 __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
3890 kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3891 thread_data->td.td_thr = team->t.t_threads[i];
3893 if (thread_data->td.td_deque_last_stolen >= nthreads) {
3897 thread_data->td.td_deque_last_stolen = -1;
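
Lines 3858-3897 come from growing the per-team threads_data array; besides initializing each new entry's task stack, the code invalidates any cached steal victim whose id no longer fits the resized team. The core of that fixup, with hypothetical field names:

    if (td->last_stolen >= nthreads)
      td->last_stolen = -1; // cached victim id is stale after the team resize
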
4234 kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[j];
4235 if (thread_data->td.td_deque == NULL) {
4236 __kmp_alloc_task_deque(__kmp_hidden_helper_threads[j], thread_data);
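
Unlike regular workers, hidden-helper deques (lines 4234-4236) are allocated eagerly, since other threads may push to a helper before it has executed anything itself. Schematically (names hypothetical):

    for (int j = 0; j < num_hidden_helper_threads; ++j)
      if (helper_data[j].buf == nullptr)
        alloc_deque(&helper_data[j]);
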
4365 kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
4367 if (thread_data->td.td_deque == NULL) {
4376 if (TCR_4(thread_data->td.td_deque_ntasks) >=
4377 TASK_DEQUE_SIZE(thread_data->td)) {
4385 if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
4388 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
4389 if (TCR_4(thread_data->td.td_deque_ntasks) >=
4390 TASK_DEQUE_SIZE(thread_data->td)) {
4392 __kmp_realloc_task_deque(thread, thread_data);
4397 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
4399 if (TCR_4(thread_data->td.td_deque_ntasks) >=
4400 TASK_DEQUE_SIZE(thread_data->td)) {
4407 if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
4410 __kmp_realloc_task_deque(thread, thread_data);
4416 thread_data->td.td_deque[thread_data->td.td_deque_tail] = taskdata;
4418 thread_data->td.td_deque_tail =
4419 (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
4420 TCW_4(thread_data->td.td_deque_ntasks,
4421 TCR_4(thread_data->td.td_deque_ntasks) + 1);
4428 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
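
The final group (lines 4365-4428) adds a growth cap to the double-checked push: the deque may be reallocated only while TASK_DEQUE_SIZE / INITIAL_TASK_DEQUE_SIZE < pass, so each retry with a larger pass permits one more doubling instead of letting a single push balloon the deque. The gate in isolation (may_grow_this_pass is an illustrative name):

    // pass 1 permits no growth; pass 2 permits one doubling, and so on.
    static bool may_grow_this_pass(const deque_t *d, unsigned initial_size,
                                   unsigned pass) {
      return d->size / initial_size < pass;
    }
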