/*
 * ompt-specific.cpp -- OMPT internal functions
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//******************************************************************************
// include files
//******************************************************************************

#include "kmp.h"
#include "ompt-specific.h"

#if KMP_OS_UNIX
#include <dlfcn.h>
#endif

#if KMP_OS_WINDOWS
#define THREAD_LOCAL __declspec(thread)
#else
#define THREAD_LOCAL __thread
#endif

#define OMPT_WEAK_ATTRIBUTE KMP_WEAK_ATTRIBUTE

//******************************************************************************
// macros
//******************************************************************************

#define LWT_FROM_TEAM(team) (team)->t.ompt_serialized_team_info

#define OMPT_THREAD_ID_BITS 16

//******************************************************************************
// private operations
//******************************************************************************

//----------------------------------------------------------
// traverse the team and task hierarchy
// note: __ompt_get_teaminfo and __ompt_get_task_info_object
//       traverse the hierarchy similarly and need to be
//       kept consistent
//----------------------------------------------------------

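// Return the ompt_team_info_t of the team `depth` ancestor levels above the
// current one, walking lightweight (serialized) teams before moving up to the
// next enclosing kmp_team. If `size` is non-NULL it receives the team size at
// that level (1 for a lightweight team). Returns NULL if no team exists at
// the requested depth.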
ompt_team_info_t *__ompt_get_teaminfo(int depth, int *size) {
  kmp_info_t *thr = ompt_get_thread();

  if (thr) {
    kmp_team *team = thr->th.th_team;
    if (team == NULL)
      return NULL;

    ompt_lw_taskteam_t *next_lwt = LWT_FROM_TEAM(team), *lwt = NULL;

    while (depth > 0) {
      // next lightweight team (if any)
      if (lwt)
        lwt = lwt->parent;

      // next heavyweight team (if any) after
      // lightweight teams are exhausted
      if (!lwt && team) {
        if (next_lwt) {
          lwt = next_lwt;
          next_lwt = NULL;
        } else {
          team = team->t.t_parent;
          if (team) {
            next_lwt = LWT_FROM_TEAM(team);
          }
        }
      }

      depth--;
    }

    if (lwt) {
      // lightweight teams have one task
      if (size)
        *size = 1;

      // return team info for lightweight team
      return &lwt->ompt_team_info;
    } else if (team) {
      // extract size from heavyweight team
      if (size)
        *size = team->t.t_nproc;

      // return team info for heavyweight team
      return &team->t.ompt_team_info;
    }
  }

  return NULL;
}

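// Return the ompt_task_info_t of the task `depth` ancestor levels above the
// current task, using the same interleaving of lightweight teams and
// td_parent links as __ompt_get_teaminfo (the two must stay consistent).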
ompt_task_info_t *__ompt_get_task_info_object(int depth) {
  ompt_task_info_t *info = NULL;
  kmp_info_t *thr = ompt_get_thread();

  if (thr) {
    kmp_taskdata_t *taskdata = thr->th.th_current_task;
    ompt_lw_taskteam_t *lwt = NULL,
                       *next_lwt = LWT_FROM_TEAM(taskdata->td_team);

    while (depth > 0) {
      // next lightweight team (if any)
      if (lwt)
        lwt = lwt->parent;

      // next heavyweight team (if any) after
      // lightweight teams are exhausted
      if (!lwt && taskdata) {
        if (next_lwt) {
          lwt = next_lwt;
          next_lwt = NULL;
        } else {
          taskdata = taskdata->td_parent;
          if (taskdata) {
            next_lwt = LWT_FROM_TEAM(taskdata->td_team);
          }
        }
      }
      depth--;
    }

    if (lwt) {
      info = &lwt->ompt_task_info;
    } else if (taskdata) {
      info = &taskdata->ompt_task_info;
    }
  }

  return info;
}

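// Like __ompt_get_task_info_object, but when walking up from an explicit task
// it first follows scheduling_parent (the task that was active when this task
// started executing) before falling back to lightweight teams and the
// structural td_parent chain.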
ompt_task_info_t *__ompt_get_scheduling_taskinfo(int depth) {
  ompt_task_info_t *info = NULL;
  kmp_info_t *thr = ompt_get_thread();

  if (thr) {
    kmp_taskdata_t *taskdata = thr->th.th_current_task;

    ompt_lw_taskteam_t *lwt = NULL,
                       *next_lwt = LWT_FROM_TEAM(taskdata->td_team);

    while (depth > 0) {
      // next lightweight team (if any)
      if (lwt)
        lwt = lwt->parent;

      // next heavyweight team (if any) after
      // lightweight teams are exhausted
      if (!lwt && taskdata) {
        // first try scheduling parent (for explicit task scheduling)
        if (taskdata->ompt_task_info.scheduling_parent) {
          taskdata = taskdata->ompt_task_info.scheduling_parent;
        } else if (next_lwt) {
          lwt = next_lwt;
          next_lwt = NULL;
        } else {
          // then go for implicit tasks
          taskdata = taskdata->td_parent;
          if (taskdata) {
            next_lwt = LWT_FROM_TEAM(taskdata->td_team);
          }
        }
      }
      depth--;
    }

    if (lwt) {
      info = &lwt->ompt_task_info;
    } else if (taskdata) {
      info = &taskdata->ompt_task_info;
    }
  }

  return info;
}

//******************************************************************************
// interface operations
//******************************************************************************

//----------------------------------------------------------
// thread support
//----------------------------------------------------------

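// Return a pointer to the current thread's ompt_data_t, or NULL when the
// caller is not a registered OpenMP thread (negative gtid).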
ompt_data_t *__ompt_get_thread_data_internal() {
  if (__kmp_get_gtid() >= 0) {
    kmp_info_t *thread = ompt_get_thread();
    if (thread == NULL)
      return NULL;
    return &(thread->th.ompt_thread_info.thread_data);
  }
  return NULL;
}

//----------------------------------------------------------
// state support
//----------------------------------------------------------

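// Record the address a thread is about to wait on; it is reported back to
// tools as an ompt_wait_id_t together with the thread state returned by
// __ompt_get_state_internal below.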
void __ompt_thread_assign_wait_id(void *variable) {
  kmp_info_t *ti = ompt_get_thread();

  if (ti)
    ti->th.ompt_thread_info.wait_id = (ompt_wait_id_t)(uintptr_t)variable;
}

int __ompt_get_state_internal(ompt_wait_id_t *omp_wait_id) {
  kmp_info_t *ti = ompt_get_thread();

  if (ti) {
    if (omp_wait_id)
      *omp_wait_id = ti->th.ompt_thread_info.wait_id;
    return ti->th.ompt_thread_info.state;
  }
  return ompt_state_undefined;
}

//----------------------------------------------------------
// parallel region support
//----------------------------------------------------------

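// Backing implementation of the ompt_get_parallel_info entry point. Following
// the OMPT convention, it returns 2 when a parallel region exists at the
// requested ancestor level (with parallel_data and team_size filled in as
// requested) and 0 when it does not.
//
// A hypothetical tool-side use, assuming the entry point was obtained via
// ompt_function_lookup in the tool's initializer:
//
//   ompt_data_t *parallel_data;
//   int team_size;
//   if (ompt_get_parallel_info(0, &parallel_data, &team_size) == 2) {
//     // innermost parallel region exists; parallel_data/team_size are valid
//   }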
int __ompt_get_parallel_info_internal(int ancestor_level,
                                      ompt_data_t **parallel_data,
                                      int *team_size) {
  if (__kmp_get_gtid() >= 0) {
    ompt_team_info_t *info;
    if (team_size) {
      info = __ompt_get_teaminfo(ancestor_level, team_size);
    } else {
      info = __ompt_get_teaminfo(ancestor_level, NULL);
    }
    if (parallel_data) {
      *parallel_data = info ? &(info->parallel_data) : NULL;
    }
    return info ? 2 : 0;
  } else {
    return 0;
  }
}

//----------------------------------------------------------
// lightweight task team support
//----------------------------------------------------------

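// Initialize a lightweight task team descriptor for a serialized parallel
// region: one team info / task info pair, no dependences, not yet linked and
// not heap-allocated.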
void __ompt_lw_taskteam_init(ompt_lw_taskteam_t *lwt, kmp_info_t *thr, int gtid,
                             ompt_data_t *ompt_pid, void *codeptr) {
  // seed parallel_data with the caller-supplied value and record codeptr as
  // the region's master return address
  lwt->ompt_team_info.parallel_data = *ompt_pid;
  lwt->ompt_team_info.master_return_address = codeptr;
  lwt->ompt_task_info.task_data.value = 0;
  lwt->ompt_task_info.frame.enter_frame = ompt_data_none;
  lwt->ompt_task_info.frame.exit_frame = ompt_data_none;
  lwt->ompt_task_info.scheduling_parent = NULL;
  lwt->ompt_task_info.deps = NULL;
  lwt->ompt_task_info.ndeps = 0;
  lwt->heap = 0;
  lwt->parent = 0;
}

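// Link a lightweight task team into the chain hanging off the current
// kmp_team. The innermost serialized region's info always lives in the team
// itself, so the incoming values are swapped into the team and the previous
// ones are preserved in the (possibly heap-allocated) copy of lwt, which
// becomes the new head of t.ompt_serialized_team_info. For the first
// serialized level, unless `always` is set, the values are simply stored in
// the team and the lwt object is dropped.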
void __ompt_lw_taskteam_link(ompt_lw_taskteam_t *lwt, kmp_info_t *thr,
                             int on_heap, bool always) {
  ompt_lw_taskteam_t *link_lwt = lwt;
  if (always ||
      thr->th.th_team->t.t_serialized >
          1) { // we already have a team, so link the new team and swap values
    if (on_heap) { // the lw_taskteam cannot stay on stack, allocate it on heap
      link_lwt =
          (ompt_lw_taskteam_t *)__kmp_allocate(sizeof(ompt_lw_taskteam_t));
    }
    link_lwt->heap = on_heap;

    // move the current team/task info into link_lwt and install the new
    // values in the team (a plain swap when link_lwt still lives on the stack)
    ompt_team_info_t tmp_team = lwt->ompt_team_info;
    link_lwt->ompt_team_info = *OMPT_CUR_TEAM_INFO(thr);
    *OMPT_CUR_TEAM_INFO(thr) = tmp_team;

    ompt_task_info_t tmp_task = lwt->ompt_task_info;
    link_lwt->ompt_task_info = *OMPT_CUR_TASK_INFO(thr);
    *OMPT_CUR_TASK_INFO(thr) = tmp_task;

    // link the taskteam into the list of taskteams:
    ompt_lw_taskteam_t *my_parent =
        thr->th.th_team->t.ompt_serialized_team_info;
    link_lwt->parent = my_parent;
    thr->th.th_team->t.ompt_serialized_team_info = link_lwt;
  } else {
    // this is the first serialized team, so we just store the values in the
    // team and drop the taskteam-object
    *OMPT_CUR_TEAM_INFO(thr) = lwt->ompt_team_info;
    *OMPT_CUR_TASK_INFO(thr) = lwt->ompt_task_info;
  }
}

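// Undo __ompt_lw_taskteam_link: pop the head of the serialized-team chain,
// swap its saved team/task info back into the current team, and free it if it
// was allocated on the heap.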
void __ompt_lw_taskteam_unlink(kmp_info_t *thr) {
  ompt_lw_taskteam_t *lwtask = thr->th.th_team->t.ompt_serialized_team_info;
  if (lwtask) {
    thr->th.th_team->t.ompt_serialized_team_info = lwtask->parent;

    ompt_team_info_t tmp_team = lwtask->ompt_team_info;
    lwtask->ompt_team_info = *OMPT_CUR_TEAM_INFO(thr);
    *OMPT_CUR_TEAM_INFO(thr) = tmp_team;

    ompt_task_info_t tmp_task = lwtask->ompt_task_info;
    lwtask->ompt_task_info = *OMPT_CUR_TASK_INFO(thr);
    *OMPT_CUR_TASK_INFO(thr) = tmp_task;

    if (lwtask->heap) {
      __kmp_free(lwtask);
      lwtask = NULL;
    }
  }
  //    return lwtask;
}

//----------------------------------------------------------
// task support
//----------------------------------------------------------

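// Backing implementation of the ompt_get_task_info entry point. Walks the
// task hierarchy the same way as __ompt_get_scheduling_taskinfo, additionally
// tracking the team at each level so that type, frame, parallel_data and
// thread_num can be reported. Returns 2 when a task exists at the requested
// ancestor level and 0 otherwise.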
int __ompt_get_task_info_internal(int ancestor_level, int *type,
                                  ompt_data_t **task_data,
                                  ompt_frame_t **task_frame,
                                  ompt_data_t **parallel_data,
                                  int *thread_num) {
  if (__kmp_get_gtid() < 0)
    return 0;

  if (ancestor_level < 0)
    return 0;

  // copied from __ompt_get_scheduling_taskinfo
  ompt_task_info_t *info = NULL;
  ompt_team_info_t *team_info = NULL;
  kmp_info_t *thr = ompt_get_thread();
  int level = ancestor_level;

  if (thr) {
    kmp_taskdata_t *taskdata = thr->th.th_current_task;
    if (taskdata == NULL)
      return 0;
    kmp_team *team = thr->th.th_team, *prev_team = NULL;
    if (team == NULL)
      return 0;
    ompt_lw_taskteam_t *lwt = NULL,
                       *next_lwt = LWT_FROM_TEAM(taskdata->td_team),
                       *prev_lwt = NULL;

    while (ancestor_level > 0) {
      // needed for thread_num
      prev_team = team;
      prev_lwt = lwt;
      // next lightweight team (if any)
      if (lwt)
        lwt = lwt->parent;

      // next heavyweight team (if any) after
      // lightweight teams are exhausted
      if (!lwt && taskdata) {
        // first try scheduling parent (for explicit task scheduling)
        if (taskdata->ompt_task_info.scheduling_parent) {
          taskdata = taskdata->ompt_task_info.scheduling_parent;
        } else if (next_lwt) {
          lwt = next_lwt;
          next_lwt = NULL;
        } else {
          // then go for implicit tasks
          taskdata = taskdata->td_parent;
          if (team == NULL)
            return 0;
          team = team->t.t_parent;
          if (taskdata) {
            next_lwt = LWT_FROM_TEAM(taskdata->td_team);
          }
        }
      }
      ancestor_level--;
    }

    if (lwt) {
      info = &lwt->ompt_task_info;
      team_info = &lwt->ompt_team_info;
      if (type) {
        *type = ompt_task_implicit;
      }
    } else if (taskdata) {
      info = &taskdata->ompt_task_info;
      team_info = &team->t.ompt_team_info;
      if (type) {
        if (taskdata->td_parent) {
          *type = (taskdata->td_flags.tasktype ? ompt_task_explicit
                                               : ompt_task_implicit) |
                  TASK_TYPE_DETAILS_FORMAT(taskdata);
        } else {
          *type = ompt_task_initial;
        }
      }
    }
    if (task_data) {
      *task_data = info ? &info->task_data : NULL;
    }
    if (task_frame) {
      // OpenMP spec asks for the scheduling task to be returned.
      *task_frame = info ? &info->frame : NULL;
    }
    if (parallel_data) {
      *parallel_data = team_info ? &(team_info->parallel_data) : NULL;
    }
    if (thread_num) {
      if (level == 0)
        *thread_num = __kmp_get_tid();
      else if (prev_lwt)
        *thread_num = 0;
      else
        *thread_num = prev_team->t.t_master_tid;
      //        *thread_num = team->t.t_master_tid;
    }
    return info ? 2 : 0;
  }
  return 0;
}

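// Backing implementation of ompt_get_task_memory. Only a single block
// (blocknum 0) of an explicit task is supported: the address and size of the
// task's private data, i.e. the space that follows the fixed members of
// kmp_task_t in the task allocation.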
int __ompt_get_task_memory_internal(void **addr, size_t *size, int blocknum) {
  if (blocknum != 0)
    return 0; // support only a single block

  kmp_info_t *thr = ompt_get_thread();
  if (!thr)
    return 0;

  kmp_taskdata_t *taskdata = thr->th.th_current_task;
  kmp_task_t *task = KMP_TASKDATA_TO_TASK(taskdata);

  if (taskdata->td_flags.tasktype != TASK_EXPLICIT)
    return 0; // support only explicit task

  void *ret_addr;
  int64_t ret_size = taskdata->td_size_alloc - sizeof(kmp_taskdata_t);

  // kmp_task_t->data1 is an optional member
  if (taskdata->td_flags.destructors_thunk)
    ret_addr = &task->data1 + 1;
  else
    ret_addr = &task->part_id + 1;

  ret_size -= (char *)(ret_addr) - (char *)(task);
  if (ret_size < 0)
    return 0;

  *addr = ret_addr;
  *size = ret_size;
  return 1;
}

//----------------------------------------------------------
// team support
//----------------------------------------------------------

void __ompt_team_assign_id(kmp_team_t *team, ompt_data_t ompt_pid) {
  team->t.ompt_team_info.parallel_data = ompt_pid;
}

//----------------------------------------------------------
// misc
//----------------------------------------------------------

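// Generate IDs that are unique across the process without synchronizing on
// every call: the top OMPT_THREAD_ID_BITS bits hold a thread number handed
// out once per thread by an atomic increment, the remaining bits are a
// per-thread counter.
//
// Worked example with the current OMPT_THREAD_ID_BITS of 16: the second
// thread to request an ID gets new_thread == 2, so its IDs are
// (2ULL << 48) + 1, (2ULL << 48) + 2, ... and cannot collide with any other
// thread's IDs until the 48-bit counter wraps.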
static uint64_t __ompt_get_unique_id_internal() {
  static uint64_t thread = 1;
  static THREAD_LOCAL uint64_t ID = 0;
  if (ID == 0) {
    uint64_t new_thread = KMP_TEST_THEN_INC64((kmp_int64 *)&thread);
    ID = new_thread << (sizeof(uint64_t) * 8 - OMPT_THREAD_ID_BITS);
  }
  return ++ID;
}

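// Map an internal barrier type to the ompt_sync_region_t kind reported to
// tools: fork/join barriers are implicit; for plain barriers the KMP_IDENT_*
// flags of the current source location decide between explicit ('#pragma omp
// barrier') and implicit, falling back to the generic barrier kind when no
// location is available; all other barrier types are reported as
// implementation barriers.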
ompt_sync_region_t __ompt_get_barrier_kind(enum barrier_type bt,
                                           kmp_info_t *thr) {
  if (bt == bs_forkjoin_barrier)
    return ompt_sync_region_barrier_implicit;

  if (bt != bs_plain_barrier)
    return ompt_sync_region_barrier_implementation;

  if (!thr->th.th_ident)
    return ompt_sync_region_barrier;

  kmp_int32 flags = thr->th.th_ident->flags;

  if ((flags & KMP_IDENT_BARRIER_EXPL) != 0)
    return ompt_sync_region_barrier_explicit;

  if ((flags & KMP_IDENT_BARRIER_IMPL) != 0)
    return ompt_sync_region_barrier_implicit;

  return ompt_sync_region_barrier_implementation;
}