/*
 * kmp_error.cpp -- KPTS functions for error checking at runtime
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_error.h"
#include "kmp_i18n.h"
#include "kmp_str.h"

/* ------------------------------------------------------------------------ */

#define MIN_STACK 100

static char const *cons_text_c[] = {
    "(none)", "\"parallel\"", "work-sharing", /* this is not called "for"
                                                 because of lowering of
                                                 "sections" pragmas */
    "\"ordered\" work-sharing", /* this is not called "for ordered" because of
                                   lowering of "sections" pragmas */
    "\"sections\"",
    "work-sharing", /* this is not called "single" because of lowering of
                       "sections" pragmas */
    "\"critical\"", "\"ordered\"", /* in PARALLEL */
    "\"ordered\"", /* in PDO */
    "\"master\"", "\"reduce\"", "\"barrier\""};

#define get_src(ident) ((ident) == NULL ? NULL : (ident)->psource)

#define PUSH_MSG(ct, ident)                                                    \
  "\tpushing on stack: %s (%s)\n", cons_text_c[(ct)], get_src((ident))
#define POP_MSG(p)                                                             \
  "\tpopping off stack: %s (%s)\n", cons_text_c[(p)->stack_data[tos].type],    \
      get_src((p)->stack_data[tos].ident)

static int const cons_text_c_num = sizeof(cons_text_c) / sizeof(char const *);

/* --------------- START OF STATIC LOCAL ROUTINES ------------------------- */

static void __kmp_check_null_func(void) { /* nothing to do */
}

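// Grow the constructs stack for the given thread: roughly double its capacity,
// copy the existing entries into the new allocation, and leave the old buffer
// unfreed (see the NOTE below).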
static void __kmp_expand_cons_stack(int gtid, struct cons_header *p) {
  int i;
  struct cons_data *d;

  /* TODO for monitor perhaps? */
  if (gtid < 0)
    __kmp_check_null_func();

  KE_TRACE(10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid()));

  d = p->stack_data;

  p->stack_size = (p->stack_size * 2) + 100;

  /* TODO free the old data */
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (p->stack_size + 1));

  for (i = p->stack_top; i >= 0; --i)
    p->stack_data[i] = d[i];

  /* NOTE: we do not free the old stack_data */
}

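// Build a human-readable description of a construct (its name plus the file,
// function, and line parsed from ident->psource) for use in error messages.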
// NOTE: Function returns allocated memory, caller must free it!
static char *__kmp_pragma(int ct, ident_t const *ident) {
  char const *cons = NULL; // Construct name.
  char *file = NULL; // File name.
  char *func = NULL; // Function (routine) name.
  char *line = NULL; // Line number.
  kmp_str_buf_t buffer;
  kmp_msg_t prgm;
  __kmp_str_buf_init(&buffer);
  if (0 < ct && ct < cons_text_c_num) {
    cons = cons_text_c[ct];
  } else {
    KMP_DEBUG_ASSERT(0);
  }
  if (ident != NULL && ident->psource != NULL) {
    char *tail = NULL;
    __kmp_str_buf_print(&buffer, "%s",
                        ident->psource); // Copy source to buffer.
    // Split string in buffer to file, func, and line.
    tail = buffer.str;
    __kmp_str_split(tail, ';', NULL, &tail);
    __kmp_str_split(tail, ';', &file, &tail);
    __kmp_str_split(tail, ';', &func, &tail);
    __kmp_str_split(tail, ';', &line, &tail);
  }
  prgm = __kmp_msg_format(kmp_i18n_fmt_Pragma, cons, file, func, line);
  __kmp_str_buf_free(&buffer);
  return prgm.str;
} // __kmp_pragma

/* ----------------- END OF STATIC LOCAL ROUTINES ------------------------- */

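// Issue a fatal error message that names a single construct.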
void __kmp_error_construct(kmp_i18n_id_t id, // Message identifier.
                           enum cons_type ct, // Construct type.
                           ident_t const *ident // Construct ident.
                           ) {
  char *construct = __kmp_pragma(ct, ident);
  __kmp_fatal(__kmp_msg_format(id, construct), __kmp_msg_null);
  KMP_INTERNAL_FREE(construct);
}

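// Issue a fatal error message that names two constructs, typically the
// construct being entered and the conflicting construct already on the stack.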
void __kmp_error_construct2(kmp_i18n_id_t id, // Message identifier.
                            enum cons_type ct, // First construct type.
                            ident_t const *ident, // First construct ident.
                            struct cons_data const *cons // Second construct.
                            ) {
  char *construct1 = __kmp_pragma(ct, ident);
  char *construct2 = __kmp_pragma(cons->type, cons->ident);
  __kmp_fatal(__kmp_msg_format(id, construct1, construct2), __kmp_msg_null);
  KMP_INTERNAL_FREE(construct1);
  KMP_INTERNAL_FREE(construct2);
}

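// Allocate and initialize a constructs stack for the given thread. Entry 0 is
// a ct_none sentinel, so a *_top index of 0 means "no construct of that kind".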
struct cons_header *__kmp_allocate_cons_stack(int gtid) {
  struct cons_header *p;

  /* TODO for monitor perhaps? */
  if (gtid < 0) {
    __kmp_check_null_func();
  }
  KE_TRACE(10, ("allocate cons_stack (%d)\n", gtid));
  p = (struct cons_header *)__kmp_allocate(sizeof(struct cons_header));
  p->p_top = p->w_top = p->s_top = 0;
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (MIN_STACK + 1));
  p->stack_size = MIN_STACK;
  p->stack_top = 0;
  p->stack_data[0].type = ct_none;
  p->stack_data[0].prev = 0;
  p->stack_data[0].ident = NULL;
  return p;
}

void __kmp_free_cons_stack(void *ptr) {
  struct cons_header *p = (struct cons_header *)ptr;
  if (p != NULL) {
    if (p->stack_data != NULL) {
      __kmp_free(p->stack_data);
      p->stack_data = NULL;
    }
    __kmp_free(p);
  }
}

#if KMP_DEBUG
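// Debug-only helper: print the entire constructs stack for a thread via
// __kmp_debug_printf (used through the KE_DUMP macro).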
static void dump_cons_stack(int gtid, struct cons_header *p) {
  int i;
  int tos = p->stack_top;
  kmp_str_buf_t buffer;
  __kmp_str_buf_init(&buffer);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_str_buf_print(&buffer,
                      "Begin construct stack with %d items for thread %d\n",
                      tos, gtid);
  __kmp_str_buf_print(&buffer, "     stack_top=%d { P=%d, W=%d, S=%d }\n", tos,
                      p->p_top, p->w_top, p->s_top);
  for (i = tos; i > 0; i--) {
    struct cons_data *c = &(p->stack_data[i]);
    __kmp_str_buf_print(
        &buffer, "        stack_data[%2d] = { %s (%s) %d %p }\n", i,
        cons_text_c[c->type], get_src(c->ident), c->prev, c->name);
  }
  __kmp_str_buf_print(&buffer, "End construct stack for thread %d\n", gtid);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_debug_printf("%s", buffer.str);
  __kmp_str_buf_free(&buffer);
}
#endif

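// Record entry into a "parallel" construct: push a ct_parallel entry onto the
// thread's constructs stack and make it the new p_top.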
void __kmp_push_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  KE_TRACE(100, (PUSH_MSG(ct_parallel, ident)));
  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct_parallel;
  p->stack_data[tos].prev = p->p_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->p_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

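// Verify that a worksharing construct may start here: it is an error if the
// thread is already inside a worksharing or synchronization construct of the
// current parallel region.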
void __kmp_check_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  if (p->w_top > p->p_top) {
    // We are already in a WORKSHARE construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    // We are already in a SYNC construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}

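// Validate and record entry into a worksharing construct (new w_top).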
void __kmp_push_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  __kmp_check_workshare(gtid, ct, ident);
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->w_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->w_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

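// Verify that a synchronization construct may start here. "ordered" must bind
// to an enclosing worksharing construct that has the ordered clause and must
// not be nested inside "critical" or another "ordered"; "critical" must not be
// nested inside a "critical" of the same name; "master" and "reduce" must not
// appear inside a worksharing construct of the current parallel region.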
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync(int gtid, enum cons_type ct, ident_t const *ident,
                 kmp_user_lock_p lck, kmp_uint32 seq)
#else
__kmp_check_sync(int gtid, enum cons_type ct, ident_t const *ident,
                 kmp_user_lock_p lck)
#endif
{
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KE_TRACE(10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size)
    __kmp_expand_cons_stack(gtid, p);

  if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo) {
    if (p->w_top <= p->p_top) {
/* we are not in a worksharing construct */
#ifdef BUILD_PARALLEL_ORDERED
      /* do not report error messages for PARALLEL ORDERED */
      KMP_ASSERT(ct == ct_ordered_in_parallel);
#else
      __kmp_error_construct(kmp_i18n_msg_CnsBoundToWorksharing, ct, ident);
#endif /* BUILD_PARALLEL_ORDERED */
    } else {
      /* inside a WORKSHARING construct for this PARALLEL region */
      if (!IS_CONS_TYPE_ORDERED(p->stack_data[p->w_top].type)) {
        __kmp_error_construct2(kmp_i18n_msg_CnsNoOrderedClause, ct, ident,
                               &p->stack_data[p->w_top]);
      }
    }
    if (p->s_top > p->p_top && p->s_top > p->w_top) {
      /* inside a sync construct which is inside a worksharing construct */
      int index = p->s_top;
      enum cons_type stack_type;

      stack_type = p->stack_data[index].type;

      if (stack_type == ct_critical ||
          ((stack_type == ct_ordered_in_parallel ||
            stack_type == ct_ordered_in_pdo) &&
           /* C doesn't allow named ordered; ordered in ordered gets error */
           p->stack_data[index].ident != NULL &&
           (p->stack_data[index].ident->flags & KMP_IDENT_KMPC))) {
        /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
        __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                               &p->stack_data[index]);
      }
    }
  } else if (ct == ct_critical) {
#if KMP_USE_DYNAMIC_LOCK
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck, seq) ==
            gtid) { /* this thread already has lock for this critical section */
#else
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck) ==
            gtid) { /* this thread already has lock for this critical section */
#endif
      int index = p->s_top;
      struct cons_data cons = {NULL, ct_critical, 0, NULL};
      /* walk up construct stack and try to find critical with matching name */
      while (index != 0 && p->stack_data[index].name != lck) {
        index = p->stack_data[index].prev;
      }
      if (index != 0) {
        /* found a match on the stack (a match may not always exist because of
         * interleaved critical constructs in Fortran) */
        cons = p->stack_data[index];
      }
      /* we are in CRITICAL which is inside a CRITICAL construct of same name */
      __kmp_error_construct2(kmp_i18n_msg_CnsNestingSameName, ct, ident, &cons);
    }
  } else if (ct == ct_master || ct == ct_reduce) {
    if (p->w_top > p->p_top) {
      /* inside a WORKSHARING construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->w_top]);
    }
    if (ct == ct_reduce && p->s_top > p->p_top) {
      /* inside another SYNC construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->s_top]);
    }
  }
}

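// Validate and record entry into a synchronization construct (new s_top). For
// "critical", the lock pointer is stored as the entry's name so that nested
// "critical" constructs with the same name can be detected.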
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync(int gtid, enum cons_type ct, ident_t const *ident,
                kmp_user_lock_p lck, kmp_uint32 seq)
#else
__kmp_push_sync(int gtid, enum cons_type ct, ident_t const *ident,
                kmp_user_lock_p lck)
#endif
{
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_ASSERT(gtid == __kmp_get_gtid());
  KE_TRACE(10, ("__kmp_push_sync (gtid=%d)\n", gtid));
#if KMP_USE_DYNAMIC_LOCK
  __kmp_check_sync(gtid, ct, ident, lck, seq);
#else
  __kmp_check_sync(gtid, ct, ident, lck);
#endif
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->s_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = lck;
  p->s_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */

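// Record exit from a "parallel" construct; it is an error if the top of the
// stack is not the matching ct_parallel entry.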
void __kmp_pop_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->p_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident);
  }
  if (tos != p->p_top || p->stack_data[tos].type != ct_parallel) {
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct_parallel, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->p_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

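// Record exit from a worksharing construct and return the type of the
// enclosing worksharing construct (ct_none if there is none).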
enum cons_type __kmp_pop_workshare(int gtid, enum cons_type ct,
                                   ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->w_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }

  if (tos != p->w_top ||
      (p->stack_data[tos].type != ct &&
       // below is the exception to the rule that construct types must match
       !(p->stack_data[tos].type == ct_pdo_ordered && ct == ct_pdo))) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->w_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
  return p->stack_data[p->w_top].type;
}

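// Record exit from a synchronization construct; it is an error if the top of
// the stack is not the matching entry.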
void __kmp_pop_sync(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->s_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }
  if (tos != p->s_top || p->stack_data[tos].type != ct) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  if (gtid < 0) {
    __kmp_check_null_func();
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->s_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */

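// Verify that a barrier is legal here: it is an error if the thread is inside
// a worksharing or synchronization construct of the current parallel region.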
void __kmp_check_barrier(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid,
                __kmp_get_gtid()));
  if (ident != 0) {
    __kmp_check_null_func();
  }
  if (p->w_top > p->p_top) {
    /* we are already in a WORKSHARING construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    /* we are already in a SYNC construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}