/*
 * kmp_error.cpp -- KPTS functions for error checking at runtime
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_error.h"
#include "kmp_i18n.h"
#include "kmp_str.h"
/* ------------------------------------------------------------------------ */

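// Initial size (in entries) of a thread's construct stack.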
#define MIN_STACK 100

static char const *cons_text_c[] = {
    "(none)", "\"parallel\"", "work-sharing", /* this is not called "for"
                                                 because of lowering of
                                                 "sections" pragmas */
    "\"ordered\" work-sharing", /* this is not called "for ordered" because of
                                   lowering of "sections" pragmas */
    "\"sections\"",
    "work-sharing", /* this is not called "single" because of lowering of
                       "sections" pragmas */
    "\"critical\"", "\"ordered\"", /* in PARALLEL */
    "\"ordered\"", /* in PDO */
    "\"master\"", "\"reduce\"", "\"barrier\""};

#define get_src(ident) ((ident) == NULL ? NULL : (ident)->psource)

#define PUSH_MSG(ct, ident)                                                    \
  "\tpushing on stack: %s (%s)\n", cons_text_c[(ct)], get_src((ident))
#define POP_MSG(p)                                                             \
  "\tpopping off stack: %s (%s)\n", cons_text_c[(p)->stack_data[tos].type],    \
      get_src((p)->stack_data[tos].ident)
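// Note: POP_MSG expands a reference to a local variable "tos", which must be
// in scope at each expansion site.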

static int const cons_text_c_num = sizeof(cons_text_c) / sizeof(char const *);

/* --------------- START OF STATIC LOCAL ROUTINES ------------------------- */

static void __kmp_check_null_func(void) { /* nothing to do */
}

static void __kmp_expand_cons_stack(int gtid, struct cons_header *p) {
  int i;
  struct cons_data *d;

  /* TODO for monitor perhaps? */
  if (gtid < 0)
    __kmp_check_null_func();

  KE_TRACE(10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid()));

  d = p->stack_data;

  p->stack_size = (p->stack_size * 2) + 100;

  /* TODO free the old data */
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (p->stack_size + 1));

  for (i = p->stack_top; i >= 0; --i)
    p->stack_data[i] = d[i];

  /* NOTE: we do not free the old stack_data */
}

// NOTE: Function returns allocated memory, caller must free it!
static char *__kmp_pragma(int ct, ident_t const *ident) {
  char const *cons = NULL; // Construct name.
  char *file = NULL; // File name.
  char *func = NULL; // Function (routine) name.
  char *line = NULL; // Line number.
  kmp_str_buf_t buffer;
  kmp_msg_t prgm;
  __kmp_str_buf_init(&buffer);
  if (0 < ct && ct < cons_text_c_num) {
    cons = cons_text_c[ct];
  } else {
    KMP_DEBUG_ASSERT(0);
  }
  if (ident != NULL && ident->psource != NULL) {
    char *tail = NULL;
    __kmp_str_buf_print(&buffer, "%s",
                        ident->psource); // Copy source to buffer.
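    // psource is a semicolon-separated string with an empty leading field,
    // followed by file, routine, and line fields; the first split below
    // discards the leading empty field.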
    // Split string in buffer to file, func, and line.
    tail = buffer.str;
    __kmp_str_split(tail, ';', NULL, &tail);
    __kmp_str_split(tail, ';', &file, &tail);
    __kmp_str_split(tail, ';', &func, &tail);
    __kmp_str_split(tail, ';', &line, &tail);
  }
  prgm = __kmp_msg_format(kmp_i18n_fmt_Pragma, cons, file, func, line);
  __kmp_str_buf_free(&buffer);
  return prgm.str;
} // __kmp_pragma

/* ----------------- END OF STATIC LOCAL ROUTINES ------------------------- */

void __kmp_error_construct(kmp_i18n_id_t id, // Message identifier.
                           enum cons_type ct, // Construct type.
                           ident_t const *ident // Construct ident.
) {
  char *construct = __kmp_pragma(ct, ident);
  __kmp_fatal(__kmp_msg_format(id, construct), __kmp_msg_null);
  KMP_INTERNAL_FREE(construct);
}

void __kmp_error_construct2(kmp_i18n_id_t id, // Message identifier.
                            enum cons_type ct, // First construct type.
                            ident_t const *ident, // First construct ident.
                            struct cons_data const *cons // Second construct.
) {
  char *construct1 = __kmp_pragma(ct, ident);
  char *construct2 = __kmp_pragma(cons->type, cons->ident);
  __kmp_fatal(__kmp_msg_format(id, construct1, construct2), __kmp_msg_null);
  KMP_INTERNAL_FREE(construct1);
  KMP_INTERNAL_FREE(construct2);
}

struct cons_header *__kmp_allocate_cons_stack(int gtid) {
  struct cons_header *p;

  /* TODO for monitor perhaps? */
  if (gtid < 0) {
    __kmp_check_null_func();
  }
  KE_TRACE(10, ("allocate cons_stack (%d)\n", gtid));
  p = (struct cons_header *)__kmp_allocate(sizeof(struct cons_header));
  p->p_top = p->w_top = p->s_top = 0;
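  /* Allocate stack_size + 1 entries: entry 0 is a permanent ct_none sentinel,
     so indices 1..stack_size are available for pushed constructs. */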
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (MIN_STACK + 1));
  p->stack_size = MIN_STACK;
  p->stack_top = 0;
  p->stack_data[0].type = ct_none;
  p->stack_data[0].prev = 0;
  p->stack_data[0].ident = NULL;
  return p;
}

void __kmp_free_cons_stack(void *ptr) {
  struct cons_header *p = (struct cons_header *)ptr;
  if (p != NULL) {
    if (p->stack_data != NULL) {
      __kmp_free(p->stack_data);
      p->stack_data = NULL;
    }
    __kmp_free(p);
  }
}

#if KMP_DEBUG
static void dump_cons_stack(int gtid, struct cons_header *p) {
  int i;
  int tos = p->stack_top;
  kmp_str_buf_t buffer;
  __kmp_str_buf_init(&buffer);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_str_buf_print(&buffer,
                      "Begin construct stack with %d items for thread %d\n",
                      tos, gtid);
  __kmp_str_buf_print(&buffer, " stack_top=%d { P=%d, W=%d, S=%d }\n", tos,
                      p->p_top, p->w_top, p->s_top);
  for (i = tos; i > 0; i--) {
    struct cons_data *c = &(p->stack_data[i]);
    __kmp_str_buf_print(
        &buffer, " stack_data[%2d] = { %s (%s) %d %p }\n", i,
        cons_text_c[c->type], get_src(c->ident), c->prev, c->name);
  }
  __kmp_str_buf_print(&buffer, "End construct stack for thread %d\n", gtid);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_debug_printf("%s", buffer.str);
  __kmp_str_buf_free(&buffer);
}
#endif

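/* Bookkeeping invariant: p_top, w_top, and s_top hold the stack indices of
   the innermost PARALLEL, worksharing, and synchronization constructs,
   respectively; each entry's "prev" field links to the enclosing construct of
   the same kind. A comparison such as "w_top > p_top" therefore tests whether
   the innermost worksharing construct was entered inside the current PARALLEL
   region. */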
void __kmp_push_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  KE_TRACE(100, (PUSH_MSG(ct_parallel, ident)));
  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct_parallel;
  p->stack_data[tos].prev = p->p_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->p_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

void __kmp_check_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  if (p->w_top > p->p_top) {
    // We are already in a WORKSHARE construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    // We are already in a SYNC construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}

void __kmp_push_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  __kmp_check_workshare(gtid, ct, ident);
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->w_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->w_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KE_TRACE(10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size)
    __kmp_expand_cons_stack(gtid, p);

  if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo) {
    if (p->w_top <= p->p_top) {
      /* we are not in a worksharing construct */
#ifdef BUILD_PARALLEL_ORDERED
      /* do not report error messages for PARALLEL ORDERED */
      KMP_ASSERT(ct == ct_ordered_in_parallel);
#else
      __kmp_error_construct(kmp_i18n_msg_CnsBoundToWorksharing, ct, ident);
#endif /* BUILD_PARALLEL_ORDERED */
    } else {
      /* inside a WORKSHARING construct for this PARALLEL region */
      if (!IS_CONS_TYPE_ORDERED(p->stack_data[p->w_top].type)) {
        __kmp_error_construct2(kmp_i18n_msg_CnsNoOrderedClause, ct, ident,
                               &p->stack_data[p->w_top]);
      }
    }
    if (p->s_top > p->p_top && p->s_top > p->w_top) {
      /* inside a sync construct which is inside a worksharing construct */
      int index = p->s_top;
      enum cons_type stack_type;

      stack_type = p->stack_data[index].type;

      if (stack_type == ct_critical ||
          ((stack_type == ct_ordered_in_parallel ||
            stack_type == ct_ordered_in_pdo) &&
           /* C doesn't allow named ordered; ordered in ordered gets error */
           p->stack_data[index].ident != NULL &&
           (p->stack_data[index].ident->flags & KMP_IDENT_KMPC))) {
        /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
        __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                               &p->stack_data[index]);
      }
    }
  } else if (ct == ct_critical) {
#if KMP_USE_DYNAMIC_LOCK
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck, seq) ==
            gtid) { /* this thread already holds the lock for this critical
                       section */
#else
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck) ==
            gtid) { /* this thread already holds the lock for this critical
                       section */
#endif
      int index = p->s_top;
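      /* Fallback entry { NULL, ct_critical, 0, NULL }: per the field order of
         struct cons_data in kmp.h this is ident = NULL, type = ct_critical,
         prev = 0, name = NULL; it is passed to the error message below if no
         enclosing CRITICAL with a matching name is found on the stack. */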
      struct cons_data cons = {NULL, ct_critical, 0, NULL};
      /* walk up construct stack and try to find critical with matching name */
      while (index != 0 && p->stack_data[index].name != lck) {
        index = p->stack_data[index].prev;
      }
      if (index != 0) {
        /* found a match on the stack (a match may not always exist, because
         * of interleaved CRITICAL constructs in Fortran) */
        cons = p->stack_data[index];
      }
      /* we are in CRITICAL which is inside a CRITICAL construct of same name */
      __kmp_error_construct2(kmp_i18n_msg_CnsNestingSameName, ct, ident, &cons);
    }
  } else if (ct == ct_master || ct == ct_reduce) {
    if (p->w_top > p->p_top) {
      /* inside a WORKSHARING construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->w_top]);
    }
    if (ct == ct_reduce && p->s_top > p->p_top) {
      /* inside another SYNC construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->s_top]);
    }
  }
}

void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_ASSERT(gtid == __kmp_get_gtid());
  KE_TRACE(10, ("__kmp_push_sync (gtid=%d)\n", gtid));
#if KMP_USE_DYNAMIC_LOCK
  __kmp_check_sync(gtid, ct, ident, lck, seq);
#else
  __kmp_check_sync(gtid, ct, ident, lck);
#endif
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->s_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = lck;
  p->s_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */

void __kmp_pop_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->p_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident);
  }
  if (tos != p->p_top || p->stack_data[tos].type != ct_parallel) {
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct_parallel, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->p_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

enum cons_type __kmp_pop_workshare(int gtid, enum cons_type ct,
                                   ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->w_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }

  if (tos != p->w_top ||
      (p->stack_data[tos].type != ct &&
       // below is the exception to the rule that construct types must match
       !(p->stack_data[tos].type == ct_pdo_ordered && ct == ct_pdo))) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->w_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
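  // Return the type of the enclosing worksharing construct, which is now the
  // innermost one after the pop.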
  return p->stack_data[p->w_top].type;
}

void __kmp_pop_sync(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->s_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }
  if (tos != p->s_top || p->stack_data[tos].type != ct) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->s_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */

void __kmp_check_barrier(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid,
                __kmp_get_gtid()));
  if (ident != 0) {
    __kmp_check_null_func();
  }
  if (p->w_top > p->p_top) {
    /* we are already in a WORKSHARING construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    /* we are already in a SYNC construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}