/*
 * z_Linux_util.cpp -- platform specific routines.
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_affinity.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_itt.h"
#include "kmp_lock.h"
#include "kmp_stats.h"
#include "kmp_str.h"
#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"

#if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
#include <alloca.h>
#endif
#include <math.h> // HUGE_VAL.
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/times.h>
#include <unistd.h>

#if KMP_OS_LINUX
#include <sys/sysinfo.h>
#if KMP_USE_FUTEX
// We should really include <futex.h>, but that causes compatibility problems
// on different Linux* OS distributions, which either require that you include
// (or break when you try to include) <pci/types.h>. Since all we need is the
// two macros below (which are part of the kernel ABI, so can't change), we
// just define the constants here and don't include <futex.h>.
#ifndef FUTEX_WAIT
#define FUTEX_WAIT 0
#endif
#ifndef FUTEX_WAKE
#define FUTEX_WAKE 1
#endif
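
// Illustrative only: with just the two macros above, the futex interface can
// be driven directly through syscall(2). A minimal sketch (hypothetical
// helper, not part of the runtime; relies on <sys/syscall.h> and <unistd.h>
// included above):
#if 0
static void __kmp_futex_sketch(int *futex_word) {
  // Waiter side: sleep while *futex_word still equals 0. The kernel
  // re-checks the value atomically, so a concurrent change makes the call
  // return immediately instead of sleeping.
  syscall(__NR_futex, futex_word, FUTEX_WAIT, 0, NULL, NULL, 0);
  // Waker side (normally run from another thread): wake at most one waiter
  // blocked on futex_word.
  syscall(__NR_futex, futex_word, FUTEX_WAKE, 1, NULL, NULL, 0);
}
#endif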
#endif
#elif KMP_OS_DARWIN
#include <mach/mach.h>
#include <sys/sysctl.h>
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <pthread_np.h>
#elif KMP_OS_NETBSD || KMP_OS_OPENBSD
#include <sys/types.h>
#include <sys/sysctl.h>
#endif

#include <ctype.h>
#include <dirent.h>
#include <fcntl.h>

#include "tsan_annotations.h"

struct kmp_sys_timer {
  struct timespec start;
};

// Convert timespec to nanoseconds.
#define TS2NS(timespec) (((timespec).tv_sec * 1e9) + (timespec).tv_nsec)
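// For example, {tv_sec = 2, tv_nsec = 500000000} yields 2.5e9 ns. Note that
// the 1e9 literal promotes the arithmetic to double.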

static struct kmp_sys_timer __kmp_sys_timer_data;

#if KMP_HANDLE_SIGNALS
typedef void (*sig_func_t)(int);
STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
static sigset_t __kmp_sigset;
#endif

static int __kmp_init_runtime = FALSE;

static int __kmp_fork_count = 0;

static pthread_condattr_t __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

static kmp_cond_align_t __kmp_wait_cv;
static kmp_mutex_align_t __kmp_wait_mx;

kmp_uint64 __kmp_ticks_per_msec = 1000000;

#ifdef DEBUG_SUSPEND
static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
  KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
               cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
               cond->c_cond.__c_waiting);
}
#endif

#if ((KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED)

/* Affinity support */

void __kmp_affinity_bind_thread(int which) {
  KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
              "Illegal set affinity operation when not capable");

  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC_ON_STACK(mask);
  KMP_CPU_ZERO(mask);
  KMP_CPU_SET(which, mask);
  __kmp_set_system_affinity(mask, TRUE);
  KMP_CPU_FREE_FROM_STACK(mask);
}

/* Determine if we can access affinity functionality on this version of
 * Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set
 * __kmp_affin_mask_size to the appropriate value (0 means not capable). */
void __kmp_affinity_determine_capable(const char *env_var) {
// Check and see if the OS supports thread affinity.

#if KMP_OS_LINUX
#define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)
#elif KMP_OS_FREEBSD
#define KMP_CPU_SET_SIZE_LIMIT (sizeof(cpuset_t))
#endif

#if KMP_OS_LINUX
  // If Linux* OS:
  // If the syscall fails or returns a suggestion for the size,
  // then we don't have to search for an appropriate size.
  int gCode;
  int sCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
  gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_SIZE_LIMIT, buf);
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %d errno = %d\n",
                gCode, errno));

  // if ((gCode < 0) && (errno == ENOSYS))
  if (gCode < 0) {
    // System call not supported
    if (__kmp_affinity_verbose ||
        (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none) &&
         (__kmp_affinity_type != affinity_default) &&
         (__kmp_affinity_type != affinity_disabled))) {
      int error = errno;
      kmp_msg_t err_code = KMP_ERR(error);
      __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                err_code, __kmp_msg_null);
      if (__kmp_generate_warnings == kmp_warnings_off) {
        __kmp_str_free(&err_code.str);
      }
    }
    KMP_AFFINITY_DISABLE();
    KMP_INTERNAL_FREE(buf);
    return;
  }
  if (gCode > 0) { // Linux* OS only
    // The optimal situation: the OS returns the size of the buffer it expects.
    //
    // A verification of correct behavior is that setaffinity on a NULL
    // buffer with the same size fails with errno set to EFAULT.
    sCode = syscall(__NR_sched_setaffinity, 0, gCode, NULL);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "setaffinity for mask size %d returned %d errno = %d\n",
                  gCode, sCode, errno));
    if (sCode < 0) {
      if (errno == ENOSYS) {
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none) &&
             (__kmp_affinity_type != affinity_default) &&
             (__kmp_affinity_type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(SetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
      }
      if (errno == EFAULT) {
        KMP_AFFINITY_ENABLE(gCode);
        KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                      "affinity supported (mask size %d)\n",
                      (int)__kmp_affin_mask_size));
        KMP_INTERNAL_FREE(buf);
        return;
      }
    }
  }

  // Call the getaffinity system call repeatedly with increasing set sizes
  // until we succeed, or reach an upper bound on the search.
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "searching for proper set size\n"));
  int size;
  for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
    gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "getaffinity for mask size %d returned %d errno = %d\n",
                  size, gCode, errno));

    if (gCode < 0) {
      if (errno == ENOSYS) {
        // We shouldn't get here
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %d\n",
                      size));
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none) &&
             (__kmp_affinity_type != affinity_default) &&
             (__kmp_affinity_type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      continue;
    }

    sCode = syscall(__NR_sched_setaffinity, 0, gCode, NULL);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "setaffinity for mask size %d returned %d errno = %d\n",
                  gCode, sCode, errno));
    if (sCode < 0) {
      if (errno == ENOSYS) { // Linux* OS only
        // We shouldn't get here
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %d\n",
                      size));
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none) &&
             (__kmp_affinity_type != affinity_default) &&
             (__kmp_affinity_type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(SetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      if (errno == EFAULT) {
        KMP_AFFINITY_ENABLE(gCode);
        KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                      "affinity supported (mask size %d)\n",
                      (int)__kmp_affin_mask_size));
        KMP_INTERNAL_FREE(buf);
        return;
      }
    }
  }
#elif KMP_OS_FREEBSD
  int gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
  gCode = pthread_getaffinity_np(pthread_self(), KMP_CPU_SET_SIZE_LIMIT,
                                 reinterpret_cast<cpuset_t *>(buf));
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %d errno = %d\n",
                gCode, errno));
  if (gCode == 0) {
    KMP_AFFINITY_ENABLE(KMP_CPU_SET_SIZE_LIMIT);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#endif
  // save uncaught error code
  // int error = errno;
  KMP_INTERNAL_FREE(buf);
  // restore uncaught error code, will be printed at the next KMP_WARNING below
  // errno = error;

  // Affinity is not supported
  KMP_AFFINITY_DISABLE();
  KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                "cannot determine mask size - affinity not supported\n"));
  if (__kmp_affinity_verbose ||
      (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none) &&
       (__kmp_affinity_type != affinity_default) &&
       (__kmp_affinity_type != affinity_disabled))) {
    KMP_WARNING(AffCantGetMaskSize, env_var);
  }
}
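
// The probing strategy above, reduced to a standalone sketch (Linux-only,
// hypothetical helper, not used by the runtime): grow the candidate mask
// until sched_getaffinity accepts it, then confirm the kernel's mask size
// with the NULL-buffer EFAULT check described in the comments above.
#if 0
static int __kmp_probe_affinity_mask_size() {
  for (int size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
    unsigned char *buf = (unsigned char *)KMP_INTERNAL_MALLOC(size);
    int gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
    KMP_INTERNAL_FREE(buf);
    if (gCode <= 0)
      continue; // mask too small (or unexpected failure); try a bigger one
    // setaffinity on a NULL buffer of the accepted size must fault:
    if (syscall(__NR_sched_setaffinity, 0, gCode, NULL) < 0 && errno == EFAULT)
      return gCode; // the kernel's cpumask size in bytes
  }
  return 0; // not capable
}
#endif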

#endif // (KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED

#if KMP_USE_FUTEX

int __kmp_futex_determine_capable() {
  int loc = 0;
  int rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
  int retval = (rc == 0) || (errno != ENOSYS);

  KA_TRACE(10,
           ("__kmp_futex_determine_capable: rc = %d errno = %d\n", rc, errno));
  KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
                retval ? "" : " not"));

  return retval;
}

#endif // KMP_USE_FUTEX

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (!KMP_ASM_INTRINS)
/* Only a 32-bit "add-exchange" instruction is available on the IA-32
   architecture, so we use compare_and_store loops for these routines. */

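// The retry pattern shared by all of the routines below, shown once in
// GCC/Clang intrinsic form (illustrative sketch only; the real routines use
// the KMP_COMPARE_AND_STORE_REL* macros and TCR_* reads):
#if 0
static kmp_uint32 __kmp_cas_or32_sketch(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value = *p;
  // Retry until no other thread modified *p between our read and the CAS.
  while (!__sync_bool_compare_and_swap(p, old_value, old_value | d)) {
    KMP_CPU_PAUSE();
    old_value = *p;
  }
  return old_value; // fetch-or semantics: the value before the OR
}
#endif
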
kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#if KMP_ARCH_X86
kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
  kmp_int64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value + d;
  }
  return old_value;
}
#endif /* KMP_ARCH_X86 */

kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value | d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value & d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS) */

void __kmp_terminate_thread(int gtid) {
  int status;
  kmp_info_t *th = __kmp_threads[gtid];

  if (!th)
    return;

#ifdef KMP_CANCEL_THREADS
  KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
  status = pthread_cancel(th->th.th_info.ds.ds_thread);
  if (status != 0 && status != ESRCH) {
    __kmp_fatal(KMP_MSG(CantTerminateWorkerThread), KMP_ERR(status),
                __kmp_msg_null);
  }
#endif
  KMP_YIELD(TRUE);
} // __kmp_terminate_thread

/* Set thread stack info according to values returned by pthread_getattr_np().
   If values are unreasonable, assume call failed and use incremental stack
   refinement method instead. Returns TRUE if the stack parameters could be
   determined exactly, FALSE if incremental refinement is necessary. */
static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) {
  int stack_data;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
        KMP_OS_HURD
  pthread_attr_t attr;
  int status;
  size_t size = 0;
  void *addr = 0;

  /* Always do incremental stack refinement for ubermaster threads since the
     initial thread stack range can be reduced by sibling thread creation so
     pthread_attr_getstack may cause thread gtid aliasing */
  if (!KMP_UBER_GTID(gtid)) {

    /* Fetch the real thread attributes */
    status = pthread_attr_init(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_init", status);
#if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
    status = pthread_attr_get_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_attr_get_np", status);
#else
    status = pthread_getattr_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_getattr_np", status);
#endif
    status = pthread_attr_getstack(&attr, &addr, &size);
    KMP_CHECK_SYSFAIL("pthread_attr_getstack", status);
    KA_TRACE(60,
             ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
              " %lu, low addr: %p\n",
              gtid, size, addr));
    status = pthread_attr_destroy(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_destroy", status);
  }

  if (size != 0 && addr != 0) { // was stack parameter determination successful?
    /* Store the correct base and size */
    TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
    TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
    TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
    return TRUE;
  }
#endif /* KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||
          KMP_OS_HURD */
  /* Use incremental refinement starting from initial conservative estimate */
  TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
  TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
  TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
  return FALSE;
}
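
// The exact-query path above, as a standalone sketch (glibc-style API,
// hypothetical helper, illustrative only):
#if 0
static void __kmp_stack_info_sketch() {
  pthread_attr_t attr;
  void *addr;
  size_t size;
  pthread_getattr_np(pthread_self(), &attr); // snapshot of the live thread
  pthread_attr_getstack(&attr, &addr, &size); // addr = low end of the stack
  pthread_attr_destroy(&attr);
  // ds_stackbase stores the high end, (char *)addr + size, since stacks
  // grow downward on these targets.
}
#endif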

static void *__kmp_launch_worker(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
  void *exit_val;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
        KMP_OS_OPENBSD || KMP_OS_HURD
  void *volatile padding = 0;
#endif
  int gtid;

  gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
  __kmp_gtid_set_specific(gtid);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = gtid;
#endif
#if KMP_STATS_ENABLED
  // set thread local index to point to thread-specific stats
  __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
  __kmp_stats_thread_ptr->startLife();
  KMP_SET_THREAD_STATE(IDLE);
  KMP_INIT_PARTITIONED_TIMERS(OMP_idle);
#endif

#if USE_ITT_BUILD
  __kmp_itt_thread_name(gtid);
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_set_init_mask(gtid, FALSE);
#endif

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  // josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads?
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  // Set FP control regs to be a copy of the parallel initialization thread's.
  __kmp_clear_x87_fpu_status_word();
  __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
  __kmp_load_mxcsr(&__kmp_init_mxcsr);
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
        KMP_OS_OPENBSD
  if (__kmp_stkoffset > 0 && gtid > 0) {
    padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
  }
#endif

  KMP_MB();
  __kmp_set_stack_info(gtid, (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

  exit_val = __kmp_launch_thread((kmp_info_t *)thr);

#ifdef KMP_BLOCK_SIGNALS
  status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  return exit_val;
}

#if KMP_USE_MONITOR
/* The monitor thread controls all of the threads in the complex */

static void *__kmp_launch_monitor(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set;
#endif /* KMP_BLOCK_SIGNALS */
  struct timespec interval;

  KMP_MB(); /* Flush all pending memory write invalidates.  */

  KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));

  /* register us as the monitor thread */
  __kmp_gtid_set_specific(KMP_GTID_MONITOR);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = KMP_GTID_MONITOR;
#endif

  KMP_MB();

#if USE_ITT_BUILD
  // Instruct Intel(R) Threading Tools to ignore monitor thread.
  __kmp_itt_thread_ignore();
#endif /* USE_ITT_BUILD */

  __kmp_set_stack_info(((kmp_info_t *)thr)->th.th_info.ds.ds_gtid,
                       (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  // josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads?
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_REAL_TIME_FIX
  // This is a potential fix that allows applications with a real-time
  // scheduling policy to work. However, a final decision about the fix has
  // not been made yet, so it is disabled by default.
  { // Was the program started with a real-time scheduling policy?
    int sched = sched_getscheduler(0);
    if (sched == SCHED_FIFO || sched == SCHED_RR) {
      // Yes, we are part of a real-time application. Try to increase the
      // priority of the monitor.
      struct sched_param param;
      int max_priority = sched_get_priority_max(sched);
      int rc;
      KMP_WARNING(RealTimeSchedNotSupported);
      sched_getparam(0, &param);
      if (param.sched_priority < max_priority) {
        param.sched_priority += 1;
        rc = sched_setscheduler(0, sched, &param);
        if (rc != 0) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(CantChangeMonitorPriority),
                    err_code, KMP_MSG(MonitorWillStarve), __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
      } else {
        // We cannot abort here, because the number of CPUs may be enough for
        // all the threads, including the monitor thread, so the application
        // could potentially work...
        __kmp_msg(kmp_ms_warning, KMP_MSG(RunningAtMaxPriority),
                  KMP_MSG(MonitorWillStarve), KMP_HNT(RunningAtMaxPriority),
                  __kmp_msg_null);
      }
    }
    // AC: release the thread that waits for the monitor to start
    TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
  }
#endif // KMP_REAL_TIME_FIX

  KMP_MB(); /* Flush all pending memory write invalidates.  */

  if (__kmp_monitor_wakeups == 1) {
    interval.tv_sec = 1;
    interval.tv_nsec = 0;
  } else {
    interval.tv_sec = 0;
    interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));

  while (!TCR_4(__kmp_global.g.g_done)) {
    struct timespec now;
    struct timeval tval;

    /*  This thread monitors the state of the system */

    KA_TRACE(15, ("__kmp_launch_monitor: update\n"));

    status = gettimeofday(&tval, NULL);
    KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
    TIMEVAL_TO_TIMESPEC(&tval, &now);

    now.tv_sec += interval.tv_sec;
    now.tv_nsec += interval.tv_nsec;

    if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
      now.tv_sec += 1;
      now.tv_nsec -= KMP_NSEC_PER_SEC;
    }

    status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
    // AC: the monitor should not fall asleep if g_done has been set
    if (!TCR_4(__kmp_global.g.g_done)) { // check once more under mutex
      status = pthread_cond_timedwait(&__kmp_wait_cv.c_cond,
                                      &__kmp_wait_mx.m_mutex, &now);
      if (status != 0) {
        if (status != ETIMEDOUT && status != EINTR) {
          KMP_SYSFAIL("pthread_cond_timedwait", status);
        }
      }
    }
    status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);

    TCW_4(__kmp_global.g.g_time.dt.t_value,
          TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);

    KMP_MB(); /* Flush all pending memory write invalidates.  */
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));

  if (__kmp_global.g.g_abort != 0) {
    /* now we need to terminate the worker threads  */
    /* the value of t_abort is the signal we caught */

    int gtid;

    KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
                  __kmp_global.g.g_abort));

    /* terminate the OpenMP worker threads */
    /* TODO this is not valid for sibling threads!!
     * the uber master might not be 0 anymore.. */
    for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
      __kmp_terminate_thread(gtid);

    __kmp_cleanup();

    KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
                  __kmp_global.g.g_abort));

    if (__kmp_global.g.g_abort > 0)
      raise(__kmp_global.g.g_abort);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));

  return thr;
}
#endif // KMP_USE_MONITOR

void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  int status;

  th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
  // sets up worker thread stats
  __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);

  // th->th.th_stats is used to transfer thread-specific stats-pointer to
  // __kmp_launch_worker. So when thread is created (goes into
  // __kmp_launch_worker) it will set its thread local pointer to
  // th->th.th_stats
  if (!KMP_UBER_GTID(gtid)) {
    th->th.th_stats = __kmp_stats_list->push_back(gtid);
  } else {
    // For root threads, __kmp_stats_thread_ptr is set in __kmp_register_root(),
    // so set the th->th.th_stats field to it.
    th->th.th_stats = __kmp_stats_thread_ptr;
  }
  __kmp_release_tas_lock(&__kmp_stats_lock, gtid);

#endif // KMP_STATS_ENABLED

  if (KMP_UBER_GTID(gtid)) {
    KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
    th->th.th_info.ds.ds_thread = pthread_self();
    __kmp_set_stack_info(gtid, th);
    __kmp_check_stack_overlap(th);
    return;
  }

  KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));

  KMP_MB(); /* Flush all pending memory write invalidates.  */

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerState), KMP_ERR(status), __kmp_msg_null);
  }

  /* Set stack size for this thread now.
     The multiple of 2 is there because on some machines, requesting an unusual
     stacksize causes the thread to have an offset before the dummy alloca()
     takes place to create the offset.  Since we want the user to have a
     sufficient stacksize AND support a stack offset, we alloca() twice the
     offset so that the upcoming alloca() does not eliminate any premade offset,
     and also gives the user the stack space they requested for all threads */
  stack_size += gtid * __kmp_stkoffset * 2;
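  // For example, with a (hypothetical) __kmp_stkoffset of 8 KiB, thread T#3
  // requests an extra 3 * 8 KiB * 2 = 48 KiB on top of stack_size.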

#if defined(__ANDROID__) && __ANDROID_API__ < 19
  // Round the stack size to a multiple of the page size. Older versions of
  // Android (until KitKat) would fail pthread_attr_setstacksize with EINVAL
  // if the stack size was not a multiple of the page size.
  stack_size = (stack_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
#endif

  KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_setstacksize(&thread_attr, stack_size);
#ifdef KMP_BACKUP_STKSIZE
  if (status != 0) {
    if (!__kmp_env_stksize) {
      stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
      __kmp_stksize = KMP_BACKUP_STKSIZE;
      KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                    "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                    "bytes\n",
                    gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
      status = pthread_attr_setstacksize(&thread_attr, stack_size);
    }
  }
#endif /* KMP_BACKUP_STKSIZE */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                KMP_HNT(ChangeWorkerStackSize), __kmp_msg_null);
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

#endif /* KMP_THREAD_ATTR */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
  if (status != 0 || !handle) { // ??? Why do we check handle??
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(IncreaseWorkerStackSize), __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(DecreaseWorkerStackSize), __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForWorkerThread), KMP_ERR(status),
                  KMP_HNT(Decrease_NUM_THREADS), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* KMP_THREAD_ATTR */

  KMP_MB(); /* Flush all pending memory write invalidates.  */

  KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));

} // __kmp_create_worker

#if KMP_USE_MONITOR
void __kmp_create_monitor(kmp_info_t *th) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  size_t size;
  int status;
  int auto_adj_size = FALSE;

  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
    // We don't need the monitor thread in case of MAX_BLOCKTIME
    KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
                  "MAX blocktime\n"));
    th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor no-op
    th->th.th_info.ds.ds_gtid = 0;
    return;
  }
  KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));

  KMP_MB(); /* Flush all pending memory write invalidates.  */

  th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
  th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
#if KMP_REAL_TIME_FIX
  TCW_4(__kmp_global.g.g_time.dt.t_value,
        -1); // Will use it for synchronization a bit later.
#else
  TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    auto_adj_size = TRUE;
  }
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetMonitorState), KMP_ERR(status), __kmp_msg_null);
  }

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_getstacksize(&thread_attr, &size);
  KMP_CHECK_SYSFAIL("pthread_attr_getstacksize", status);
#else
  size = __kmp_sys_min_stksize;
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
  }
  if (__kmp_monitor_stksize < __kmp_sys_min_stksize) {
    __kmp_monitor_stksize = __kmp_sys_min_stksize;
  }

  KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes, "
                "requested stacksize = %lu bytes\n",
                size, __kmp_monitor_stksize));

retry:

/* Set stack size for this thread now. */
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes,",
                __kmp_monitor_stksize));
  status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
  if (status != 0) {
    if (auto_adj_size) {
      __kmp_monitor_stksize *= 2;
      goto retry;
    }
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, // should this be fatal?  BB
              KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),
              err_code, KMP_HNT(ChangeMonitorStackSize), __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);

  if (status != 0) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
        __kmp_monitor_stksize *= 2;
        goto retry;
      }
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(IncreaseMonitorStackSize),
                  __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(DecreaseMonitorStackSize),
                  __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForMonitorThread), KMP_ERR(status),
                  KMP_HNT(DecreaseNumberOfThreadsInUse), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#if KMP_REAL_TIME_FIX
  // Wait until the monitor thread has really started and set its *priority*.
  KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
                   sizeof(__kmp_global.g.g_time.dt.t_value));
  __kmp_wait_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value, -1,
               &__kmp_neq_4, NULL);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status != 0) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif

  KMP_MB(); /* Flush all pending memory write invalidates.  */

  KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
                th->th.th_info.ds.ds_thread));

} // __kmp_create_monitor
#endif // KMP_USE_MONITOR

void __kmp_exit_thread(int exit_status) {
  pthread_exit((void *)(intptr_t)exit_status);
} // __kmp_exit_thread

#if KMP_USE_MONITOR
void __kmp_resume_monitor();

void __kmp_reap_monitor(kmp_info_t *th) {
  int status;
  void *exit_val;

  KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
  // If both tid and gtid are 0, it means the monitor did not ever start.
  // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
  KMP_DEBUG_ASSERT(th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid);
  if (th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR) {
    KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));
    return;
  }

  KMP_MB(); /* Flush all pending memory write invalidates.  */

  /* First, check to see whether the monitor thread exists to wake it up. This
     is to avoid performance problems when the monitor sleeps during a
     blocktime-sized interval */

  status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
  if (status != ESRCH) {
    __kmp_resume_monitor(); // Wake up the monitor thread
  }
  KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (exit_val != th) {
    __kmp_fatal(KMP_MSG(ReapMonitorError), KMP_ERR(status), __kmp_msg_null);
  }

  th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
  th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;

  KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  KMP_MB(); /* Flush all pending memory write invalidates.  */
}
#endif // KMP_USE_MONITOR

void __kmp_reap_worker(kmp_info_t *th) {
  int status;
  void *exit_val;

  KMP_MB(); /* Flush all pending memory write invalidates.  */

  KA_TRACE(
      10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));

  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
#ifdef KMP_DEBUG
  /* Don't expose these to the user until we understand when they trigger */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(ReapWorkerError), KMP_ERR(status), __kmp_msg_null);
  }
  if (exit_val != th) {
    KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
                  "exit_val = %p\n",
                  th->th.th_info.ds.ds_gtid, exit_val));
  }
#endif /* KMP_DEBUG */

  KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
                th->th.th_info.ds.ds_gtid));

  KMP_MB(); /* Flush all pending memory write invalidates.  */
}

#if KMP_HANDLE_SIGNALS

static void __kmp_null_handler(int signo) {
  //  Do nothing, for doing SIG_IGN-type actions.
} // __kmp_null_handler

static void __kmp_team_handler(int signo) {
  if (__kmp_global.g.g_abort == 0) {
/* Stage 1 signal handler, let's shut down all of the threads */
#ifdef KMP_DEBUG
    __kmp_debug_printf("__kmp_team_handler: caught signal = %d\n", signo);
#endif
    switch (signo) {
    case SIGHUP:
    case SIGINT:
    case SIGQUIT:
    case SIGILL:
    case SIGABRT:
    case SIGFPE:
    case SIGBUS:
    case SIGSEGV:
#ifdef SIGSYS
    case SIGSYS:
#endif
    case SIGTERM:
      if (__kmp_debug_buf) {
        __kmp_dump_debug_buffer();
      }
      __kmp_unregister_library(); // cleanup shared memory
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_abort, signo);
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_done, TRUE);
      KMP_MB(); // Flush all pending memory write invalidates.
      break;
    default:
#ifdef KMP_DEBUG
      __kmp_debug_printf("__kmp_team_handler: unknown signal type");
#endif
      break;
    }
  }
} // __kmp_team_handler

static void __kmp_sigaction(int signum, const struct sigaction *act,
                            struct sigaction *oldact) {
  int rc = sigaction(signum, act, oldact);
  KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
}

static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
                                      int parallel_init) {
  KMP_MB(); // Flush all pending memory write invalidates.
  KB_TRACE(60,
           ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
  if (parallel_init) {
    struct sigaction new_action;
    struct sigaction old_action;
    new_action.sa_handler = handler_func;
    new_action.sa_flags = 0;
    sigfillset(&new_action.sa_mask);
    __kmp_sigaction(sig, &new_action, &old_action);
    if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
      sigaddset(&__kmp_sigset, sig);
    } else {
      // Restore/keep user's handler if one previously installed.
      __kmp_sigaction(sig, &old_action, NULL);
    }
  } else {
    // Save initial/system signal handlers to see if user handlers installed.
    __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
  }
  KMP_MB(); // Flush all pending memory write invalidates.
} // __kmp_install_one_handler

static void __kmp_remove_one_handler(int sig) {
  KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
  if (sigismember(&__kmp_sigset, sig)) {
    struct sigaction old;
    KMP_MB(); // Flush all pending memory write invalidates.
    __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
    if ((old.sa_handler != __kmp_team_handler) &&
        (old.sa_handler != __kmp_null_handler)) {
      // Restore the user's signal handler.
      KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
                    "restoring: sig=%d\n",
                    sig));
      __kmp_sigaction(sig, &old, NULL);
    }
    sigdelset(&__kmp_sigset, sig);
    KMP_MB(); // Flush all pending memory write invalidates.
  }
} // __kmp_remove_one_handler

void __kmp_install_signals(int parallel_init) {
  KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
  if (__kmp_handle_signals || !parallel_init) {
    // If ! parallel_init, we do not install handlers, just save original
    // handlers. Let us do it even if __kmp_handle_signals is 0.
    sigemptyset(&__kmp_sigset);
    __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
#ifdef SIGSYS
    __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
#endif // SIGSYS
    __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
#ifdef SIGPIPE
    __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
#endif // SIGPIPE
  }
} // __kmp_install_signals

void __kmp_remove_signals(void) {
  int sig;
  KB_TRACE(10, ("__kmp_remove_signals()\n"));
  for (sig = 1; sig < NSIG; ++sig) {
    __kmp_remove_one_handler(sig);
  }
} // __kmp_remove_signals

#endif // KMP_HANDLE_SIGNALS

void __kmp_enable(int new_state) {
#ifdef KMP_CANCEL_THREADS
  int status, old_state;
  status = pthread_setcancelstate(new_state, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
  KMP_DEBUG_ASSERT(old_state == PTHREAD_CANCEL_DISABLE);
#endif
}

void __kmp_disable(int *old_state) {
#ifdef KMP_CANCEL_THREADS
  int status;
  status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif
}

static void __kmp_atfork_prepare(void) {
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
  __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
}

static void __kmp_atfork_parent(void) {
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
}

/* Reset the library so execution in the child starts "all over again" with
   clean data structures in initial states.  Don't worry about freeing memory
   allocated by parent, just abandon it to be safe. */
static void __kmp_atfork_child(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  /* TODO make sure this is done right for nested/sibling */
  // ATT:  Memory leaks are here? TODO: Check it and fix.
  /* KMP_ASSERT( 0 ); */

  ++__kmp_fork_count;

#if KMP_AFFINITY_SUPPORTED
#if KMP_OS_LINUX || KMP_OS_FREEBSD
  // reset the affinity in the child to the initial thread
  // affinity in the parent
  kmp_set_thread_affinity_mask_initial();
#endif
  // Set default not to bind threads tightly in the child (we're expecting
  // over-subscription after the fork and this can improve things for
  // scripting languages that use OpenMP inside process-parallel code).
  __kmp_affinity_type = affinity_none;
  if (__kmp_nested_proc_bind.bind_types != NULL) {
    __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
  }
#endif // KMP_AFFINITY_SUPPORTED

  __kmp_init_runtime = FALSE;
#if KMP_USE_MONITOR
  __kmp_init_monitor = 0;
#endif
  __kmp_init_parallel = FALSE;
  __kmp_init_middle = FALSE;
  __kmp_init_serial = FALSE;
  TCW_4(__kmp_init_gtid, FALSE);
  __kmp_init_common = FALSE;

  TCW_4(__kmp_init_user_locks, FALSE);
#if !KMP_USE_DYNAMIC_LOCK
  __kmp_user_lock_table.used = 1;
  __kmp_user_lock_table.allocated = 0;
  __kmp_user_lock_table.table = NULL;
  __kmp_lock_blocks = NULL;
#endif

  __kmp_all_nth = 0;
  TCW_4(__kmp_nth, 0);

  __kmp_thread_pool = NULL;
  __kmp_thread_pool_insert_pt = NULL;
  __kmp_team_pool = NULL;

  /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate
     here so threadprivate doesn't use stale data */
  KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
                __kmp_threadpriv_cache_list));

  while (__kmp_threadpriv_cache_list != NULL) {

    if (*__kmp_threadpriv_cache_list->addr != NULL) {
      KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",
                    &(*__kmp_threadpriv_cache_list->addr)));

      *__kmp_threadpriv_cache_list->addr = NULL;
    }
    __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list->next;
  }

  __kmp_init_runtime = FALSE;

  /* reset statically initialized locks */
  __kmp_init_bootstrap_lock(&__kmp_initz_lock);
  __kmp_init_bootstrap_lock(&__kmp_stdio_lock);
  __kmp_init_bootstrap_lock(&__kmp_console_lock);
  __kmp_init_bootstrap_lock(&__kmp_task_team_lock);

#if USE_ITT_BUILD
  __kmp_itt_reset(); // reset ITT's global state
#endif /* USE_ITT_BUILD */

  /* This is necessary to make sure no stale data is left around */
  /* AC: customers complain that we use unsafe routines in the atfork
     handler. Mathworks: dlsym() is unsafe. We call dlsym and dlopen
     in dynamic_link when checking for the presence of the shared tbbmalloc
     library. The suggestion is to make the library initialization lazier,
     similar to what is done for __kmpc_begin(). */
  // TODO: synchronize all static initializations with regular library
  //       startup; look at kmp_global.cpp and etc.
  //__kmp_internal_begin ();
}

void __kmp_register_atfork(void) {
  if (__kmp_need_register_atfork) {
    int status = pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
                                __kmp_atfork_child);
    KMP_CHECK_SYSFAIL("pthread_atfork", status);
    __kmp_need_register_atfork = FALSE;
  }
}

void __kmp_suspend_initialize(void) {
  int status;
  status = pthread_mutexattr_init(&__kmp_suspend_mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_condattr_init(&__kmp_suspend_cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
}

void __kmp_suspend_initialize_thread(kmp_info_t *th) {
  ANNOTATE_HAPPENS_AFTER(&th->th.th_suspend_init_count);
  int old_value = KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count);
  int new_value = __kmp_fork_count + 1;
  // Return if already initialized
  if (old_value == new_value)
    return;
  // Wait, then return if being initialized
  if (old_value == -1 ||
      !__kmp_atomic_compare_store(&th->th.th_suspend_init_count, old_value,
                                  -1)) {
    while (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) != new_value) {
      KMP_CPU_PAUSE();
    }
  } else {
    // Claim to be the initializer and do initializations
    int status;
    status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
                               &__kmp_suspend_cond_attr);
    KMP_CHECK_SYSFAIL("pthread_cond_init", status);
    status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
                                &__kmp_suspend_mutex_attr);
    KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
    KMP_ATOMIC_ST_REL(&th->th.th_suspend_init_count, new_value);
    ANNOTATE_HAPPENS_BEFORE(&th->th.th_suspend_init_count);
  }
}
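
// The once-per-fork protocol above, schematically (illustrative summary):
//   th_suspend_init_count == __kmp_fork_count     -> not yet initialized in
//                                                    this process image
//   th_suspend_init_count == -1                   -> another thread is
//                                                    initializing; spin-wait
//   th_suspend_init_count == __kmp_fork_count + 1 -> initialized; return
// The compare-and-store from old_value to -1 elects exactly one initializer,
// and the release store of new_value publishes the cond/mutex to waiters.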

void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
  if (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) > __kmp_fork_count) {
    /* this means we have initialized the suspension pthread objects for this
       thread in this instance of the process */
    int status;

    status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_cond_destroy", status);
    }
    status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_mutex_destroy", status);
    }
    --th->th.th_suspend_init_count;
    KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count) ==
                     __kmp_fork_count);
  }
}
1433 
// return true if lock obtained, false otherwise
int __kmp_try_suspend_mx(kmp_info_t *th) {
  return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
}

void __kmp_lock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
}

void __kmp_unlock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

/* This routine puts the calling thread to sleep after setting the
   sleep bit for the indicated flag variable to true. */
template <class C>
static inline void __kmp_suspend_template(int th_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
  kmp_info_t *th = __kmp_threads[th_gtid];
  int status;
  typename C::flag_t old_spin;

  KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
                flag->get()));

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
                th_gtid, flag->get()));

  /* TODO: shouldn't this use release semantics to ensure that
     __kmp_suspend_initialize_thread gets called first? */
  old_spin = flag->set_sleeping();
  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
      __kmp_pause_status != kmp_soft_paused) {
    flag->unset_sleeping();
    __kmp_unlock_suspend_mx(th);
    return;
  }
  KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
               " was %x\n",
               th_gtid, flag->get(), flag->load(), old_spin));

  if (flag->done_check_val(old_spin)) {
    old_spin = flag->unset_sleeping();
    KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
                 "for spin(%p)\n",
                 th_gtid, flag->get()));
  } else {
    /* Encapsulate in a loop as the documentation states that this may
       "with low probability" return when the condition variable has
       not been signaled or broadcast */
    int deactivated = FALSE;
    TCW_PTR(th->th.th_sleep_loc, (void *)flag);

    while (flag->is_sleeping()) {
#ifdef DEBUG_SUSPEND
      char buffer[128];
      __kmp_suspend_count++;
      __kmp_print_cond(buffer, &th->th.th_suspend_cv);
      __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
                   buffer);
#endif
      // Mark the thread as no longer active (only in the first iteration of
      // the loop).
      if (!deactivated) {
        th->th.th_active = FALSE;
        if (th->th.th_active_in_pool) {
          th->th.th_active_in_pool = FALSE;
          KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
          KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
        }
        deactivated = TRUE;
      }

#if USE_SUSPEND_TIMEOUT
      struct timespec now;
      struct timeval tval;
      int msecs;

      status = gettimeofday(&tval, NULL);
      KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
      TIMEVAL_TO_TIMESPEC(&tval, &now);

      msecs = (4 * __kmp_dflt_blocktime) + 200;
      now.tv_sec += msecs / 1000;
      // Convert the remaining milliseconds to nanoseconds (1 ms = 1e6 ns) and
      // normalize, so tv_nsec stays below 1e9 as pthread_cond_timedwait
      // requires.
      now.tv_nsec += (msecs % 1000) * 1000000;
      if (now.tv_nsec >= 1000000000) {
        now.tv_sec += 1;
        now.tv_nsec -= 1000000000;
      }

      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
                    "pthread_cond_timedwait\n",
                    th_gtid));
      status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
                                      &th->th.th_suspend_mx.m_mutex, &now);
#else
      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
                    " pthread_cond_wait\n",
                    th_gtid));
      status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
                                 &th->th.th_suspend_mx.m_mutex);
#endif // USE_SUSPEND_TIMEOUT

      if ((status != 0) && (status != EINTR) && (status != ETIMEDOUT)) {
        KMP_SYSFAIL("pthread_cond_wait", status);
      }
#ifdef KMP_DEBUG
      if (status == ETIMEDOUT) {
        if (flag->is_sleeping()) {
          KF_TRACE(100,
                   ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
        } else {
          KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
                       "not set!\n",
                       th_gtid));
        }
      } else if (flag->is_sleeping()) {
        KF_TRACE(100,
                 ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
      }
#endif
    } // while

    // Mark the thread as active again (if it was previously marked as
    // inactive)
    if (deactivated) {
      th->th.th_active = TRUE;
      if (TCR_4(th->th.th_in_pool)) {
        KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
        th->th.th_active_in_pool = TRUE;
      }
    }
  }
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
                 buffer);
  }
#endif

  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
}

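// Illustrative sketch of the sleep protocol above: pthread_cond_wait() must
// sit inside a predicate loop under the mutex because it may wake spuriously.
// Hypothetical standalone code; only the pthread calls are real API.
#if 0
#include <pthread.h>
static void example_sleep(pthread_mutex_t *mx, pthread_cond_t *cv,
                          volatile int *sleeping) {
  pthread_mutex_lock(mx);
  while (*sleeping) { // re-check the predicate after every wakeup
    pthread_cond_wait(cv, mx); // atomically releases mx while waiting
  }
  pthread_mutex_unlock(mx);
}
#endif
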
template <bool C, bool S>
void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
template <bool C, bool S>
void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
  __kmp_suspend_template(th_gtid, flag);
}

template void __kmp_suspend_32<false, false>(int, kmp_flag_32<false, false> *);
template void __kmp_suspend_64<false, true>(int, kmp_flag_64<false, true> *);
template void __kmp_suspend_64<true, false>(int, kmp_flag_64<true, false> *);

/* This routine signals the thread specified by target_gtid to wake up
   after setting the sleep bit indicated by the flag argument to FALSE.
   The target thread must already have called __kmp_suspend_template() */
template <class C>
static inline void __kmp_resume_template(int target_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  kmp_info_t *th = __kmp_threads[target_gtid];
  int status;

#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
#endif

  KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
                gtid, target_gtid));
  KMP_DEBUG_ASSERT(gtid != target_gtid);

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  if (!flag) { // coming from __kmp_null_resume_wrapper
    flag = (C *)CCAST(void *, th->th.th_sleep_loc);
  }

  // First, check if the flag is null or its type has changed. If so, someone
  // else woke it up.
  if (!flag || flag->get_type() != flag->get_ptr_type()) { // get_ptr_type
    // simply shows what flag was cast to
    KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                 "awake: flag(%p)\n",
                 gtid, target_gtid, NULL));
    __kmp_unlock_suspend_mx(th);
    return;
  } else { // if multiple threads are sleeping, flag should be internally
    // referring to a specific thread here
    typename C::flag_t old_spin = flag->unset_sleeping();
    if (!flag->is_sleeping_val(old_spin)) {
      KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                   "awake: flag(%p): "
                   "%u => %u\n",
                   gtid, target_gtid, flag->get(), old_spin, flag->load()));
      __kmp_unlock_suspend_mx(th);
      return;
    }
    KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
                 "sleep bit for flag's loc(%p): "
                 "%u => %u\n",
                 gtid, target_gtid, flag->get(), old_spin, flag->load()));
  }
  TCW_PTR(th->th.th_sleep_loc, NULL);

#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
                 target_gtid, buffer);
  }
#endif
  status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, target_gtid));
}

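// Illustrative counterpart to the sketch after __kmp_suspend_template: the
// waker clears the predicate and signals while holding the same mutex, so
// the sleeper cannot miss the transition. Hypothetical standalone code.
#if 0
#include <pthread.h>
static void example_wake(pthread_mutex_t *mx, pthread_cond_t *cv,
                         volatile int *sleeping) {
  pthread_mutex_lock(mx);
  *sleeping = 0; // clear the predicate before signaling
  pthread_cond_signal(cv);
  pthread_mutex_unlock(mx);
}
#endif
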
template <bool C, bool S>
void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
template <bool C, bool S>
void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag) {
  __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
  __kmp_resume_template(target_gtid, flag);
}

template void __kmp_resume_32<false, true>(int, kmp_flag_32<false, true> *);
template void __kmp_resume_64<false, true>(int, kmp_flag_64<false, true> *);

#if KMP_USE_MONITOR
void __kmp_resume_monitor() {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  int status;
#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
                KMP_GTID_MONITOR));
  KMP_DEBUG_ASSERT(gtid != KMP_GTID_MONITOR);
#endif
  status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &__kmp_wait_cv.c_cond);
    __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
                 KMP_GTID_MONITOR, buffer);
  }
#endif
  status = pthread_cond_signal(&__kmp_wait_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, KMP_GTID_MONITOR));
}
#endif // KMP_USE_MONITOR

void __kmp_yield() { sched_yield(); }

void __kmp_gtid_set_specific(int gtid) {
  if (__kmp_init_gtid) {
    int status;
    status = pthread_setspecific(__kmp_gtid_threadprivate_key,
                                 (void *)(intptr_t)(gtid + 1));
    KMP_CHECK_SYSFAIL("pthread_setspecific", status);
  } else {
    KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));
  }
}

int __kmp_gtid_get_specific() {
  int gtid;
  if (!__kmp_init_gtid) {
    KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
                  "KMP_GTID_SHUTDOWN\n"));
    return KMP_GTID_SHUTDOWN;
  }
  gtid = (int)(size_t)pthread_getspecific(__kmp_gtid_threadprivate_key);
  if (gtid == 0) {
    gtid = KMP_GTID_DNE;
  } else {
    gtid--;
  }
  KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
                __kmp_gtid_threadprivate_key, gtid));
  return gtid;
}

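// Note on the encoding above: pthread_getspecific() returns NULL both for
// "key never set" and for a stored value of 0, so the gtid is stored biased
// by +1. A raw value of 0 therefore decodes to KMP_GTID_DNE ("does not
// exist") rather than to global thread id 0.
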
double __kmp_read_cpu_time(void) {
  /*clock_t   t;*/
  struct tms buffer;

  /*t =*/times(&buffer);

  // times() reports CPU time in clock ticks of sysconf(_SC_CLK_TCK);
  // CLOCKS_PER_SEC describes the unit of clock(), not times().
  return (double)(buffer.tms_utime + buffer.tms_cutime) /
         (double)sysconf(_SC_CLK_TCK);
}

int __kmp_read_system_info(struct kmp_sys_info *info) {
  int status;
  struct rusage r_usage;

  memset(info, 0, sizeof(*info));

  status = getrusage(RUSAGE_SELF, &r_usage);
  KMP_CHECK_SYSFAIL_ERRNO("getrusage", status);

  // The maximum resident set size utilized (in kilobytes)
  info->maxrss = r_usage.ru_maxrss;
  // The number of page faults serviced without any I/O
  info->minflt = r_usage.ru_minflt;
  // The number of page faults serviced that required I/O
  info->majflt = r_usage.ru_majflt;
  // The number of times a process was "swapped" out of memory
  info->nswap = r_usage.ru_nswap;
  // The number of times the file system had to perform input
  info->inblock = r_usage.ru_inblock;
  // The number of times the file system had to perform output
  info->oublock = r_usage.ru_oublock;
  // The number of voluntary context switches
  info->nvcsw = r_usage.ru_nvcsw;
  // The number of involuntary (forced) context switches
  info->nivcsw = r_usage.ru_nivcsw;

  return (status != 0);
}

void __kmp_read_system_time(double *delta) {
  double t_ns;
  struct timeval tval;
  struct timespec stop;
  int status;

  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &stop);
  t_ns = TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start);
  *delta = (t_ns * 1e-9);
}

void __kmp_clear_system_time(void) {
  struct timeval tval;
  int status;
  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &__kmp_sys_timer_data.start);
}

static int __kmp_get_xproc(void) {

  int r = 0;

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
        KMP_OS_OPENBSD || KMP_OS_HURD

  r = sysconf(_SC_NPROCESSORS_ONLN);

#elif KMP_OS_DARWIN

  // Bug C77011 High "OpenMP Threads and number of active cores".

  // Find the number of available CPUs.
  kern_return_t rc;
  host_basic_info_data_t info;
  mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
  rc = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&info, &num);
  if (rc == 0 && num == HOST_BASIC_INFO_COUNT) {
    // Cannot use KA_TRACE() here because this code works before trace support
    // is initialized.
    r = info.avail_cpus;
  } else {
    KMP_WARNING(CantGetNumAvailCPU);
    KMP_INFORM(AssumedNumCPU);
  }

#else

#error "Unknown or unsupported OS."

#endif

  return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */

} // __kmp_get_xproc

int __kmp_read_from_file(char const *path, char const *format, ...) {
  int result;
  va_list args;

  va_start(args, format);
  FILE *f = fopen(path, "rb");
  if (f == NULL) {
    va_end(args); // balance the va_start above even on the early return
    return 0;
  }
  result = vfscanf(f, format, args);
  fclose(f);
  va_end(args);

  return result;
}

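// Hypothetical usage sketch, not compiled in; the path here is illustrative,
// not necessarily one the runtime reads. The varargs are forwarded to
// vfscanf(), so the return value is the number of items converted.
#if 0
static void example_usage(void) {
  int example_value = 0;
  if (__kmp_read_from_file("/proc/sys/kernel/threads-max", "%d",
                           &example_value) == 1) {
    // example_value now holds the parsed integer.
  }
}
#endif
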
void __kmp_runtime_initialize(void) {
  int status;
  pthread_mutexattr_t mutex_attr;
  pthread_condattr_t cond_attr;

  if (__kmp_init_runtime) {
    return;
  }

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
  if (!__kmp_cpuinfo.initialized) {
    __kmp_query_cpuid(&__kmp_cpuinfo);
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  __kmp_xproc = __kmp_get_xproc();

#if !KMP_32_BIT_ARCH
  struct rlimit rlim;
  // read stack size of calling thread, save it as default for worker threads;
  // this should be done before reading environment variables
  status = getrlimit(RLIMIT_STACK, &rlim);
  if (status == 0) { // success?
    __kmp_stksize = rlim.rlim_cur;
    __kmp_check_stksize(&__kmp_stksize); // check value and adjust if needed
  }
#endif /* !KMP_32_BIT_ARCH */


  if (sysconf(_SC_THREADS)) {

    /* Query the maximum number of threads */
    __kmp_sys_max_nth = sysconf(_SC_THREAD_THREADS_MAX);
    if (__kmp_sys_max_nth == -1) {
      /* Unlimited threads for NPTL */
      __kmp_sys_max_nth = INT_MAX;
    } else if (__kmp_sys_max_nth <= 1) {
      /* Can't tell, just use PTHREAD_THREADS_MAX */
      __kmp_sys_max_nth = KMP_MAX_NTH;
    }

    /* Query the minimum stack size */
    __kmp_sys_min_stksize = sysconf(_SC_THREAD_STACK_MIN);
    if (__kmp_sys_min_stksize <= 1) {
      __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
    }
  }

  /* Set up minimum number of threads to switch to TLS gtid */
  __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;

  status = pthread_key_create(&__kmp_gtid_threadprivate_key,
                              __kmp_internal_end_dest);
  KMP_CHECK_SYSFAIL("pthread_key_create", status);
  status = pthread_mutexattr_init(&mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_mutex_init(&__kmp_wait_mx.m_mutex, &mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
  status = pthread_condattr_init(&cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
  status = pthread_cond_init(&__kmp_wait_cv.c_cond, &cond_attr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);
#if USE_ITT_BUILD
  __kmp_itt_initialize();
#endif /* USE_ITT_BUILD */

  __kmp_init_runtime = TRUE;
}

void __kmp_runtime_destroy(void) {
  int status;

  if (!__kmp_init_runtime) {
    return; // Nothing to do.
  }

#if USE_ITT_BUILD
  __kmp_itt_destroy();
#endif /* USE_ITT_BUILD */

  status = pthread_key_delete(__kmp_gtid_threadprivate_key);
  KMP_CHECK_SYSFAIL("pthread_key_delete", status);

  status = pthread_mutex_destroy(&__kmp_wait_mx.m_mutex);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_mutex_destroy", status);
  }
  status = pthread_cond_destroy(&__kmp_wait_cv.c_cond);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_cond_destroy", status);
  }
#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_uninitialize();
#endif

  __kmp_init_runtime = FALSE;
}

/* Put the thread to sleep for a time period */
/* NOTE: not currently used anywhere */
void __kmp_thread_sleep(int millis) { sleep((millis + 500) / 1000); }

/* Calculate the elapsed wall clock time for the user */
void __kmp_elapsed(double *t) {
  int status;
#ifdef FIX_SGI_CLOCK
  struct timespec ts;

  status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
  KMP_CHECK_SYSFAIL_ERRNO("clock_gettime", status);
  *t =
      (double)ts.tv_nsec * (1.0 / (double)KMP_NSEC_PER_SEC) + (double)ts.tv_sec;
#else
  struct timeval tv;

  status = gettimeofday(&tv, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  *t =
      (double)tv.tv_usec * (1.0 / (double)KMP_USEC_PER_SEC) + (double)tv.tv_sec;
#endif
}

/* Calculate the elapsed wall clock tick for the user */
void __kmp_elapsed_tick(double *t) { *t = 1 / (double)CLOCKS_PER_SEC; }

/* Return the current time stamp in nsec */
kmp_uint64 __kmp_now_nsec() {
  struct timeval t;
  gettimeofday(&t, NULL);
  kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
                    (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
  return nsec;
}

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
/* Measure clock ticks per millisecond */
void __kmp_initialize_system_tick() {
  kmp_uint64 now, nsec2, diff;
  kmp_uint64 delay = 100000; // 50~100 usec on most machines.
  kmp_uint64 nsec = __kmp_now_nsec();
  kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
  while ((now = __kmp_hardware_timestamp()) < goal)
    ;
  nsec2 = __kmp_now_nsec();
  diff = nsec2 - nsec;
  if (diff > 0) {
    kmp_uint64 tpms = (kmp_uint64)(1e6 * (delay + (now - goal)) / diff);
    if (tpms > 0)
      __kmp_ticks_per_msec = tpms;
  }
}
#endif

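// Derivation of the calibration above: over the busy-wait, the wall clock
// advanced diff nanoseconds while the hardware timestamp advanced
// delay + (now - goal) ticks. Ticks per millisecond is therefore
//   ticks / (diff ns) * 1e6 ns/ms  ==  1e6 * (delay + (now - goal)) / diff.
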
/* Determine whether the given address is mapped into the current address
   space. */

int __kmp_is_address_mapped(void *addr) {

  int found = 0;
  int rc;

#if KMP_OS_LINUX || KMP_OS_HURD

  /* On GNUish OSes, read the /proc/<pid>/maps pseudo-file to get all the
     address ranges mapped into the address space. */

  char *name = __kmp_str_format("/proc/%d/maps", getpid());
  FILE *file = NULL;

  file = fopen(name, "r");
  KMP_ASSERT(file != NULL);

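  // Each line of the maps pseudo-file looks like (one illustrative example):
  //   7f0e88000000-7f0e88021000 rw-p 00000000 00:00 0
  // The fscanf pattern below pulls out the two addresses and the 4-character
  // permission string and discards the rest of the line.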
  for (;;) {

    void *beginning = NULL;
    void *ending = NULL;
    char perms[5];

    rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);
    if (rc == EOF) {
      break;
    }
    KMP_ASSERT(rc == 3 &&
               KMP_STRLEN(perms) == 4); // Make sure all fields are read.

    // Ending address is not included in the region, but beginning is.
    if ((addr >= beginning) && (addr < ending)) {
      perms[2] = 0; // 3rd and 4th characters do not matter.
      if (strcmp(perms, "rw") == 0) {
        // Memory we are looking for should be readable and writable.
        found = 1;
      }
      break;
    }
  }

  // Free resources.
  fclose(file);
  KMP_INTERNAL_FREE(name);
#elif KMP_OS_FREEBSD
  char *buf;
  size_t lstsz;
  int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
  rc = sysctl(mib, 4, NULL, &lstsz, NULL, 0);
  if (rc < 0)
    return 0;
  // Add slack to the estimated size: the entry map can grow between the
  // sizing call above and the read below.
  lstsz = lstsz * 4 / 3;
  buf = reinterpret_cast<char *>(kmpc_malloc(lstsz));
  rc = sysctl(mib, 4, buf, &lstsz, NULL, 0);
  if (rc < 0) {
    kmpc_free(buf);
    return 0;
  }

  char *lw = buf;
  char *up = buf + lstsz;

  while (lw < up) {
    struct kinfo_vmentry *cur = reinterpret_cast<struct kinfo_vmentry *>(lw);
    size_t cursz = cur->kve_structsize;
    if (cursz == 0)
      break;
    void *start = reinterpret_cast<void *>(cur->kve_start);
    void *end = reinterpret_cast<void *>(cur->kve_end);
    // Readable/Writable addresses within current map entry
    if ((addr >= start) && (addr < end)) {
      if ((cur->kve_protection & KVME_PROT_READ) != 0 &&
          (cur->kve_protection & KVME_PROT_WRITE) != 0) {
        found = 1;
        break;
      }
    }
    lw += cursz;
  }
  kmpc_free(buf);

#elif KMP_OS_DARWIN

  /* On OS X*, /proc pseudo filesystem is not available. Try to read memory
     using vm interface. */

  int buffer;
  vm_size_t count;
  rc = vm_read_overwrite(
      mach_task_self(), // Task to read memory of.
      (vm_address_t)(addr), // Address to read from.
      1, // Number of bytes to be read.
      (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
      &count // Address of var to save number of read bytes in.
      );
  if (rc == 0) {
    // Memory successfully read.
    found = 1;
  }

#elif KMP_OS_NETBSD

  int mib[5];
  mib[0] = CTL_VM;
  mib[1] = VM_PROC;
  mib[2] = VM_PROC_MAP;
  mib[3] = getpid();
  mib[4] = sizeof(struct kinfo_vmentry);

  size_t size;
  rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  size = size * 4 / 3;
  struct kinfo_vmentry *kiv = (struct kinfo_vmentry *)KMP_INTERNAL_MALLOC(size);
  KMP_ASSERT(kiv);

  rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  // size is a byte count, so convert it to a number of entries, and test
  // whether addr falls inside an entry's half-open [kve_start, kve_end) range.
  size_t nentries = size / sizeof(struct kinfo_vmentry);
  for (size_t i = 0; i < nentries; i++) {
    if (kiv[i].kve_start <= (uint64_t)addr &&
        (uint64_t)addr < kiv[i].kve_end) {
      found = 1;
      break;
    }
  }
  KMP_INTERNAL_FREE(kiv);
#elif KMP_OS_OPENBSD

  int mib[3];
  mib[0] = CTL_KERN;
  mib[1] = KERN_PROC_VMMAP;
  mib[2] = getpid();

  size_t size;
  uint64_t end;
  rc = sysctl(mib, 3, NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);
  end = size;

  struct kinfo_vmentry kiv = {.kve_start = 0};

  while ((rc = sysctl(mib, 3, &kiv, &size, NULL, 0)) == 0) {
    KMP_ASSERT(size);
    if (kiv.kve_end == end)
      break;

    // Test whether addr falls inside this entry's [kve_start, kve_end) range.
    if (kiv.kve_start <= (uint64_t)addr && (uint64_t)addr < kiv.kve_end) {
      found = 1;
      break;
    }
    kiv.kve_start += 1;
  }
#elif KMP_OS_DRAGONFLY

  // FIXME(DragonFly): Implement this
  found = 1;

#else

#error "Unknown or unsupported OS"

#endif

  return found;

} // __kmp_is_address_mapped

#ifdef USE_LOAD_BALANCE

#if KMP_OS_DARWIN || KMP_OS_NETBSD

// The function returns the rounded value of the system load average during
// the given time interval, which depends on the value of the
// __kmp_load_balance_interval variable (default is 60 sec; other values
// may be 300 sec or 900 sec).
// It returns -1 in case of error.
int __kmp_get_load_balance(int max) {
  double averages[3];
  int ret_avg = 0;

  int res = getloadavg(averages, 3);

  // Check __kmp_load_balance_interval to determine which of averages to use.
  // getloadavg() may return fewer samples than requested, i.e., fewer than 3.
  if (__kmp_load_balance_interval < 180 && (res >= 1)) {
    ret_avg = averages[0]; // 1 min
  } else if ((__kmp_load_balance_interval >= 180 &&
              __kmp_load_balance_interval < 600) &&
             (res >= 2)) {
    ret_avg = averages[1]; // 5 min
  } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
    ret_avg = averages[2]; // 15 min
  } else { // Error occurred
    return -1;
  }

  return ret_avg;
}

#else // Linux* OS

// The function returns the number of running (not sleeping) threads, or -1 in
// case of error. An error can be reported if the Linux* OS kernel is too old
// (without "/proc" support). Counting of running threads stops once max
// running threads have been encountered.
int __kmp_get_load_balance(int max) {
  static int permanent_error = 0;
  static int glb_running_threads = 0; // Saved count of the running threads for
  // the thread balance algorithm
  static double glb_call_time = 0; /* Thread balance algorithm call time */

  int running_threads = 0; // Number of running threads in the system.

  DIR *proc_dir = NULL; // Handle of "/proc/" directory.
  struct dirent *proc_entry = NULL;

  kmp_str_buf_t task_path; // "/proc/<pid>/task/<tid>/" path.
  DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/<tid>/" directory.
  struct dirent *task_entry = NULL;
  int task_path_fixed_len;

  kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
  int stat_file = -1;
  int stat_path_fixed_len;

  int total_processes = 0; // Total number of processes in system.
  int total_threads = 0; // Total number of threads in system.

  double call_time = 0.0;

  __kmp_str_buf_init(&task_path);
  __kmp_str_buf_init(&stat_path);

  __kmp_elapsed(&call_time);

  if (glb_call_time &&
      (call_time - glb_call_time < __kmp_load_balance_interval)) {
    running_threads = glb_running_threads;
    goto finish;
  }

  glb_call_time = call_time;

  // Do not spend time on scanning "/proc/" if we have a permanent error.
  if (permanent_error) {
    running_threads = -1;
    goto finish;
  }

  if (max <= 0) {
    max = INT_MAX;
  }

  // Open "/proc/" directory.
  proc_dir = opendir("/proc");
  if (proc_dir == NULL) {
    // Cannot open "/proc/". Probably the kernel does not support it. Return an
    // error now and in subsequent calls.
    running_threads = -1;
    permanent_error = 1;
    goto finish;
  }

  // Initialize fixed part of task_path. This part will not change.
  __kmp_str_buf_cat(&task_path, "/proc/", 6);
  task_path_fixed_len = task_path.used; // Remember number of used characters.

  proc_entry = readdir(proc_dir);
  while (proc_entry != NULL) {
    // Proc entry is a directory and name starts with a digit. Assume it is a
    // process' directory.
    if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {

      ++total_processes;
      // Make sure init process is the very first in "/proc", so we can replace
      // strcmp( proc_entry->d_name, "1" ) == 0 with simpler total_processes ==
      // 1. We are going to check that total_processes == 1 => d_name == "1" is
      // true (where "=>" is implication). Since C++ does not have => operator,
      // let us replace it with its equivalent: a => b == ! a || b.
      KMP_DEBUG_ASSERT(total_processes != 1 ||
                       strcmp(proc_entry->d_name, "1") == 0);

      // Construct task_path.
      task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
      __kmp_str_buf_cat(&task_path, proc_entry->d_name,
                        KMP_STRLEN(proc_entry->d_name));
      __kmp_str_buf_cat(&task_path, "/task", 5);

      task_dir = opendir(task_path.str);
      if (task_dir == NULL) {
        // A process can finish between reading the "/proc/" directory entry
        // and opening the process' "task/" directory. So, in the general case
        // we should not complain, but have to skip this process and read the
        // next one. But on systems with no "task/" support we would spend a
        // lot of time scanning the "/proc/" tree again and again without any
        // benefit. The "init" process (its pid is 1) should always exist, so
        // if we cannot open "/proc/1/task/", it means "task/" is not supported
        // by the kernel. Report an error now and in the future.
        if (strcmp(proc_entry->d_name, "1") == 0) {
          running_threads = -1;
          permanent_error = 1;
          goto finish;
        }
      } else {
        // Construct fixed part of stat file path.
        __kmp_str_buf_clear(&stat_path);
        __kmp_str_buf_cat(&stat_path, task_path.str, task_path.used);
        __kmp_str_buf_cat(&stat_path, "/", 1);
        stat_path_fixed_len = stat_path.used;

        task_entry = readdir(task_dir);
        while (task_entry != NULL) {
          // It is a directory and name starts with a digit.
          if (task_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {
            ++total_threads;

            // Construct complete stat file path. Easiest way would be:
            //  __kmp_str_buf_print( & stat_path, "%s/%s/stat", task_path.str,
            //  task_entry->d_name );
            // but a series of __kmp_str_buf_cat calls works a bit faster.
            stat_path.used =
                stat_path_fixed_len; // Reset stat path to its fixed part.
            __kmp_str_buf_cat(&stat_path, task_entry->d_name,
                              KMP_STRLEN(task_entry->d_name));
            __kmp_str_buf_cat(&stat_path, "/stat", 5);

            // Note: Low-level API (open/read/close) is used. High-level API
            // (fopen/fclose) works ~ 30 % slower.
            stat_file = open(stat_path.str, O_RDONLY);
            if (stat_file == -1) {
              // We cannot report an error because task (thread) can terminate
              // just before reading this file.
            } else {
              /* Content of "stat" file looks like:
                 24285 (program) S ...

                 It is a single line (if program name does not include funny
                 symbols). First number is a thread id, then name of executable
                 file name in parentheses, then state of the thread. We need
                 just thread state.

                 Good news: Length of program name is 15 characters max. Longer
                 names are truncated.

                 Thus, we need rather short buffer: 15 chars for program name +
                 2 parenthesis, + 3 spaces + ~7 digits of pid = 37.

                 Bad news: Program name may contain special symbols like space,
                 closing parenthesis, or even new line. This makes parsing
                 "stat" file not 100 % reliable. In case of funny program names
                 parsing may fail (report incorrect thread state).

                 Parsing "status" file looks more promising (due to different
                 file structure and escaping special symbols) but reading and
                 parsing of "status" file works slower.
                  -- ln
              */
              char buffer[65];
              int len;
              len = read(stat_file, buffer, sizeof(buffer) - 1);
              if (len >= 0) {
                buffer[len] = 0;
                // Using scanf:
                //     sscanf( buffer, "%*d (%*s) %c ", & state );
                // looks very nice, but searching for a closing parenthesis
                // works a bit faster.
                char *close_parent = strstr(buffer, ") ");
                if (close_parent != NULL) {
                  char state = *(close_parent + 2);
                  if (state == 'R') {
                    ++running_threads;
                    if (running_threads >= max) {
                      goto finish;
                    }
                  }
                }
              }
              close(stat_file);
              stat_file = -1;
            }
          }
          task_entry = readdir(task_dir);
        }
        closedir(task_dir);
        task_dir = NULL;
      }
    }
    proc_entry = readdir(proc_dir);
  }

  // There _might_ be a timing hole where the thread executing this
  // code gets skipped in the load balance, and running_threads is 0.
  // Assert in the debug builds only!!!
  KMP_DEBUG_ASSERT(running_threads > 0);
  if (running_threads <= 0) {
    running_threads = 1;
  }

finish: // Clean up and exit.
  if (proc_dir != NULL) {
    closedir(proc_dir);
  }
  __kmp_str_buf_free(&task_path);
  if (task_dir != NULL) {
    closedir(task_dir);
  }
  __kmp_str_buf_free(&stat_path);
  if (stat_file != -1) {
    close(stat_file);
  }

  glb_running_threads = running_threads;

  return running_threads;

} // __kmp_get_load_balance

#endif // KMP_OS_DARWIN || KMP_OS_NETBSD

#endif // USE_LOAD_BALANCE


#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC ||                            \
      ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) ||                 \
      KMP_ARCH_PPC64 || KMP_ARCH_RISCV64)

// we really only need the case with 1 argument, because CLANG always builds
// a struct of pointers to shared variables referenced in the outlined function
int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
                           void *p_argv[]
#if OMPT_SUPPORT
                           ,
                           void **exit_frame_ptr
#endif
                           ) {
#if OMPT_SUPPORT
  *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
#endif

  switch (argc) {
  default:
    fprintf(stderr, "Too many args to microtask: %d!\n", argc);
    fflush(stderr);
    exit(-1);
  case 0:
    (*pkfn)(&gtid, &tid);
    break;
  case 1:
    (*pkfn)(&gtid, &tid, p_argv[0]);
    break;
  case 2:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
    break;
  case 3:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
    break;
  case 4:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
    break;
  case 5:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
    break;
  case 6:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5]);
    break;
  case 7:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6]);
    break;
  case 8:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7]);
    break;
  case 9:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
    break;
  case 10:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
    break;
  case 11:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
    break;
  case 12:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11]);
    break;
  case 13:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12]);
    break;
  case 14:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13]);
    break;
  case 15:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
    break;
  }

  return 1;
}

#endif
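
// Illustrative sketch of why the argc == 1 case usually suffices: for a
// parallel region, the compiler outlines the body into a microtask and can
// pass the shared variables through a single pointer to a per-region struct.
// Hypothetical shapes only; the actual generated types are compiler-internal.
#if 0
struct example_captures {
  int *a;
  double *b;
};
static void example_outlined(int *gtid, int *tid, void *captured) {
  struct example_captures *c = (struct example_captures *)captured;
  // ... body of the parallel region, using *c->a and *c->b ...
  (void)gtid;
  (void)tid;
  (void)c;
}
// The runtime would then reach it via the argc == 1 case above:
//   (*pkfn)(&gtid, &tid, p_argv[0]);
#endif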

// end of file //