Lines Matching refs:gtid

99   kmp_int32 gtid = __kmp_entry_gtid();  in __kmpc_global_thread_num()  local
101 KC_TRACE(10, ("__kmpc_global_thread_num: T#%d\n", gtid)); in __kmpc_global_thread_num()
103 return gtid; in __kmpc_global_thread_num()
262 int gtid = __kmp_entry_gtid(); in __kmpc_fork_call() local
289 kmp_info_t *master_th = __kmp_threads[gtid]; in __kmpc_fork_call()
295 int tid = __kmp_tid_from_gtid(gtid); in __kmpc_fork_call()
301 OMPT_STORE_RETURN_ADDRESS(gtid); in __kmpc_fork_call()
307 __kmp_fork_call(loc, gtid, fork_context_intel, argc, in __kmpc_fork_call()
314 __kmp_join_call(loc, gtid in __kmpc_fork_call()
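
The entry-point names above (__kmpc_*) belong to the compiler-support layer of the LLVM/Intel OpenMP runtime. For orientation, a minimal sketch of the user-level construct behind the __kmpc_fork_call group, assuming clang's usual lowering of a parallel region: the compiler outlines the region body and hands it to __kmpc_fork_call, and each worker's gtid is resolved inside the runtime exactly as on the lines above.

    /* sketch: build with `clang -fopenmp`; the region below is lowered to a
       __kmpc_fork_call(&loc, 1, outlined_fn, &x) call, and the primary thread
       rejoins through __kmp_join_call. */
    #include <omp.h>
    #include <stdio.h>

    int main(void) {
      int x = 42;
    #pragma omp parallel shared(x)
      {
        printf("thread %d sees x=%d\n", omp_get_thread_num(), x);
      }
      return 0;
    }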
366 int gtid = __kmp_entry_gtid(); in __kmpc_fork_teams() local
367 kmp_info_t *this_thr = __kmp_threads[gtid]; in __kmpc_fork_teams()
388 int tid = __kmp_tid_from_gtid(gtid); in __kmpc_fork_teams()
393 OMPT_STORE_RETURN_ADDRESS(gtid); in __kmpc_fork_teams()
399 __kmp_push_num_teams(loc, gtid, 0, 0); in __kmpc_fork_teams()
406 loc, gtid, fork_context_intel, argc, in __kmpc_fork_teams()
409 __kmp_join_call(loc, gtid in __kmpc_fork_teams()
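
Similarly, the __kmpc_fork_teams lines back a host teams construct. A minimal sketch, assuming a compiler with OpenMP 5.0 host-teams support (e.g. a recent clang); the fork and join go through the same __kmp_fork_call/__kmp_join_call path shown above.

    #include <omp.h>
    #include <stdio.h>

    int main(void) {
      /* lowered to __kmpc_fork_teams; the runtime sets up the team count
         (see the __kmp_push_num_teams call above) before forking. */
    #pragma omp teams num_teams(2)
      {
        printf("team %d of %d\n", omp_get_team_num(), omp_get_num_teams());
      }
      return 0;
    }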
451 int __kmpc_invoke_task_func(int gtid) { return __kmp_invoke_task_func(gtid); } in __kmpc_invoke_task_func() argument
830 void __kmpc_ordered(ident_t *loc, kmp_int32 gtid) { in __kmpc_ordered() argument
835 KC_TRACE(10, ("__kmpc_ordered: called T#%d\n", gtid)); in __kmpc_ordered()
836 __kmp_assert_valid_gtid(gtid); in __kmpc_ordered()
844 __kmp_itt_ordered_prep(gtid); in __kmpc_ordered()
848 th = __kmp_threads[gtid]; in __kmpc_ordered()
854 OMPT_STORE_RETURN_ADDRESS(gtid); in __kmpc_ordered()
856 team = __kmp_team_from_gtid(gtid); in __kmpc_ordered()
863 codeptr_ra = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_ordered()
873 (*th->th.th_dispatch->th_deo_fcn)(&gtid, &cid, loc); in __kmpc_ordered()
875 __kmp_parallel_deo(&gtid, &cid, loc); in __kmpc_ordered()
892 __kmp_itt_ordered_start(gtid); in __kmpc_ordered()
903 void __kmpc_end_ordered(ident_t *loc, kmp_int32 gtid) { in __kmpc_end_ordered() argument
907 KC_TRACE(10, ("__kmpc_end_ordered: called T#%d\n", gtid)); in __kmpc_end_ordered()
908 __kmp_assert_valid_gtid(gtid); in __kmpc_end_ordered()
911 __kmp_itt_ordered_end(gtid); in __kmpc_end_ordered()
915 th = __kmp_threads[gtid]; in __kmpc_end_ordered()
918 (*th->th.th_dispatch->th_dxo_fcn)(&gtid, &cid, loc); in __kmpc_end_ordered()
920 __kmp_parallel_dxo(&gtid, &cid, loc); in __kmpc_end_ordered()
923 OMPT_STORE_RETURN_ADDRESS(gtid); in __kmpc_end_ordered()
927 (ompt_wait_id_t)(uintptr_t)&__kmp_team_from_gtid(gtid) in __kmpc_end_ordered()
929 OMPT_LOAD_RETURN_ADDRESS(gtid)); in __kmpc_end_ordered()
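
The __kmpc_ordered / __kmpc_end_ordered pair brackets each ordered block of a loop with an ordered clause; ordering is enforced through the dispatch deo/dxo callbacks keyed on the caller's gtid (lines 873 and 918 above). A minimal sketch, assuming the standard lowering:

    #include <stdio.h>

    int main(void) {
      /* each iteration's ordered block is entered via __kmpc_ordered and
         left via __kmpc_end_ordered */
    #pragma omp parallel for ordered
      for (int i = 0; i < 8; i++) {
    #pragma omp ordered
        printf("iteration %d\n", i);
      }
      return 0;
    }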
938 kmp_int32 gtid, kmp_indirect_locktag_t tag) { in __kmp_init_indirect_csptr() argument
944 kmp_indirect_lock_t *ilk = __kmp_allocate_indirect_lock(&idx, gtid, tag); in __kmp_init_indirect_csptr()
966 #define KMP_ACQUIRE_TAS_LOCK(lock, gtid) \ argument
970 kmp_int32 tas_busy = KMP_LOCK_BUSY(gtid + 1, tas); \
993 #define KMP_TEST_TAS_LOCK(lock, gtid, rc) \ argument
997 kmp_int32 tas_busy = KMP_LOCK_BUSY(gtid + 1, tas); \
1003 #define KMP_RELEASE_TAS_LOCK(lock, gtid) \ argument
1018 #define KMP_ACQUIRE_FUTEX_LOCK(lock, gtid) \ argument
1021 kmp_int32 gtid_code = (gtid + 1) << 1; \
1048 #define KMP_TEST_FUTEX_LOCK(lock, gtid, rc) \ argument
1052 KMP_LOCK_BUSY(gtid + 1 << 1, futex))) { \
1061 #define KMP_RELEASE_FUTEX_LOCK(lock, gtid) \ argument
1082 kmp_int32 gtid) { in __kmp_get_critical_section_ptr() argument
1093 lck = __kmp_user_lock_allocate(&idx, gtid, kmp_lf_critical_section); in __kmp_get_critical_section_ptr()
1118 __kmp_user_lock_free(&idx, gtid, lck); in __kmp_get_critical_section_ptr()
1187 OMPT_STORE_RETURN_ADDRESS(gtid); in __kmpc_critical()
1197 codeptr_ra = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_critical()
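
The __kmpc_critical lines (only its OMPT return-address loads match gtid here) guard compiler-lowered critical sections; the runtime maps the section to a lock, allocating one on first use as in __kmp_get_critical_section_ptr above. A minimal sketch, assuming the usual lowering:

    #include <stdio.h>

    int main(void) {
      int counter = 0;
    #pragma omp parallel
      {
        /* lowered to a __kmpc_critical / __kmpc_end_critical pair around
           the protected update */
    #pragma omp critical(update)
        counter++;
      }
      printf("counter=%d\n", counter);
      return 0;
    }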
1901 int gtid; in ompc_display_affinity() local
1905 gtid = __kmp_get_gtid(); in ompc_display_affinity()
1906 __kmp_aux_display_affinity(gtid, format); in ompc_display_affinity()
1911 int gtid; in ompc_capture_affinity() local
1917 gtid = __kmp_get_gtid(); in ompc_capture_affinity()
1919 num_required = __kmp_aux_capture_affinity(gtid, format, &capture_buf); in ompc_capture_affinity()
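
ompc_display_affinity and ompc_capture_affinity resolve the calling thread via __kmp_get_gtid() and then defer to the __kmp_aux_* helpers; in this runtime the user-facing OpenMP 5.0 routines omp_display_affinity / omp_capture_affinity reach the same helpers. A minimal sketch (the format string uses the OpenMP 5.0 affinity-format field specifiers):

    #include <omp.h>
    #include <stdio.h>

    int main(void) {
    #pragma omp parallel
      {
        char buf[256];
        /* NULL means "use the current affinity format"
           (OMP_AFFINITY_FORMAT / omp_set_affinity_format) */
        omp_display_affinity(NULL);
        size_t need = omp_capture_affinity(buf, sizeof(buf), "tid=%n aff={%A}");
        if (need < sizeof(buf))
          printf("%s\n", buf);
      }
      return 0;
    }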
1939 int gtid, tid; in kmpc_set_blocktime() local
1942 gtid = __kmp_entry_gtid(); in kmpc_set_blocktime()
1943 tid = __kmp_tid_from_gtid(gtid); in kmpc_set_blocktime()
1944 thread = __kmp_thread_from_gtid(gtid); in kmpc_set_blocktime()
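
kmpc_set_blocktime resolves gtid and tid for the caller before storing the requested blocktime per thread. A minimal sketch, assuming the kmp_set_blocktime extension exposed by this runtime's omp.h, which funnels into the same per-thread setting; this is a KMP-specific API, not standard OpenMP.

    #include <omp.h>

    int main(void) {
      /* 0: threads yield at barriers instead of spin-waiting; similar in
         effect to KMP_BLOCKTIME=0, applied from code for the calling thread */
      kmp_set_blocktime(0);
    #pragma omp parallel
      {
        /* ... parallel work ... */
      }
      return 0;
    }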
2044 void __kmpc_copyprivate(ident_t *loc, kmp_int32 gtid, size_t cpy_size, in __kmpc_copyprivate() argument
2048 KC_TRACE(10, ("__kmpc_copyprivate: called T#%d\n", gtid)); in __kmpc_copyprivate()
2049 __kmp_assert_valid_gtid(gtid); in __kmpc_copyprivate()
2053 data_ptr = &__kmp_team_from_gtid(gtid)->t.t_copypriv_data; in __kmpc_copyprivate()
2073 OMPT_STORE_RETURN_ADDRESS(gtid); in __kmpc_copyprivate()
2077 __kmp_threads[gtid]->th.th_ident = loc; in __kmpc_copyprivate()
2079 __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL); in __kmpc_copyprivate()
2088 OMPT_STORE_RETURN_ADDRESS(gtid); in __kmpc_copyprivate()
2091 __kmp_threads[gtid]->th.th_ident = loc; // TODO: check if it is needed (e.g. in __kmpc_copyprivate()
2094 __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL); in __kmpc_copyprivate()
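
__kmpc_copyprivate implements the copyprivate clause: the thread that executed the single block publishes a pointer into the team's t_copypriv_data, and the two barriers above fence the broadcast. A minimal sketch, assuming the standard lowering:

    #include <omp.h>
    #include <stdio.h>

    int main(void) {
    #pragma omp parallel
      {
        int seed; /* private to each thread */
        /* the single thread's value is broadcast to every thread's copy
           via __kmpc_copyprivate before anyone proceeds */
    #pragma omp single copyprivate(seed)
        seed = 12345;
        printf("thread %d got seed %d\n", omp_get_thread_num(), seed);
      }
      return 0;
    }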
2179 void __kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, in __kmpc_init_lock_with_hint() argument
2190 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_init_lock_with_hint()
2203 void __kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid, in __kmpc_init_nest_lock_with_hint() argument
2214 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_init_nest_lock_with_hint()
2229 void __kmpc_init_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { in __kmpc_init_lock() argument
2240 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_init_lock()
2276 lck = __kmp_user_lock_allocate(user_lock, gtid, 0); in __kmpc_init_lock()
2283 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_init_lock()
2301 void __kmpc_init_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { in __kmpc_init_nest_lock() argument
2312 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_init_nest_lock()
2350 lck = __kmp_user_lock_allocate(user_lock, gtid, 0); in __kmpc_init_nest_lock()
2358 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_init_nest_lock()
2375 void __kmpc_destroy_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { in __kmpc_destroy_lock() argument
2389 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_destroy_lock()
2423 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_destroy_lock()
2448 __kmp_user_lock_free(user_lock, gtid, lck); in __kmpc_destroy_lock()
2454 void __kmpc_destroy_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { in __kmpc_destroy_nest_lock() argument
2463 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_destroy_nest_lock()
2495 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_destroy_nest_lock()
2523 __kmp_user_lock_free(user_lock, gtid, lck); in __kmpc_destroy_nest_lock()
2528 void __kmpc_set_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { in __kmpc_set_lock() argument
2539 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_set_lock()
2551 KMP_ACQUIRE_TAS_LOCK(user_lock, gtid); in __kmpc_set_lock()
2555 KMP_ACQUIRE_FUTEX_LOCK(user_lock, gtid); in __kmpc_set_lock()
2559 __kmp_direct_set[tag]((kmp_dyna_lock_t *)user_lock, gtid); in __kmpc_set_lock()
2594 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_set_lock()
2604 ACQUIRE_LOCK(lck, gtid); in __kmpc_set_lock()
2620 void __kmpc_set_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { in __kmpc_set_nest_lock() argument
2628 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_set_nest_lock()
2641 KMP_D_LOCK_FUNC(user_lock, set)((kmp_dyna_lock_t *)user_lock, gtid); in __kmpc_set_nest_lock()
2691 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_set_nest_lock()
2704 ACQUIRE_NESTED_LOCK(lck, gtid, &acquire_status); in __kmpc_set_nest_lock()
2731 void __kmpc_unset_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { in __kmpc_unset_lock() argument
2740 KMP_RELEASE_TAS_LOCK(user_lock, gtid); in __kmpc_unset_lock()
2744 KMP_RELEASE_FUTEX_LOCK(user_lock, gtid); in __kmpc_unset_lock()
2748 __kmp_direct_unset[tag]((kmp_dyna_lock_t *)user_lock, gtid); in __kmpc_unset_lock()
2753 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_unset_lock()
2782 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_unset_lock()
2810 RELEASE_LOCK(lck, gtid); in __kmpc_unset_lock()
2814 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_unset_lock()
2827 void __kmpc_unset_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { in __kmpc_unset_nest_lock() argument
2834 KMP_D_LOCK_FUNC(user_lock, unset)((kmp_dyna_lock_t *)user_lock, gtid); in __kmpc_unset_nest_lock()
2839 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_unset_nest_lock()
2889 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_unset_nest_lock()
2928 release_status = RELEASE_NESTED_LOCK(lck, gtid); in __kmpc_unset_nest_lock()
2931 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_unset_nest_lock()
2953 int __kmpc_test_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { in __kmpc_test_lock() argument
2964 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_test_lock()
2976 KMP_TEST_TAS_LOCK(user_lock, gtid, rc); in __kmpc_test_lock()
2980 KMP_TEST_FUTEX_LOCK(user_lock, gtid, rc); in __kmpc_test_lock()
2984 rc = __kmp_direct_test[tag]((kmp_dyna_lock_t *)user_lock, gtid); in __kmpc_test_lock()
3028 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_test_lock()
3038 rc = TEST_LOCK(lck, gtid); in __kmpc_test_lock()
3061 int __kmpc_test_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) { in __kmpc_test_nest_lock() argument
3069 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_test_nest_lock()
3079 rc = KMP_D_LOCK_FUNC(user_lock, test)((kmp_dyna_lock_t *)user_lock, gtid); in __kmpc_test_nest_lock()
3134 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmpc_test_nest_lock()
3146 rc = TEST_NESTED_LOCK(lck, gtid); in __kmpc_test_nest_lock()
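
The __kmpc_*_lock entry points are the compiler-side face of the OpenMP lock API: some compilers lower omp_*_lock calls directly to these routines, while clang emits plain library calls to omp_init_lock / omp_set_lock and friends, which in this runtime share the same underlying lock code. A minimal lifecycle sketch at the user level:

    #include <omp.h>
    #include <stdio.h>

    int main(void) {
      omp_lock_t l;
      int sum = 0;
      omp_init_lock(&l);          /* cf. __kmpc_init_lock */
    #pragma omp parallel
      {
        if (!omp_test_lock(&l))   /* cf. __kmpc_test_lock: non-blocking try */
          omp_set_lock(&l);       /* cf. __kmpc_set_lock: blocks            */
        sum += 1;                 /* protected update                       */
        omp_unset_lock(&l);       /* cf. __kmpc_unset_lock                  */
      }
      omp_destroy_lock(&l);       /* cf. __kmpc_destroy_lock                */
      printf("sum=%d\n", sum);
      return 0;
    }

The nested (_nest_) variants additionally track an acquire count for the owning gtid, which is what the acquire_status and release_status values in the lines above report.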
3185 #define __KMP_SET_REDUCTION_METHOD(gtid, rmethod) \ argument
3186 ((__kmp_threads[(gtid)]->th.th_local.packed_reduction_method) = (rmethod))
3188 #define __KMP_GET_REDUCTION_METHOD(gtid) \ argument
3189 (__kmp_threads[(gtid)]->th.th_local.packed_reduction_method)
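
The two macros above cache the packed reduction method in the calling thread's th_local so that __kmpc_reduce(_nowait) and __kmpc_end_reduce(_nowait) agree on how partial results are combined (tree barrier, atomics, critical section, ...). A minimal sketch of a construct that exercises them, assuming the usual lowering of a reduction clause:

    #include <stdio.h>

    int main(void) {
      int sum = 0;
    #pragma omp parallel for reduction(+ : sum)
      for (int i = 1; i <= 100; i++)
        sum += i;
      printf("sum=%d\n", sum); /* 5050 */
      return 0;
    }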
3828 kmp_int32 gtid; in __kmpc_get_taskid() local
3831 gtid = __kmp_get_gtid(); in __kmpc_get_taskid()
3832 if (gtid < 0) { in __kmpc_get_taskid()
3835 thread = __kmp_thread_from_gtid(gtid); in __kmpc_get_taskid()
3842 kmp_int32 gtid; in __kmpc_get_parent_taskid() local
3846 gtid = __kmp_get_gtid(); in __kmpc_get_parent_taskid()
3847 if (gtid < 0) { in __kmpc_get_parent_taskid()
3850 thread = __kmp_thread_from_gtid(gtid); in __kmpc_get_parent_taskid()
3867 void __kmpc_doacross_init(ident_t *loc, int gtid, int num_dims, in __kmpc_doacross_init() argument
3869 __kmp_assert_valid_gtid(gtid); in __kmpc_doacross_init()
3872 kmp_info_t *th = __kmp_threads[gtid]; in __kmpc_doacross_init()
3881 gtid, num_dims, !team->t.t_serialized)); in __kmpc_doacross_init()
3985 KA_TRACE(20, ("__kmpc_doacross_init() exit: T#%d\n", gtid)); in __kmpc_doacross_init()
3988 void __kmpc_doacross_wait(ident_t *loc, int gtid, const kmp_int64 *vec) { in __kmpc_doacross_wait() argument
3989 __kmp_assert_valid_gtid(gtid); in __kmpc_doacross_wait()
3993 kmp_info_t *th = __kmp_threads[gtid]; in __kmpc_doacross_wait()
3998 KA_TRACE(20, ("__kmpc_doacross_wait() enter: called T#%d\n", gtid)); in __kmpc_doacross_wait()
4018 gtid, vec[0], lo, up)); in __kmpc_doacross_wait()
4026 gtid, vec[0], lo, up)); in __kmpc_doacross_wait()
4034 gtid, vec[0], lo, up)); in __kmpc_doacross_wait()
4054 gtid, vec[i], lo, up)); in __kmpc_doacross_wait()
4062 gtid, vec[i], lo, up)); in __kmpc_doacross_wait()
4070 gtid, vec[i], lo, up)); in __kmpc_doacross_wait()
4096 gtid, (iter_number << 5) + shft)); in __kmpc_doacross_wait()
4099 void __kmpc_doacross_post(ident_t *loc, int gtid, const kmp_int64 *vec) { in __kmpc_doacross_post() argument
4100 __kmp_assert_valid_gtid(gtid); in __kmpc_doacross_post()
4104 kmp_info_t *th = __kmp_threads[gtid]; in __kmpc_doacross_post()
4109 KA_TRACE(20, ("__kmpc_doacross_post() enter: called T#%d\n", gtid)); in __kmpc_doacross_post()
4167 KA_TRACE(20, ("__kmpc_doacross_post() exit: T#%d iter %lld posted\n", gtid, in __kmpc_doacross_post()
4171 void __kmpc_doacross_fini(ident_t *loc, int gtid) { in __kmpc_doacross_fini() argument
4172 __kmp_assert_valid_gtid(gtid); in __kmpc_doacross_fini()
4174 kmp_info_t *th = __kmp_threads[gtid]; in __kmpc_doacross_fini()
4178 KA_TRACE(20, ("__kmpc_doacross_fini() enter: called T#%d\n", gtid)); in __kmpc_doacross_fini()
4203 KA_TRACE(20, ("__kmpc_doacross_fini() exit: T#%d\n", gtid)); in __kmpc_doacross_fini()
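
The doacross entry points implement cross-iteration (ordered(n)) dependences: _init records the per-dimension loop bounds, _wait blocks until the iteration named by vec has been posted, _post marks the current iteration finished, and _fini releases the shared buffer. A minimal sketch of a loop that drives them, assuming the standard lowering of depend(sink)/depend(source):

    #include <stdio.h>
    #define N 16

    int main(void) {
      int a[N];
      a[0] = 1;
    #pragma omp parallel
      {
    #pragma omp for ordered(1)
        for (int i = 1; i < N; i++) {
          /* wait until iteration i-1 has posted (__kmpc_doacross_wait) */
    #pragma omp ordered depend(sink : i - 1)
          a[i] = a[i - 1] + 1;
          /* publish this iteration (__kmpc_doacross_post) */
    #pragma omp ordered depend(source)
        }
      }
      printf("a[%d]=%d\n", N - 1, a[N - 1]); /* prints 16 */
      return 0;
    }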