Lines matching refs: gtid — references to the global thread id (gtid) in the LLVM OpenMP runtime (libomp)
79 int gtid);
88 void __kmp_fork_barrier(int gtid, int tid);
89 void __kmp_join_barrier(int gtid);
99 static int __kmp_unregister_root_other_thread(int gtid);
218 int gtid; in __kmp_get_global_thread_id_reg() local
221 gtid = KMP_GTID_DNE; in __kmp_get_global_thread_id_reg()
226 gtid = __kmp_gtid; in __kmp_get_global_thread_id_reg()
231 gtid = __kmp_gtid_get_specific(); in __kmp_get_global_thread_id_reg()
235 gtid = __kmp_get_global_thread_id(); in __kmp_get_global_thread_id_reg()
239 if (gtid == KMP_GTID_DNE) { in __kmp_get_global_thread_id_reg()
246 gtid = __kmp_gtid_get_specific(); in __kmp_get_global_thread_id_reg()
248 gtid = __kmp_register_root(FALSE); in __kmp_get_global_thread_id_reg()
254 KMP_DEBUG_ASSERT(gtid >= 0); in __kmp_get_global_thread_id_reg()
256 return gtid; in __kmp_get_global_thread_id_reg()
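The cluster at lines 218-256 shows __kmp_get_global_thread_id_reg resolving the calling thread's gtid through a fallback chain: a thread-local cache (__kmp_gtid), the thread-specific key (__kmp_gtid_get_specific), a stack-address lookup (__kmp_get_global_thread_id), and finally registration as a new root when the result is still KMP_GTID_DNE. Below is a minimal, self-contained sketch of that chain; every name in it is a hypothetical stand-in, not the real libomp internals.

// Simplified sketch of the gtid lookup fallback seen at lines 218-256.
#include <atomic>
#include <cassert>
#include <cstdio>

constexpr int GTID_DNE = -2;                 // "does not exist" sentinel (cf. KMP_GTID_DNE)

static thread_local int tls_gtid = GTID_DNE; // fast path (cf. __kmp_gtid)
static std::atomic<int> next_gtid{0};        // registration counter (stand-in)

static int gtid_from_specific() {            // stand-in for __kmp_gtid_get_specific()
  return tls_gtid;                           // the real code reads a pthread key
}

static int register_root() {                 // stand-in for __kmp_register_root(FALSE)
  tls_gtid = next_gtid.fetch_add(1);
  return tls_gtid;
}

// Resolve the caller's global thread id, registering the thread if unknown.
int get_global_thread_id_reg() {
  int gtid = tls_gtid;                       // 1) thread-local cache
  if (gtid == GTID_DNE)
    gtid = gtid_from_specific();             // 2) thread-specific key
  if (gtid == GTID_DNE)
    gtid = register_root();                  // 3) unknown thread: register as a root
  assert(gtid >= 0);
  return gtid;
}

int main() { std::printf("gtid = %d\n", get_global_thread_id_reg()); }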
264 int gtid; in __kmp_check_stack_overlap() local
271 gtid = __kmp_gtid_from_thread(th); in __kmp_check_stack_overlap()
273 if (gtid == KMP_GTID_MONITOR) { in __kmp_check_stack_overlap()
275 gtid, stack_beg, stack_end, th->th.th_info.ds.ds_stacksize, in __kmp_check_stack_overlap()
280 gtid, stack_beg, stack_end, th->th.th_info.ds.ds_stacksize, in __kmp_check_stack_overlap()
281 "th_%d stack (%s)", gtid, in __kmp_check_stack_overlap()
288 gtid = __kmp_gtid_from_thread(th); in __kmp_check_stack_overlap()
289 if (__kmp_env_checks == TRUE && !KMP_UBER_GTID(gtid)) { in __kmp_check_stack_overlap()
336 void __kmp_print_storage_map_gtid(int gtid, void *p1, void *p2, size_t size, in __kmp_print_storage_map_gtid() argument
348 if (gtid >= 0) { in __kmp_print_storage_map_gtid()
357 int localProc = __kmp_get_cpu_from_gtid(gtid); in __kmp_print_storage_map_gtid()
364 __kmp_printf_no_lock(" GTID %d localNode %d\n", gtid, in __kmp_print_storage_map_gtid()
367 __kmp_printf_no_lock(" GTID %d\n", gtid); in __kmp_print_storage_map_gtid()
461 static void __kmp_print_thread_storage_map(kmp_info_t *thr, int gtid) { in __kmp_print_thread_storage_map() argument
462 __kmp_print_storage_map_gtid(gtid, thr, thr + 1, sizeof(kmp_info_t), "th_%d", in __kmp_print_thread_storage_map()
463 gtid); in __kmp_print_thread_storage_map()
465 __kmp_print_storage_map_gtid(gtid, &thr->th.th_info, &thr->th.th_team, in __kmp_print_thread_storage_map()
466 sizeof(kmp_desc_t), "th_%d.th_info", gtid); in __kmp_print_thread_storage_map()
468 __kmp_print_storage_map_gtid(gtid, &thr->th.th_local, &thr->th.th_pri_head, in __kmp_print_thread_storage_map()
469 sizeof(kmp_local_t), "th_%d.th_local", gtid); in __kmp_print_thread_storage_map()
472 gtid, &thr->th.th_bar[0], &thr->th.th_bar[bs_last_barrier], in __kmp_print_thread_storage_map()
473 sizeof(kmp_balign_t) * bs_last_barrier, "th_%d.th_bar", gtid); in __kmp_print_thread_storage_map()
475 __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_plain_barrier], in __kmp_print_thread_storage_map()
478 gtid); in __kmp_print_thread_storage_map()
480 __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_forkjoin_barrier], in __kmp_print_thread_storage_map()
483 gtid); in __kmp_print_thread_storage_map()
486 __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_reduction_barrier], in __kmp_print_thread_storage_map()
489 gtid); in __kmp_print_thread_storage_map()
575 int gtid = th->th.th_info.ds.ds_gtid; in __kmp_reset_locks_on_process_detach() local
576 if (gtid == gtid_req) in __kmp_reset_locks_on_process_detach()
578 if (gtid < 0) in __kmp_reset_locks_on_process_detach()
666 int gtid = *gtid_ref; in __kmp_parallel_deo() local
668 kmp_team_t *team = __kmp_team_from_gtid(gtid); in __kmp_parallel_deo()
672 if (__kmp_threads[gtid]->th.th_root->r.r_active) in __kmp_parallel_deo()
674 __kmp_push_sync(gtid, ct_ordered_in_parallel, loc_ref, NULL, 0); in __kmp_parallel_deo()
676 __kmp_push_sync(gtid, ct_ordered_in_parallel, loc_ref, NULL); in __kmp_parallel_deo()
682 KMP_WAIT(&team->t.t_ordered.dt.t_value, __kmp_tid_from_gtid(gtid), KMP_EQ, in __kmp_parallel_deo()
691 int gtid = *gtid_ref; in __kmp_parallel_dxo() local
693 int tid = __kmp_tid_from_gtid(gtid); in __kmp_parallel_dxo()
694 kmp_team_t *team = __kmp_team_from_gtid(gtid); in __kmp_parallel_dxo()
698 if (__kmp_threads[gtid]->th.th_root->r.r_active) in __kmp_parallel_dxo()
699 __kmp_pop_sync(gtid, ct_ordered_in_parallel, loc_ref); in __kmp_parallel_dxo()
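Lines 666-699 (__kmp_parallel_deo / __kmp_parallel_dxo) show the ordered-construct handshake: a thread spins in KMP_WAIT until the team's ordered ticket (t_ordered.dt.t_value) equals its own tid, runs its ordered chunk, and the exit path passes the ticket on. The following self-contained sketch reproduces that ticket idea with hypothetical names and a simple yield loop in place of KMP_WAIT.

// Ticket-style "ordered" handshake suggested by lines 666-699.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

static std::atomic<int> ordered_ticket{0};   // cf. team->t.t_ordered.dt.t_value

void ordered_enter(int tid) {                // cf. __kmp_parallel_deo
  while (ordered_ticket.load(std::memory_order_acquire) != tid)
    std::this_thread::yield();               // the real code uses KMP_WAIT
}

void ordered_exit(int tid, int nproc) {      // cf. __kmp_parallel_dxo
  ordered_ticket.store((tid + 1) % nproc, std::memory_order_release);
}

int main() {
  const int nproc = 4;
  std::vector<std::thread> team;
  for (int tid = 0; tid < nproc; ++tid)
    team.emplace_back([=] {
      ordered_enter(tid);
      std::printf("ordered chunk executed by tid %d\n", tid); // runs in tid order
      ordered_exit(tid, nproc);
    });
  for (auto &t : team) t.join();
}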
717 int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws) { in __kmp_enter_single() argument
726 th = __kmp_threads[gtid]; in __kmp_enter_single()
747 KMP_MASTER_GTID(gtid) && th->th.th_teams_microtask == NULL && in __kmp_enter_single()
757 __kmp_push_workshare(gtid, ct_psingle, id_ref); in __kmp_enter_single()
759 __kmp_check_workshare(gtid, ct_psingle, id_ref); in __kmp_enter_single()
764 __kmp_itt_single_start(gtid); in __kmp_enter_single()
770 void __kmp_exit_single(int gtid) { in __kmp_exit_single() argument
772 __kmp_itt_single_end(gtid); in __kmp_exit_single()
775 __kmp_pop_workshare(gtid, ct_psingle, NULL); in __kmp_exit_single()
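__kmp_enter_single / __kmp_exit_single (lines 717-775) decide which one thread of a team executes a "single" region; a common way to implement that is a compare-and-swap race on a per-construct counter. The sketch below shows only that race, under that assumption and with made-up names; the real routine also handles consistency checks, ITT notification, and teams constructs.

// One-winner selection for a "single" region (illustrative only).
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

static std::atomic<int> single_counter{0};   // advances once per single construct

// Returns 1 for the thread that wins the construct, 0 for everyone else.
int enter_single(int generation) {
  int expected = generation;
  return single_counter.compare_exchange_strong(expected, generation + 1) ? 1 : 0;
}

int main() {
  std::vector<std::thread> team;
  for (int tid = 0; tid < 4; ++tid)
    team.emplace_back([tid] {
      if (enter_single(/*generation=*/0))
        std::printf("tid %d executes the single region\n", tid);
    });
  for (auto &t : team) t.join();
}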
1398 int __kmp_fork_call(ident_t *loc, int gtid, in __kmp_fork_call() argument
1423 KA_TRACE(20, ("__kmp_fork_call: enter T#%d\n", gtid)); in __kmp_fork_call()
1424 if (__kmp_stkpadding > 0 && __kmp_root[gtid] != NULL) { in __kmp_fork_call()
1441 master_th = __kmp_threads[gtid]; // AC: potentially unsafe, not in sync with in __kmp_fork_call()
1460 return_address = OMPT_LOAD_RETURN_ADDRESS(gtid); in __kmp_fork_call()
1515 __kmpc_serialized_parallel(loc, gtid); in __kmp_fork_call()
1532 __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid, in __kmp_fork_call()
1543 ->thread_num = __kmp_tid_from_gtid(gtid); in __kmp_fork_call()
1563 __kmp_invoke_microtask(microtask, gtid, 0, argc, parent_team->t.t_argv in __kmp_fork_call()
1604 __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid, in __kmp_fork_call()
1651 root, parent_team, master_th, gtid)); in __kmp_fork_call()
1652 __kmp_internal_fork(loc, gtid, parent_team); in __kmp_fork_call()
1655 root, parent_team, master_th, gtid)); in __kmp_fork_call()
1661 KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) invoke microtask = %p\n", gtid, in __kmp_fork_call()
1664 if (!parent_team->t.t_invoke(gtid)) { in __kmp_fork_call()
1667 KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) done microtask = %p\n", gtid, in __kmp_fork_call()
1671 KA_TRACE(20, ("__kmp_fork_call: parallel exit T#%d\n", gtid)); in __kmp_fork_call()
1705 gtid, nthreads)); in __kmp_fork_call()
1743 ("__kmp_fork_call: T#%d serializing parallel region\n", gtid)); in __kmp_fork_call()
1745 __kmpc_serialized_parallel(loc, gtid); in __kmp_fork_call()
1763 __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid, in __kmp_fork_call()
1773 ->thread_num = __kmp_tid_from_gtid(gtid); in __kmp_fork_call()
1791 __kmp_invoke_microtask(microtask, gtid, 0, argc, in __kmp_fork_call()
1841 invoker(gtid); in __kmp_fork_call()
1873 __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid, in __kmp_fork_call()
1885 implicit_task_data, 1, __kmp_tid_from_gtid(gtid), in __kmp_fork_call()
1888 ->thread_num = __kmp_tid_from_gtid(gtid); in __kmp_fork_call()
1901 __kmp_invoke_microtask(microtask, gtid, 0, argc, args in __kmp_fork_call()
1934 __ompt_lw_taskteam_init(&lwt, master_th, gtid, &ompt_parallel_data, in __kmp_fork_call()
1943 KA_TRACE(20, ("__kmp_fork_call: T#%d serial exit\n", gtid)); in __kmp_fork_call()
1950 KA_TRACE(20, ("__kmp_fork_call: T#%d serial exit\n", gtid)); in __kmp_fork_call()
2135 gtid, parent_team->t.t_id, team->t.t_master_tid, team->t.t_id, in __kmp_fork_call()
2163 __kmp_fork_team_threads(root, team, master_th, gtid); in __kmp_fork_call()
2193 __kmp_itt_region_forking(gtid, team->t.t_nproc, 0); in __kmp_fork_call()
2199 KMP_DEBUG_ASSERT(team == __kmp_threads[gtid]->th.th_team); in __kmp_fork_call()
2203 root, team, master_th, gtid)); in __kmp_fork_call()
2216 __kmp_internal_fork(loc, gtid, team); in __kmp_fork_call()
2219 root, team, master_th, gtid)); in __kmp_fork_call()
2223 KA_TRACE(20, ("__kmp_fork_call: parallel exit T#%d\n", gtid)); in __kmp_fork_call()
2228 KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) invoke microtask = %p\n", gtid, in __kmp_fork_call()
2240 if (!team->t.t_invoke(gtid)) { in __kmp_fork_call()
2251 KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) done microtask = %p\n", gtid, in __kmp_fork_call()
2255 KA_TRACE(20, ("__kmp_fork_call: parallel exit T#%d\n", gtid)); in __kmp_fork_call()
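The __kmp_fork_call references (lines 1398-2255) trace the shape of a parallel-region fork: with one thread the region is serialized and the microtask is invoked directly on the primary thread; otherwise the team threads are forked, released through __kmp_internal_fork, and the primary thread runs its own share via team->t.t_invoke(gtid). The sketch below captures only that structure with hypothetical types; OMPT, nested teams, and hot-team reuse are all omitted, and the join is folded in for brevity (libomp performs it in __kmp_join_call).

// Structural sketch of the fork path, not the real control flow.
#include <cstdio>
#include <functional>
#include <thread>
#include <vector>

using microtask_t = std::function<void(int /*gtid*/, int /*tid*/)>;

void fork_call(int gtid, int nthreads, const microtask_t &microtask) {
  if (nthreads == 1) {                   // serialized parallel region
    microtask(gtid, /*tid=*/0);          // cf. __kmp_invoke_microtask on the primary thread
    return;
  }
  std::vector<std::thread> workers;      // cf. __kmp_fork_team_threads / __kmp_internal_fork
  for (int tid = 1; tid < nthreads; ++tid)
    workers.emplace_back([&, tid] { microtask(/*gtid=*/tid, tid); });
  microtask(gtid, /*tid=*/0);            // cf. team->t.t_invoke(gtid)
  for (auto &w : workers) w.join();      // libomp does this in __kmp_join_call / join barrier
}

int main() {
  fork_call(/*gtid=*/0, /*nthreads=*/4, [](int gtid, int tid) {
    std::printf("microtask: gtid=%d tid=%d\n", gtid, tid);
  });
}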
2275 static inline void __kmp_join_ompt(int gtid, kmp_info_t *thread, in __kmp_join_ompt() argument
2289 void __kmp_join_call(ident_t *loc, int gtid in __kmp_join_call() argument
2303 KA_TRACE(20, ("__kmp_join_call: enter T#%d\n", gtid)); in __kmp_join_call()
2306 master_th = __kmp_threads[gtid]; in __kmp_join_call()
2352 __kmpc_end_serialized_parallel(loc, gtid); in __kmp_join_call()
2368 __kmp_internal_join(loc, gtid, team); in __kmp_join_call()
2395 __kmp_itt_frame_submit(gtid, team->t.t_region_time, in __kmp_join_call()
2400 __kmp_itt_region_joined(gtid); in __kmp_join_call()
2464 __kmp_join_ompt(gtid, master_th, parent_team, &ompt_parallel_data, in __kmp_join_call()
2580 __kmp_join_ompt(gtid, master_th, parent_team, parallel_data, flags, in __kmp_join_call()
2586 KA_TRACE(20, ("__kmp_join_call: exit T#%d\n", gtid)); in __kmp_join_call()
2623 void __kmp_set_num_threads(int new_nth, int gtid) { in __kmp_set_num_threads() argument
2636 thread = __kmp_threads[gtid]; in __kmp_set_num_threads()
2691 void __kmp_set_max_active_levels(int gtid, int max_active_levels) { in __kmp_set_max_active_levels() argument
2696 gtid, max_active_levels)); in __kmp_set_max_active_levels()
2708 gtid, max_active_levels)); in __kmp_set_max_active_levels()
2726 gtid, max_active_levels)); in __kmp_set_max_active_levels()
2728 thread = __kmp_threads[gtid]; in __kmp_set_max_active_levels()
2736 int __kmp_get_max_active_levels(int gtid) { in __kmp_get_max_active_levels() argument
2739 KF_TRACE(10, ("__kmp_get_max_active_levels: thread %d\n", gtid)); in __kmp_get_max_active_levels()
2742 thread = __kmp_threads[gtid]; in __kmp_get_max_active_levels()
2746 gtid, thread->th.th_current_task, in __kmp_get_max_active_levels()
2755 void __kmp_set_schedule(int gtid, kmp_sched_t kind, int chunk) { in __kmp_set_schedule() argument
2761 gtid, (int)kind, chunk)); in __kmp_set_schedule()
2781 thread = __kmp_threads[gtid]; in __kmp_set_schedule()
2812 void __kmp_get_schedule(int gtid, kmp_sched_t *kind, int *chunk) { in __kmp_get_schedule() argument
2816 KF_TRACE(10, ("__kmp_get_schedule: thread %d\n", gtid)); in __kmp_get_schedule()
2819 thread = __kmp_threads[gtid]; in __kmp_get_schedule()
2860 int __kmp_get_ancestor_thread_num(int gtid, int level) { in __kmp_get_ancestor_thread_num() argument
2866 KF_TRACE(10, ("__kmp_get_ancestor_thread_num: thread %d %d\n", gtid, level)); in __kmp_get_ancestor_thread_num()
2874 thr = __kmp_threads[gtid]; in __kmp_get_ancestor_thread_num()
2897 return __kmp_tid_from_gtid(gtid); in __kmp_get_ancestor_thread_num()
2918 int __kmp_get_team_size(int gtid, int level) { in __kmp_get_team_size() argument
2924 KF_TRACE(10, ("__kmp_get_team_size: thread %d %d\n", gtid, level)); in __kmp_get_team_size()
2932 thr = __kmp_threads[gtid]; in __kmp_get_team_size()
3329 int gtid; in __kmp_print_structure() local
3330 for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) { in __kmp_print_structure()
3331 __kmp_printf("%2d", gtid); in __kmp_print_structure()
3333 __kmp_printf(" %p", __kmp_threads[gtid]); in __kmp_print_structure()
3336 __kmp_printf(" %p", __kmp_root[gtid]); in __kmp_print_structure()
3346 int gtid; in __kmp_print_structure() local
3347 for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) { in __kmp_print_structure()
3348 kmp_info_t const *thread = __kmp_threads[gtid]; in __kmp_print_structure()
3350 __kmp_printf("GTID %2d %p:\n", gtid, thread); in __kmp_print_structure()
3376 int gtid; in __kmp_print_structure() local
3377 for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) { in __kmp_print_structure()
3378 kmp_root_t const *root = __kmp_root[gtid]; in __kmp_print_structure()
3380 __kmp_printf("GTID %2d %p:\n", gtid, root); in __kmp_print_structure()
3605 int gtid; in __kmp_register_root() local
3645 for (gtid = (initial_thread ? 0 : 1); TCR_PTR(__kmp_threads[gtid]) != NULL; in __kmp_register_root()
3646 gtid++) in __kmp_register_root()
3649 ("__kmp_register_root: found slot in threads array: T#%d\n", gtid)); in __kmp_register_root()
3650 KMP_ASSERT(gtid < __kmp_threads_capacity); in __kmp_register_root()
3681 if (!(root = __kmp_root[gtid])) { in __kmp_register_root()
3682 root = __kmp_root[gtid] = (kmp_root_t *)__kmp_allocate(sizeof(kmp_root_t)); in __kmp_register_root()
3688 __kmp_stats_thread_ptr = __kmp_stats_list->push_back(gtid); in __kmp_register_root()
3701 __kmp_print_thread_storage_map(root_thread, gtid); in __kmp_register_root()
3703 root_thread->th.th_info.ds.ds_gtid = gtid; in __kmp_register_root()
3709 root_thread->th.th_cons = __kmp_allocate_cons_stack(gtid); in __kmp_register_root()
3738 TCW_SYNC_PTR(__kmp_threads[gtid], root_thread); in __kmp_register_root()
3748 __kmp_initialize_info(root_thread, root->r.r_root_team, 0, gtid); in __kmp_register_root()
3752 __kmp_gtid_set_specific(gtid); in __kmp_register_root()
3755 __kmp_itt_thread_name(gtid); in __kmp_register_root()
3759 __kmp_gtid = gtid; in __kmp_register_root()
3761 __kmp_create_worker(gtid, root_thread, __kmp_stksize); in __kmp_register_root()
3762 KMP_DEBUG_ASSERT(__kmp_gtid_get_specific() == gtid); in __kmp_register_root()
3766 gtid, __kmp_gtid_from_tid(0, root->r.r_hot_team), in __kmp_register_root()
3787 __kmp_affinity_set_init_mask(gtid, TRUE); in __kmp_register_root()
3832 return gtid; in __kmp_register_root()
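__kmp_register_root (lines 3605-3832) claims a slot in the global thread table for a new root (uber) thread: scan for a free index starting at 0 for the initial thread or 1 otherwise, assert the capacity, allocate the root and thread records, publish them, and seed the thread-local gtid. A self-contained sketch with deliberately simplified, hypothetical data structures:

// Simplified root registration; records are leaked intentionally for brevity.
#include <cassert>
#include <cstdio>

constexpr int CAPACITY = 8;

struct root_t { bool active = false; };
struct info_t { int ds_gtid = -1; root_t *root = nullptr; };

static info_t *g_threads[CAPACITY] = {};   // cf. __kmp_threads[]
static root_t *g_root[CAPACITY]    = {};   // cf. __kmp_root[]
static thread_local int tls_gtid   = -1;   // cf. __kmp_gtid / gtid-specific key

int register_root(bool initial_thread) {
  int gtid = initial_thread ? 0 : 1;       // later roots start scanning at slot 1
  while (gtid < CAPACITY && g_threads[gtid] != nullptr)
    ++gtid;
  assert(gtid < CAPACITY);                 // cf. KMP_ASSERT(gtid < __kmp_threads_capacity)

  if (!g_root[gtid])
    g_root[gtid] = new root_t{};
  info_t *th  = new info_t{};
  th->ds_gtid = gtid;
  th->root    = g_root[gtid];
  g_threads[gtid] = th;                    // cf. TCW_SYNC_PTR(__kmp_threads[gtid], root_thread)
  tls_gtid = gtid;                         // cf. __kmp_gtid_set_specific(gtid)
  return gtid;
}

int main() {
  std::printf("initial root gtid = %d\n", register_root(true));   // 0
  std::printf("second  root gtid = %d\n", register_root(false));  // 1
}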
3864 static int __kmp_reset_root(int gtid, kmp_root_t *root) { in __kmp_reset_root() argument
3951 void __kmp_unregister_root_current_thread(int gtid) { in __kmp_unregister_root_current_thread() argument
3952 KA_TRACE(1, ("__kmp_unregister_root_current_thread: enter T#%d\n", gtid)); in __kmp_unregister_root_current_thread()
3960 gtid)); in __kmp_unregister_root_current_thread()
3964 kmp_root_t *root = __kmp_root[gtid]; in __kmp_unregister_root_current_thread()
3966 KMP_DEBUG_ASSERT(__kmp_threads && __kmp_threads[gtid]); in __kmp_unregister_root_current_thread()
3967 KMP_ASSERT(KMP_UBER_GTID(gtid)); in __kmp_unregister_root_current_thread()
3968 KMP_ASSERT(root == __kmp_threads[gtid]->th.th_root); in __kmp_unregister_root_current_thread()
3973 kmp_info_t *thread = __kmp_threads[gtid]; in __kmp_unregister_root_current_thread()
3986 __kmp_reset_root(gtid, root); in __kmp_unregister_root_current_thread()
3990 ("__kmp_unregister_root_current_thread: T#%d unregistered\n", gtid)); in __kmp_unregister_root_current_thread()
3999 static int __kmp_unregister_root_other_thread(int gtid) { in __kmp_unregister_root_other_thread() argument
4000 kmp_root_t *root = __kmp_root[gtid]; in __kmp_unregister_root_other_thread()
4003 KA_TRACE(1, ("__kmp_unregister_root_other_thread: enter T#%d\n", gtid)); in __kmp_unregister_root_other_thread()
4004 KMP_DEBUG_ASSERT(__kmp_threads && __kmp_threads[gtid]); in __kmp_unregister_root_other_thread()
4005 KMP_ASSERT(KMP_UBER_GTID(gtid)); in __kmp_unregister_root_other_thread()
4006 KMP_ASSERT(root == __kmp_threads[gtid]->th.th_root); in __kmp_unregister_root_other_thread()
4009 r = __kmp_reset_root(gtid, root); in __kmp_unregister_root_other_thread()
4011 ("__kmp_unregister_root_other_thread: T#%d unregistered\n", gtid)); in __kmp_unregister_root_other_thread()
4019 kmp_int32 gtid = __kmp_entry_gtid(); in __kmp_task_info() local
4020 kmp_int32 tid = __kmp_tid_from_gtid(gtid); in __kmp_task_info()
4021 kmp_info_t *this_thr = __kmp_threads[gtid]; in __kmp_task_info()
4028 gtid, tid, this_thr, team, steam, this_thr->th.th_current_task, in __kmp_task_info()
4037 int tid, int gtid) { in __kmp_initialize_info() argument
4077 tid, gtid, this_thr, this_thr->th.th_current_task)); in __kmp_initialize_info()
4083 tid, gtid, this_thr, this_thr->th.th_current_task)); in __kmp_initialize_info()
4097 gtid, this_thr->th.th_pri_common, this_thr->th.th_pri_common + 1, in __kmp_initialize_info()
4098 sizeof(struct common_table), "th_%d.th_pri_common\n", gtid); in __kmp_initialize_info()
4137 KD_TRACE(10, ("__kmp_initialize_info: T#%d max_nproc: %d\n", gtid, in __kmp_initialize_info()
4151 gtid, &dispatch->th_disp_buffer[0], in __kmp_initialize_info()
4157 gtid, team->t.t_id, gtid); in __kmp_initialize_info()
5583 int gtid; in __kmp_free_thread() local
5642 gtid = this_th->th.th_info.ds.ds_gtid; in __kmp_free_thread()
5645 if (__kmp_thread_pool_insert_pt->th.th_info.ds.ds_gtid > gtid) { in __kmp_free_thread()
5660 for (; (*scan != NULL) && ((*scan)->th.th_info.ds.ds_gtid < gtid); in __kmp_free_thread()
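The __kmp_free_thread lines (5583-5660) show a worker being returned to the thread pool with the pool kept sorted by ascending gtid, using a pointer-to-pointer scan (the *scan loop at line 5660). A minimal sketch of that ordered insertion over a hypothetical pool node:

// Ordered insertion into a singly linked thread pool, keyed by gtid.
#include <cstdio>

struct pool_node {
  int gtid;
  pool_node *next;
};

void pool_insert(pool_node **head, pool_node *th) {
  pool_node **scan = head;
  while (*scan != nullptr && (*scan)->gtid < th->gtid)   // cf. the scan loop at 5660
    scan = &(*scan)->next;
  th->next = *scan;
  *scan = th;
}

int main() {
  pool_node a{3, nullptr}, b{1, nullptr}, c{2, nullptr};
  pool_node *pool = nullptr;
  pool_insert(&pool, &a);
  pool_insert(&pool, &b);
  pool_insert(&pool, &c);
  for (pool_node *p = pool; p; p = p->next)
    std::printf("pooled gtid %d\n", p->gtid);            // prints 1 2 3
}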
5704 int gtid = this_thr->th.th_info.ds.ds_gtid; in __kmp_launch_thread() local
5709 KA_TRACE(10, ("__kmp_launch_thread: T#%d start\n", gtid)); in __kmp_launch_thread()
5712 this_thr->th.th_cons = __kmp_allocate_cons_stack(gtid); // ATT: Memory leak? in __kmp_launch_thread()
5735 KMP_DEBUG_ASSERT(this_thr == __kmp_threads[gtid]); in __kmp_launch_thread()
5739 KA_TRACE(20, ("__kmp_launch_thread: T#%d waiting for work\n", gtid)); in __kmp_launch_thread()
5742 __kmp_fork_barrier(gtid, KMP_GTID_DNE); in __kmp_launch_thread()
5759 gtid, (*pteam)->t.t_id, __kmp_tid_from_gtid(gtid), in __kmp_launch_thread()
5770 rc = (*pteam)->t.t_invoke(gtid); in __kmp_launch_thread()
5775 gtid, (*pteam)->t.t_id, __kmp_tid_from_gtid(gtid), in __kmp_launch_thread()
5787 __kmp_join_barrier(gtid); in __kmp_launch_thread()
5800 __kmp_common_destroy_gtid(gtid); in __kmp_launch_thread()
5802 KA_TRACE(10, ("__kmp_launch_thread: T#%d done\n", gtid)); in __kmp_launch_thread()
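__kmp_launch_thread (lines 5704-5802) is the worker's main loop: wait at the fork barrier for work, run the team's invoke routine, pass the join barrier, and repeat until shutdown. The sketch below models that loop with a condition variable standing in for __kmp_fork_barrier / __kmp_join_barrier; the work_queue type and its fields are hypothetical.

// Worker main loop, reduced to "wait for work, run it, repeat".
#include <condition_variable>
#include <cstdio>
#include <functional>
#include <mutex>
#include <thread>

struct work_queue {                        // stand-in for the team pointer handed to the worker
  std::mutex m;
  std::condition_variable cv;
  std::function<void(int)> task;           // cf. (*pteam)->t.t_invoke
  bool shutdown = false;
};

void launch_thread(work_queue &q, int gtid) {
  std::printf("T#%d start\n", gtid);
  for (;;) {
    std::unique_lock<std::mutex> lk(q.m);  // cf. __kmp_fork_barrier(gtid, ...)
    q.cv.wait(lk, [&] { return static_cast<bool>(q.task) || q.shutdown; });
    if (q.task) {
      auto task = std::move(q.task);
      q.task = nullptr;
      lk.unlock();
      task(gtid);                          // cf. rc = (*pteam)->t.t_invoke(gtid)
      // __kmp_join_barrier(gtid) would synchronize with the primary thread here
      continue;
    }
    break;                                 // shutdown requested
  }
  std::printf("T#%d done\n", gtid);        // cf. __kmp_common_destroy_gtid(gtid)
}

int main() {
  work_queue q;
  std::thread worker(launch_thread, std::ref(q), 1);
  {
    std::lock_guard<std::mutex> lk(q.m);
    q.task = [](int gtid) { std::printf("microtask on T#%d\n", gtid); };
  }
  q.cv.notify_one();
  {
    std::lock_guard<std::mutex> lk(q.m);
    q.shutdown = true;
  }
  q.cv.notify_one();
  worker.join();
}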
5816 int gtid = (kmp_intptr_t)specific_gtid - 1; in __kmp_internal_end_dest() local
5821 KA_TRACE(30, ("__kmp_internal_end_dest: T#%d\n", gtid)); in __kmp_internal_end_dest()
5825 __kmp_internal_end_thread(gtid); in __kmp_internal_end_dest()
5872 int gtid; in __kmp_reap_thread() local
5876 gtid = thread->th.th_info.ds.ds_gtid; in __kmp_reap_thread()
5883 gtid)); in __kmp_reap_thread()
5921 KMP_DEBUG_ASSERT(__kmp_threads[gtid] == thread); in __kmp_reap_thread()
5922 TCW_SYNC_PTR(__kmp_threads[gtid], NULL); in __kmp_reap_thread()
6134 int gtid = (gtid_req >= 0) ? gtid_req : __kmp_gtid_get_specific(); in __kmp_internal_end_library() local
6136 10, ("__kmp_internal_end_library: enter T#%d (%d)\n", gtid, gtid_req)); in __kmp_internal_end_library()
6137 if (gtid == KMP_GTID_SHUTDOWN) { in __kmp_internal_end_library()
6141 } else if (gtid == KMP_GTID_MONITOR) { in __kmp_internal_end_library()
6145 } else if (gtid == KMP_GTID_DNE) { in __kmp_internal_end_library()
6149 } else if (KMP_UBER_GTID(gtid)) { in __kmp_internal_end_library()
6151 if (__kmp_root[gtid]->r.r_active) { in __kmp_internal_end_library()
6157 gtid)); in __kmp_internal_end_library()
6162 ("__kmp_internal_end_library: unregistering sibling T#%d\n", gtid)); in __kmp_internal_end_library()
6163 __kmp_unregister_root_current_thread(gtid); in __kmp_internal_end_library()
6247 int gtid = (gtid_req >= 0) ? gtid_req : __kmp_gtid_get_specific(); in __kmp_internal_end_thread() local
6249 ("__kmp_internal_end_thread: enter T#%d (%d)\n", gtid, gtid_req)); in __kmp_internal_end_thread()
6250 if (gtid == KMP_GTID_SHUTDOWN) { in __kmp_internal_end_thread()
6254 } else if (gtid == KMP_GTID_MONITOR) { in __kmp_internal_end_thread()
6258 } else if (gtid == KMP_GTID_DNE) { in __kmp_internal_end_thread()
6263 } else if (KMP_UBER_GTID(gtid)) { in __kmp_internal_end_thread()
6265 if (__kmp_root[gtid]->r.r_active) { in __kmp_internal_end_thread()
6270 gtid)); in __kmp_internal_end_thread()
6274 gtid)); in __kmp_internal_end_thread()
6275 __kmp_unregister_root_current_thread(gtid); in __kmp_internal_end_thread()
6279 KA_TRACE(10, ("__kmp_internal_end_thread: worker thread T#%d\n", gtid)); in __kmp_internal_end_thread()
6281 if (gtid >= 0) { in __kmp_internal_end_thread()
6282 __kmp_threads[gtid]->th.th_task_team = NULL; in __kmp_internal_end_thread()
6287 gtid)); in __kmp_internal_end_thread()
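__kmp_internal_end_library and __kmp_internal_end_thread (lines 6134-6287) first classify the caller by its gtid: negative sentinels mean "shutting down", "monitor thread", or "no gtid", while a non-negative value is either a root (uber) thread or a worker. The sketch below shows only that dispatch; the sentinel values and is_uber() check are illustrative stand-ins, the real constants and KMP_UBER_GTID live in kmp.h.

// Dispatch on the gtid classification before tearing a thread down.
#include <cstdio>

enum gtid_sentinel : int {      // stand-ins for KMP_GTID_DNE / _SHUTDOWN / _MONITOR
  GTID_DNE      = -2,
  GTID_SHUTDOWN = -3,
  GTID_MONITOR  = -4,
};

bool is_uber(int gtid) { return gtid == 0; }   // stand-in for KMP_UBER_GTID(gtid)

void internal_end_thread(int gtid) {
  if (gtid == GTID_SHUTDOWN)     std::printf("already shutting down\n");
  else if (gtid == GTID_MONITOR) std::printf("monitor thread: nothing to do\n");
  else if (gtid == GTID_DNE)     std::printf("thread has no gtid: nothing to do\n");
  else if (is_uber(gtid))        std::printf("root T#%d: unregister root\n", gtid);
  else                           std::printf("worker T#%d: detach task team\n", gtid);
}

int main() {
  internal_end_thread(GTID_DNE);
  internal_end_thread(0);   // root
  internal_end_thread(3);   // worker
}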
6630 int i, gtid; in __kmp_do_serial_initialize() local
6854 gtid = __kmp_register_root(TRUE); in __kmp_do_serial_initialize()
6855 KA_TRACE(10, ("__kmp_do_serial_initialize T#%d\n", gtid)); in __kmp_do_serial_initialize()
6856 KMP_ASSERT(KMP_UBER_GTID(gtid)); in __kmp_do_serial_initialize()
6857 KMP_ASSERT(KMP_INITIAL_GTID(gtid)); in __kmp_do_serial_initialize()
7047 int gtid = __kmp_entry_gtid(); // this might be a new root in __kmp_parallel_initialize() local
7076 KMP_ASSERT(KMP_UBER_GTID(gtid)); in __kmp_parallel_initialize()
7120 void __kmp_run_before_invoked_task(int gtid, int tid, kmp_info_t *this_thr, in __kmp_run_before_invoked_task() argument
7140 __kmp_push_parallel(gtid, team->t.t_ident); in __kmp_run_before_invoked_task()
7145 void __kmp_run_after_invoked_task(int gtid, int tid, kmp_info_t *this_thr, in __kmp_run_after_invoked_task() argument
7148 __kmp_pop_parallel(gtid, team->t.t_ident); in __kmp_run_after_invoked_task()
7153 int __kmp_invoke_task_func(int gtid) { in __kmp_invoke_task_func() argument
7155 int tid = __kmp_tid_from_gtid(gtid); in __kmp_invoke_task_func()
7156 kmp_info_t *this_thr = __kmp_threads[gtid]; in __kmp_invoke_task_func()
7159 __kmp_run_before_invoked_task(gtid, tid, this_thr, team); in __kmp_invoke_task_func()
7192 __kmp_tid_from_gtid(gtid), ompt_task_implicit); in __kmp_invoke_task_func()
7193 OMPT_CUR_TASK_INFO(this_thr)->thread_num = __kmp_tid_from_gtid(gtid); in __kmp_invoke_task_func()
7207 rc = __kmp_invoke_microtask((microtask_t)TCR_SYNC_PTR(team->t.t_pkfn), gtid, in __kmp_invoke_task_func()
7233 __kmp_run_after_invoked_task(gtid, tid, this_thr, team); in __kmp_invoke_task_func()
7238 void __kmp_teams_master(int gtid) { in __kmp_teams_master() argument
7240 kmp_info_t *thr = __kmp_threads[gtid]; in __kmp_teams_master()
7246 KA_TRACE(20, ("__kmp_teams_master: T#%d, Tid %d, microtask %p\n", gtid, in __kmp_teams_master()
7247 __kmp_tid_from_gtid(gtid), thr->th.th_teams_microtask)); in __kmp_teams_master()
7266 __kmp_fork_call(loc, gtid, fork_context_intel, team->t.t_argc, in __kmp_teams_master()
7277 __kmp_join_call(loc, gtid in __kmp_teams_master()
7286 int __kmp_invoke_teams_master(int gtid) { in __kmp_invoke_teams_master() argument
7287 kmp_info_t *this_thr = __kmp_threads[gtid]; in __kmp_invoke_teams_master()
7290 if (!__kmp_threads[gtid]->th.th_team->t.t_serialized) in __kmp_invoke_teams_master()
7291 KMP_DEBUG_ASSERT((void *)__kmp_threads[gtid]->th.th_team->t.t_pkfn == in __kmp_invoke_teams_master()
7294 __kmp_run_before_invoked_task(gtid, 0, this_thr, team); in __kmp_invoke_teams_master()
7296 int tid = __kmp_tid_from_gtid(gtid); in __kmp_invoke_teams_master()
7307 __kmp_teams_master(gtid); in __kmp_invoke_teams_master()
7311 __kmp_run_after_invoked_task(gtid, 0, this_thr, team); in __kmp_invoke_teams_master()
7320 void __kmp_push_num_threads(ident_t *id, int gtid, int num_threads) { in __kmp_push_num_threads() argument
7321 kmp_info_t *thr = __kmp_threads[gtid]; in __kmp_push_num_threads()
7329 void __kmp_push_num_teams(ident_t *id, int gtid, int num_teams, in __kmp_push_num_teams() argument
7331 kmp_info_t *thr = __kmp_threads[gtid]; in __kmp_push_num_teams()
7392 void __kmp_push_proc_bind(ident_t *id, int gtid, kmp_proc_bind_t proc_bind) { in __kmp_push_proc_bind() argument
7393 kmp_info_t *thr = __kmp_threads[gtid]; in __kmp_push_proc_bind()
7399 void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team) { in __kmp_internal_fork() argument
7400 kmp_info_t *this_thr = __kmp_threads[gtid]; in __kmp_internal_fork()
7408 KMP_ASSERT(KMP_MASTER_GTID(gtid)); in __kmp_internal_fork()
7439 __kmp_fork_barrier(gtid, 0); in __kmp_internal_fork()
7442 void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team) { in __kmp_internal_join() argument
7443 kmp_info_t *this_thr = __kmp_threads[gtid]; in __kmp_internal_join()
7447 KMP_ASSERT(KMP_MASTER_GTID(gtid)); in __kmp_internal_join()
7453 if (__kmp_threads[gtid] && in __kmp_internal_join()
7454 __kmp_threads[gtid]->th.th_team_nproc != team->t.t_nproc) { in __kmp_internal_join()
7455 __kmp_printf("GTID: %d, __kmp_threads[%d]=%p\n", gtid, gtid, in __kmp_internal_join()
7456 __kmp_threads[gtid]); in __kmp_internal_join()
7459 gtid, __kmp_threads[gtid]->th.th_team_nproc, team, in __kmp_internal_join()
7463 KMP_DEBUG_ASSERT(__kmp_threads[gtid] && in __kmp_internal_join()
7464 __kmp_threads[gtid]->th.th_team_nproc == team->t.t_nproc); in __kmp_internal_join()
7467 __kmp_join_barrier(gtid); /* wait for everyone */ in __kmp_internal_join()
7722 int gtid; in __kmp_internal_begin() local
7727 gtid = __kmp_entry_gtid(); in __kmp_internal_begin()
7728 root = __kmp_threads[gtid]->th.th_root; in __kmp_internal_begin()
7729 KMP_ASSERT(KMP_UBER_GTID(gtid)); in __kmp_internal_begin()
7733 __kmp_acquire_lock(&root->r.r_begin_lock, gtid); in __kmp_internal_begin()
7735 __kmp_release_lock(&root->r.r_begin_lock, gtid); in __kmp_internal_begin()
7741 __kmp_release_lock(&root->r.r_begin_lock, gtid); in __kmp_internal_begin()
7747 int gtid; in __kmp_user_set_library() local
7753 gtid = __kmp_entry_gtid(); in __kmp_user_set_library()
7754 thread = __kmp_threads[gtid]; in __kmp_user_set_library()
7758 KA_TRACE(20, ("__kmp_user_set_library: enter T#%d, arg: %d, %d\n", gtid, arg, in __kmp_user_set_library()
7951 static int __kmp_aux_capture_affinity_field(int gtid, const kmp_info_t *th, in __kmp_aux_capture_affinity_field() argument
7961 KMP_DEBUG_ASSERT(gtid >= 0); in __kmp_aux_capture_affinity_field()
8065 rc = __kmp_str_buf_print(field_buffer, format, __kmp_tid_from_gtid(gtid)); in __kmp_aux_capture_affinity_field()
8084 __kmp_get_ancestor_thread_num(gtid, th->th.th_team->t.t_level - 1); in __kmp_aux_capture_affinity_field()
8120 size_t __kmp_aux_capture_affinity(int gtid, const char *format, in __kmp_aux_capture_affinity() argument
8128 KMP_DEBUG_ASSERT(gtid >= 0); in __kmp_aux_capture_affinity()
8133 th = __kmp_threads[gtid]; in __kmp_aux_capture_affinity()
8148 int rc = __kmp_aux_capture_affinity_field(gtid, th, &parse_ptr, &field); in __kmp_aux_capture_affinity()
8163 void __kmp_aux_display_affinity(int gtid, const char *format) { in __kmp_aux_display_affinity() argument
8166 __kmp_aux_capture_affinity(gtid, format, &buf); in __kmp_aux_display_affinity()
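__kmp_aux_capture_affinity and __kmp_aux_display_affinity (lines 7951-8166) expand an OMP_AFFINITY_FORMAT-style string field by field into a buffer and then print it (line 8065 shows the thread-number field being formatted from the gtid). The sketch below illustrates only the field-dispatch idea with a made-up, reduced field set; the real parser supports the full OpenMP field list plus widths and alignment.

// Tiny affinity-format expander (illustrative field set only).
#include <cstdio>
#include <string>

std::string capture_affinity(int gtid, int nthreads, const char *format) {
  std::string out;
  for (const char *p = format; *p; ++p) {
    if (*p != '%') { out += *p; continue; }
    ++p;
    if (*p == '\0') break;                  // trailing '%': stop
    switch (*p) {                           // hypothetical reduced field set
    case 'n': out += std::to_string(gtid); break;       // calling thread's id
    case 'N': out += std::to_string(nthreads); break;   // team size
    case '%': out += '%'; break;
    default:  out += '?'; break;            // unknown field
    }
  }
  return out;
}

int main() {
  std::printf("%s\n", capture_affinity(2, 4, "thread %n of %N").c_str());
}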
8403 for (int gtid = 1; gtid < __kmp_threads_capacity; ++gtid) { in __kmp_resume_if_soft_paused() local
8404 kmp_info_t *thread = __kmp_threads[gtid]; in __kmp_resume_if_soft_paused()
8409 fl.resume(gtid); in __kmp_resume_if_soft_paused()
8415 fl.resume(gtid); in __kmp_resume_if_soft_paused()