/*
 * kmp_taskdeps.h
 */


//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//


#ifndef KMP_TASKDEPS_H
#define KMP_TASKDEPS_H

#include "kmp.h"

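// Acquire/release the lock embedded in a dependence node (dn.lock); it
// serializes updates to the node's successor list and task pointer.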
#define KMP_ACQUIRE_DEPNODE(gtid, n) __kmp_acquire_lock(&(n)->dn.lock, (gtid))
#define KMP_RELEASE_DEPNODE(gtid, n) __kmp_release_lock(&(n)->dn.lock, (gtid))

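// Drop one reference to a dependence node; once the last reference is gone the
// node's storage is returned to the fast or thread allocator.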
static inline void __kmp_node_deref(kmp_info_t *thread, kmp_depnode_t *node) {
  if (!node)
    return;

  kmp_int32 n = KMP_ATOMIC_DEC(&node->dn.nrefs) - 1;
  if (n == 0) {
    KMP_ASSERT(node->dn.nrefs == 0);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, node);
#else
    __kmp_thread_free(thread, node);
#endif
  }
}

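// Free a dependence-node list: drop the reference held on each node and
// release the list cells themselves.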
static inline void __kmp_depnode_list_free(kmp_info_t *thread,
                                           kmp_depnode_list *list) {
  kmp_depnode_list *next;

  for (; list; list = next) {
    next = list->next;

    __kmp_node_deref(thread, list->node);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, list);
#else
    __kmp_thread_free(thread, list);
#endif
  }
}

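// Free every entry of a dependence hash table: the last-ins/last-mtxs lists,
// the reference to the last-out node, and any mutexinoutset lock, then the
// entry itself. Buckets are cleared after their chains are freed.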
static inline void __kmp_dephash_free_entries(kmp_info_t *thread,
                                              kmp_dephash_t *h) {
  for (size_t i = 0; i < h->size; i++) {
    if (h->buckets[i]) {
      kmp_dephash_entry_t *next;
      for (kmp_dephash_entry_t *entry = h->buckets[i]; entry; entry = next) {
        next = entry->next_in_bucket;
        __kmp_depnode_list_free(thread, entry->last_ins);
        __kmp_depnode_list_free(thread, entry->last_mtxs);
        __kmp_node_deref(thread, entry->last_out);
        if (entry->mtx_lock) {
          __kmp_destroy_lock(entry->mtx_lock);
          __kmp_free(entry->mtx_lock);
        }
#if USE_FAST_MEMORY
        __kmp_fast_free(thread, entry);
#else
        __kmp_thread_free(thread, entry);
#endif
      }
      h->buckets[i] = 0;
    }
  }
}

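// Free a dependence hash table: release all of its entries, then the table
// storage itself.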
static inline void __kmp_dephash_free(kmp_info_t *thread, kmp_dephash_t *h) {
  __kmp_dephash_free_entries(thread, h);
#if USE_FAST_MEMORY
  __kmp_fast_free(thread, h);
#else
  __kmp_thread_free(thread, h);
#endif
}

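// Called when a task with dependencies finishes: release any mutexinoutset
// locks still held, free the task's dependence hash, mark its dependence node
// as finished, then decrement the predecessor count of every successor and
// schedule those that become ready.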
static inline void __kmp_release_deps(kmp_int32 gtid, kmp_taskdata_t *task) {
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_depnode_t *node = task->td_depnode;

  // Check mutexinoutset dependencies, release locks
  if (UNLIKELY(node && (node->dn.mtx_num_locks < 0))) {
    // negative num_locks means all locks were acquired
    node->dn.mtx_num_locks = -node->dn.mtx_num_locks;
    for (int i = node->dn.mtx_num_locks - 1; i >= 0; --i) {
      KMP_DEBUG_ASSERT(node->dn.mtx_locks[i] != NULL);
      __kmp_release_lock(node->dn.mtx_locks[i], gtid);
    }
  }

  if (task->td_dephash) {
    KA_TRACE(
        40, ("__kmp_release_deps: T#%d freeing dependencies hash of task %p.\n",
             gtid, task));
    __kmp_dephash_free(thread, task->td_dephash);
    task->td_dephash = NULL;
  }

  if (!node)
    return;

  KA_TRACE(20, ("__kmp_release_deps: T#%d notifying successors of task %p.\n",
                gtid, task));

  KMP_ACQUIRE_DEPNODE(gtid, node);
  node->dn.task =
      NULL; // mark this task as finished, so no new dependencies are generated
  KMP_RELEASE_DEPNODE(gtid, node);

  kmp_depnode_list_t *next;
  for (kmp_depnode_list_t *p = node->dn.successors; p; p = next) {
    kmp_depnode_t *successor = p->node;
    kmp_int32 npredecessors = KMP_ATOMIC_DEC(&successor->dn.npredecessors) - 1;

    // successor task can be NULL for wait_depends or because deps are still
    // being processed
    if (npredecessors == 0) {
      KMP_MB();
      if (successor->dn.task) {
        KA_TRACE(20, ("__kmp_release_deps: T#%d successor %p of %p scheduled "
                      "for execution.\n",
                      gtid, successor->dn.task, task));
        __kmp_omp_task(gtid, successor->dn.task, false);
      }
    }

    next = p->next;
    __kmp_node_deref(thread, p->node);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, p);
#else
    __kmp_thread_free(thread, p);
#endif
  }

  __kmp_node_deref(thread, node);

  KA_TRACE(
      20,
      ("__kmp_release_deps: T#%d all successors of %p notified of completion\n",
       gtid, task));
}

#endif // KMP_TASKDEPS_H