1 /*
2 * kmp_lock.cpp -- lock-related functions
3 */
4
5 //===----------------------------------------------------------------------===//
6 //
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include <stddef.h>
14 #include <atomic>
15
16 #include "kmp.h"
17 #include "kmp_i18n.h"
18 #include "kmp_io.h"
19 #include "kmp_itt.h"
20 #include "kmp_lock.h"
21 #include "kmp_wait_release.h"
22 #include "kmp_wrapper_getpid.h"
23
24 #include "tsan_annotations.h"
25
26 #if KMP_USE_FUTEX
27 #include <sys/syscall.h>
28 #include <unistd.h>
29 // We should really include <futex.h>, but that causes compatibility problems on
30 // different Linux* OS distributions that either require that you include (or
31 // break when you try to include) <pci/types.h>. Since all we need is the two
32 // macros below (which are part of the kernel ABI, so can't change) we just
33 // define the constants here and don't include <futex.h>
34 #ifndef FUTEX_WAIT
35 #define FUTEX_WAIT 0
36 #endif
37 #ifndef FUTEX_WAKE
38 #define FUTEX_WAKE 1
39 #endif
40 #endif
41
42 /* Implement spin locks for internal library use. */
43 /* The algorithm implemented is Lamport's bakery lock [1974]. */
44
__kmp_validate_locks(void)45 void __kmp_validate_locks(void) {
46 int i;
47 kmp_uint32 x, y;
48
49 /* Check to make sure unsigned arithmetic does wraps properly */
50 x = ~((kmp_uint32)0) - 2;
51 y = x - 2;
52
53 for (i = 0; i < 8; ++i, ++x, ++y) {
54 kmp_uint32 z = (x - y);
55 KMP_ASSERT(z == 2);
56 }
57
58 KMP_ASSERT(offsetof(kmp_base_queuing_lock, tail_id) % 8 == 0);
59 }
60
61 /* ------------------------------------------------------------------------ */
62 /* test and set locks */
63
64 // For the non-nested locks, we can only assume that the first 4 bytes were
65 // allocated, since gcc only allocates 4 bytes for omp_lock_t, and the Intel
66 // compiler only allocates a 4 byte pointer on IA-32 architecture. On
67 // Windows* OS on Intel(R) 64, we can assume that all 8 bytes were allocated.
68 //
69 // gcc reserves >= 8 bytes for nested locks, so we can assume that the
70 // entire 8 bytes were allocated for nested locks on all 64-bit platforms.
71
__kmp_get_tas_lock_owner(kmp_tas_lock_t * lck)72 static kmp_int32 __kmp_get_tas_lock_owner(kmp_tas_lock_t *lck) {
73 return KMP_LOCK_STRIP(KMP_ATOMIC_LD_RLX(&lck->lk.poll)) - 1;
74 }
75
__kmp_is_tas_lock_nestable(kmp_tas_lock_t * lck)76 static inline bool __kmp_is_tas_lock_nestable(kmp_tas_lock_t *lck) {
77 return lck->lk.depth_locked != -1;
78 }
79
80 __forceinline static int
__kmp_acquire_tas_lock_timed_template(kmp_tas_lock_t * lck,kmp_int32 gtid)81 __kmp_acquire_tas_lock_timed_template(kmp_tas_lock_t *lck, kmp_int32 gtid) {
82 KMP_MB();
83
84 #ifdef USE_LOCK_PROFILE
85 kmp_uint32 curr = KMP_LOCK_STRIP(lck->lk.poll);
86 if ((curr != 0) && (curr != gtid + 1))
87 __kmp_printf("LOCK CONTENTION: %p\n", lck);
88 /* else __kmp_printf( "." );*/
89 #endif /* USE_LOCK_PROFILE */
90
91 kmp_int32 tas_free = KMP_LOCK_FREE(tas);
92 kmp_int32 tas_busy = KMP_LOCK_BUSY(gtid + 1, tas);
93
94 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == tas_free &&
95 __kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) {
96 KMP_FSYNC_ACQUIRED(lck);
97 return KMP_LOCK_ACQUIRED_FIRST;
98 }
99
100 kmp_uint32 spins;
101 KMP_FSYNC_PREPARE(lck);
102 KMP_INIT_YIELD(spins);
103 kmp_backoff_t backoff = __kmp_spin_backoff_params;
104 do {
105 __kmp_spin_backoff(&backoff);
106 KMP_YIELD_OVERSUB_ELSE_SPIN(spins);
107 } while (KMP_ATOMIC_LD_RLX(&lck->lk.poll) != tas_free ||
108 !__kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy));
109 KMP_FSYNC_ACQUIRED(lck);
110 return KMP_LOCK_ACQUIRED_FIRST;
111 }
112
__kmp_acquire_tas_lock(kmp_tas_lock_t * lck,kmp_int32 gtid)113 int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
114 int retval = __kmp_acquire_tas_lock_timed_template(lck, gtid);
115 ANNOTATE_TAS_ACQUIRED(lck);
116 return retval;
117 }
118
__kmp_acquire_tas_lock_with_checks(kmp_tas_lock_t * lck,kmp_int32 gtid)119 static int __kmp_acquire_tas_lock_with_checks(kmp_tas_lock_t *lck,
120 kmp_int32 gtid) {
121 char const *const func = "omp_set_lock";
122 if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
123 __kmp_is_tas_lock_nestable(lck)) {
124 KMP_FATAL(LockNestableUsedAsSimple, func);
125 }
126 if ((gtid >= 0) && (__kmp_get_tas_lock_owner(lck) == gtid)) {
127 KMP_FATAL(LockIsAlreadyOwned, func);
128 }
129 return __kmp_acquire_tas_lock(lck, gtid);
130 }
131
__kmp_test_tas_lock(kmp_tas_lock_t * lck,kmp_int32 gtid)132 int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
133 kmp_int32 tas_free = KMP_LOCK_FREE(tas);
134 kmp_int32 tas_busy = KMP_LOCK_BUSY(gtid + 1, tas);
135 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == tas_free &&
136 __kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) {
137 KMP_FSYNC_ACQUIRED(lck);
138 return TRUE;
139 }
140 return FALSE;
141 }
142
__kmp_test_tas_lock_with_checks(kmp_tas_lock_t * lck,kmp_int32 gtid)143 static int __kmp_test_tas_lock_with_checks(kmp_tas_lock_t *lck,
144 kmp_int32 gtid) {
145 char const *const func = "omp_test_lock";
146 if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
147 __kmp_is_tas_lock_nestable(lck)) {
148 KMP_FATAL(LockNestableUsedAsSimple, func);
149 }
150 return __kmp_test_tas_lock(lck, gtid);
151 }
152
__kmp_release_tas_lock(kmp_tas_lock_t * lck,kmp_int32 gtid)153 int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
154 KMP_MB(); /* Flush all pending memory write invalidates. */
155
156 KMP_FSYNC_RELEASING(lck);
157 ANNOTATE_TAS_RELEASED(lck);
158 KMP_ATOMIC_ST_REL(&lck->lk.poll, KMP_LOCK_FREE(tas));
159 KMP_MB(); /* Flush all pending memory write invalidates. */
160
161 KMP_YIELD_OVERSUB();
162 return KMP_LOCK_RELEASED;
163 }
164
__kmp_release_tas_lock_with_checks(kmp_tas_lock_t * lck,kmp_int32 gtid)165 static int __kmp_release_tas_lock_with_checks(kmp_tas_lock_t *lck,
166 kmp_int32 gtid) {
167 char const *const func = "omp_unset_lock";
168 KMP_MB(); /* in case another processor initialized lock */
169 if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
170 __kmp_is_tas_lock_nestable(lck)) {
171 KMP_FATAL(LockNestableUsedAsSimple, func);
172 }
173 if (__kmp_get_tas_lock_owner(lck) == -1) {
174 KMP_FATAL(LockUnsettingFree, func);
175 }
176 if ((gtid >= 0) && (__kmp_get_tas_lock_owner(lck) >= 0) &&
177 (__kmp_get_tas_lock_owner(lck) != gtid)) {
178 KMP_FATAL(LockUnsettingSetByAnother, func);
179 }
180 return __kmp_release_tas_lock(lck, gtid);
181 }
182
__kmp_init_tas_lock(kmp_tas_lock_t * lck)183 void __kmp_init_tas_lock(kmp_tas_lock_t *lck) {
184 lck->lk.poll = KMP_LOCK_FREE(tas);
185 }
186
__kmp_destroy_tas_lock(kmp_tas_lock_t * lck)187 void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck) { lck->lk.poll = 0; }
188
__kmp_destroy_tas_lock_with_checks(kmp_tas_lock_t * lck)189 static void __kmp_destroy_tas_lock_with_checks(kmp_tas_lock_t *lck) {
190 char const *const func = "omp_destroy_lock";
191 if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
192 __kmp_is_tas_lock_nestable(lck)) {
193 KMP_FATAL(LockNestableUsedAsSimple, func);
194 }
195 if (__kmp_get_tas_lock_owner(lck) != -1) {
196 KMP_FATAL(LockStillOwned, func);
197 }
198 __kmp_destroy_tas_lock(lck);
199 }
200
201 // nested test and set locks
202
__kmp_acquire_nested_tas_lock(kmp_tas_lock_t * lck,kmp_int32 gtid)203 int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
204 KMP_DEBUG_ASSERT(gtid >= 0);
205
206 if (__kmp_get_tas_lock_owner(lck) == gtid) {
207 lck->lk.depth_locked += 1;
208 return KMP_LOCK_ACQUIRED_NEXT;
209 } else {
210 __kmp_acquire_tas_lock_timed_template(lck, gtid);
211 ANNOTATE_TAS_ACQUIRED(lck);
212 lck->lk.depth_locked = 1;
213 return KMP_LOCK_ACQUIRED_FIRST;
214 }
215 }
216
__kmp_acquire_nested_tas_lock_with_checks(kmp_tas_lock_t * lck,kmp_int32 gtid)217 static int __kmp_acquire_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
218 kmp_int32 gtid) {
219 char const *const func = "omp_set_nest_lock";
220 if (!__kmp_is_tas_lock_nestable(lck)) {
221 KMP_FATAL(LockSimpleUsedAsNestable, func);
222 }
223 return __kmp_acquire_nested_tas_lock(lck, gtid);
224 }
225
__kmp_test_nested_tas_lock(kmp_tas_lock_t * lck,kmp_int32 gtid)226 int __kmp_test_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
227 int retval;
228
229 KMP_DEBUG_ASSERT(gtid >= 0);
230
231 if (__kmp_get_tas_lock_owner(lck) == gtid) {
232 retval = ++lck->lk.depth_locked;
233 } else if (!__kmp_test_tas_lock(lck, gtid)) {
234 retval = 0;
235 } else {
236 KMP_MB();
237 retval = lck->lk.depth_locked = 1;
238 }
239 return retval;
240 }
241
__kmp_test_nested_tas_lock_with_checks(kmp_tas_lock_t * lck,kmp_int32 gtid)242 static int __kmp_test_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
243 kmp_int32 gtid) {
244 char const *const func = "omp_test_nest_lock";
245 if (!__kmp_is_tas_lock_nestable(lck)) {
246 KMP_FATAL(LockSimpleUsedAsNestable, func);
247 }
248 return __kmp_test_nested_tas_lock(lck, gtid);
249 }
250
__kmp_release_nested_tas_lock(kmp_tas_lock_t * lck,kmp_int32 gtid)251 int __kmp_release_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
252 KMP_DEBUG_ASSERT(gtid >= 0);
253
254 KMP_MB();
255 if (--(lck->lk.depth_locked) == 0) {
256 __kmp_release_tas_lock(lck, gtid);
257 return KMP_LOCK_RELEASED;
258 }
259 return KMP_LOCK_STILL_HELD;
260 }
261
__kmp_release_nested_tas_lock_with_checks(kmp_tas_lock_t * lck,kmp_int32 gtid)262 static int __kmp_release_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
263 kmp_int32 gtid) {
264 char const *const func = "omp_unset_nest_lock";
265 KMP_MB(); /* in case another processor initialized lock */
266 if (!__kmp_is_tas_lock_nestable(lck)) {
267 KMP_FATAL(LockSimpleUsedAsNestable, func);
268 }
269 if (__kmp_get_tas_lock_owner(lck) == -1) {
270 KMP_FATAL(LockUnsettingFree, func);
271 }
272 if (__kmp_get_tas_lock_owner(lck) != gtid) {
273 KMP_FATAL(LockUnsettingSetByAnother, func);
274 }
275 return __kmp_release_nested_tas_lock(lck, gtid);
276 }
277
__kmp_init_nested_tas_lock(kmp_tas_lock_t * lck)278 void __kmp_init_nested_tas_lock(kmp_tas_lock_t *lck) {
279 __kmp_init_tas_lock(lck);
280 lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
281 }
282
__kmp_destroy_nested_tas_lock(kmp_tas_lock_t * lck)283 void __kmp_destroy_nested_tas_lock(kmp_tas_lock_t *lck) {
284 __kmp_destroy_tas_lock(lck);
285 lck->lk.depth_locked = 0;
286 }
287
__kmp_destroy_nested_tas_lock_with_checks(kmp_tas_lock_t * lck)288 static void __kmp_destroy_nested_tas_lock_with_checks(kmp_tas_lock_t *lck) {
289 char const *const func = "omp_destroy_nest_lock";
290 if (!__kmp_is_tas_lock_nestable(lck)) {
291 KMP_FATAL(LockSimpleUsedAsNestable, func);
292 }
293 if (__kmp_get_tas_lock_owner(lck) != -1) {
294 KMP_FATAL(LockStillOwned, func);
295 }
296 __kmp_destroy_nested_tas_lock(lck);
297 }
298
299 #if KMP_USE_FUTEX
300
301 /* ------------------------------------------------------------------------ */
302 /* futex locks */
303
304 // futex locks are really just test and set locks, with a different method
305 // of handling contention. They take the same amount of space as test and
306 // set locks, and are allocated the same way (i.e. use the area allocated by
307 // the compiler for non-nested locks / allocate nested locks on the heap).
308
__kmp_get_futex_lock_owner(kmp_futex_lock_t * lck)309 static kmp_int32 __kmp_get_futex_lock_owner(kmp_futex_lock_t *lck) {
310 return KMP_LOCK_STRIP((TCR_4(lck->lk.poll) >> 1)) - 1;
311 }
312
__kmp_is_futex_lock_nestable(kmp_futex_lock_t * lck)313 static inline bool __kmp_is_futex_lock_nestable(kmp_futex_lock_t *lck) {
314 return lck->lk.depth_locked != -1;
315 }
316
317 __forceinline static int
__kmp_acquire_futex_lock_timed_template(kmp_futex_lock_t * lck,kmp_int32 gtid)318 __kmp_acquire_futex_lock_timed_template(kmp_futex_lock_t *lck, kmp_int32 gtid) {
319 kmp_int32 gtid_code = (gtid + 1) << 1;
320
321 KMP_MB();
322
323 #ifdef USE_LOCK_PROFILE
324 kmp_uint32 curr = KMP_LOCK_STRIP(TCR_4(lck->lk.poll));
325 if ((curr != 0) && (curr != gtid_code))
326 __kmp_printf("LOCK CONTENTION: %p\n", lck);
327 /* else __kmp_printf( "." );*/
328 #endif /* USE_LOCK_PROFILE */
329
330 KMP_FSYNC_PREPARE(lck);
331 KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d entering\n",
332 lck, lck->lk.poll, gtid));
333
334 kmp_int32 poll_val;
335
336 while ((poll_val = KMP_COMPARE_AND_STORE_RET32(
337 &(lck->lk.poll), KMP_LOCK_FREE(futex),
338 KMP_LOCK_BUSY(gtid_code, futex))) != KMP_LOCK_FREE(futex)) {
339
340 kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1;
341 KA_TRACE(
342 1000,
343 ("__kmp_acquire_futex_lock: lck:%p, T#%d poll_val = 0x%x cond = 0x%x\n",
344 lck, gtid, poll_val, cond));
345
346 // NOTE: if you try to use the following condition for this branch
347 //
348 // if ( poll_val & 1 == 0 )
349 //
350 // Then the 12.0 compiler has a bug where the following block will
351 // always be skipped, regardless of the value of the LSB of poll_val.
352 if (!cond) {
353 // Try to set the lsb in the poll to indicate to the owner
354 // thread that they need to wake this thread up.
355 if (!KMP_COMPARE_AND_STORE_REL32(&(lck->lk.poll), poll_val,
356 poll_val | KMP_LOCK_BUSY(1, futex))) {
357 KA_TRACE(
358 1000,
359 ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d can't set bit 0\n",
360 lck, lck->lk.poll, gtid));
361 continue;
362 }
363 poll_val |= KMP_LOCK_BUSY(1, futex);
364
365 KA_TRACE(1000,
366 ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d bit 0 set\n", lck,
367 lck->lk.poll, gtid));
368 }
369
370 KA_TRACE(
371 1000,
372 ("__kmp_acquire_futex_lock: lck:%p, T#%d before futex_wait(0x%x)\n",
373 lck, gtid, poll_val));
374
375 kmp_int32 rc;
376 if ((rc = syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAIT, poll_val, NULL,
377 NULL, 0)) != 0) {
378 KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p, T#%d futex_wait(0x%x) "
379 "failed (rc=%d errno=%d)\n",
380 lck, gtid, poll_val, rc, errno));
381 continue;
382 }
383
384 KA_TRACE(1000,
385 ("__kmp_acquire_futex_lock: lck:%p, T#%d after futex_wait(0x%x)\n",
386 lck, gtid, poll_val));
387 // This thread has now done a successful futex wait call and was entered on
388 // the OS futex queue. We must now perform a futex wake call when releasing
389 // the lock, as we have no idea how many other threads are in the queue.
390 gtid_code |= 1;
391 }
392
393 KMP_FSYNC_ACQUIRED(lck);
394 KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d exiting\n", lck,
395 lck->lk.poll, gtid));
396 return KMP_LOCK_ACQUIRED_FIRST;
397 }
398
__kmp_acquire_futex_lock(kmp_futex_lock_t * lck,kmp_int32 gtid)399 int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
400 int retval = __kmp_acquire_futex_lock_timed_template(lck, gtid);
401 ANNOTATE_FUTEX_ACQUIRED(lck);
402 return retval;
403 }
404
__kmp_acquire_futex_lock_with_checks(kmp_futex_lock_t * lck,kmp_int32 gtid)405 static int __kmp_acquire_futex_lock_with_checks(kmp_futex_lock_t *lck,
406 kmp_int32 gtid) {
407 char const *const func = "omp_set_lock";
408 if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
409 __kmp_is_futex_lock_nestable(lck)) {
410 KMP_FATAL(LockNestableUsedAsSimple, func);
411 }
412 if ((gtid >= 0) && (__kmp_get_futex_lock_owner(lck) == gtid)) {
413 KMP_FATAL(LockIsAlreadyOwned, func);
414 }
415 return __kmp_acquire_futex_lock(lck, gtid);
416 }
417
__kmp_test_futex_lock(kmp_futex_lock_t * lck,kmp_int32 gtid)418 int __kmp_test_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
419 if (KMP_COMPARE_AND_STORE_ACQ32(&(lck->lk.poll), KMP_LOCK_FREE(futex),
420 KMP_LOCK_BUSY((gtid + 1) << 1, futex))) {
421 KMP_FSYNC_ACQUIRED(lck);
422 return TRUE;
423 }
424 return FALSE;
425 }
426
__kmp_test_futex_lock_with_checks(kmp_futex_lock_t * lck,kmp_int32 gtid)427 static int __kmp_test_futex_lock_with_checks(kmp_futex_lock_t *lck,
428 kmp_int32 gtid) {
429 char const *const func = "omp_test_lock";
430 if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
431 __kmp_is_futex_lock_nestable(lck)) {
432 KMP_FATAL(LockNestableUsedAsSimple, func);
433 }
434 return __kmp_test_futex_lock(lck, gtid);
435 }
436
__kmp_release_futex_lock(kmp_futex_lock_t * lck,kmp_int32 gtid)437 int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
438 KMP_MB(); /* Flush all pending memory write invalidates. */
439
440 KA_TRACE(1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d entering\n",
441 lck, lck->lk.poll, gtid));
442
443 KMP_FSYNC_RELEASING(lck);
444 ANNOTATE_FUTEX_RELEASED(lck);
445
446 kmp_int32 poll_val = KMP_XCHG_FIXED32(&(lck->lk.poll), KMP_LOCK_FREE(futex));
447
448 KA_TRACE(1000,
449 ("__kmp_release_futex_lock: lck:%p, T#%d released poll_val = 0x%x\n",
450 lck, gtid, poll_val));
451
452 if (KMP_LOCK_STRIP(poll_val) & 1) {
453 KA_TRACE(1000,
454 ("__kmp_release_futex_lock: lck:%p, T#%d futex_wake 1 thread\n",
455 lck, gtid));
456 syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex),
457 NULL, NULL, 0);
458 }
459
460 KMP_MB(); /* Flush all pending memory write invalidates. */
461
462 KA_TRACE(1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d exiting\n", lck,
463 lck->lk.poll, gtid));
464
465 KMP_YIELD_OVERSUB();
466 return KMP_LOCK_RELEASED;
467 }
468
__kmp_release_futex_lock_with_checks(kmp_futex_lock_t * lck,kmp_int32 gtid)469 static int __kmp_release_futex_lock_with_checks(kmp_futex_lock_t *lck,
470 kmp_int32 gtid) {
471 char const *const func = "omp_unset_lock";
472 KMP_MB(); /* in case another processor initialized lock */
473 if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
474 __kmp_is_futex_lock_nestable(lck)) {
475 KMP_FATAL(LockNestableUsedAsSimple, func);
476 }
477 if (__kmp_get_futex_lock_owner(lck) == -1) {
478 KMP_FATAL(LockUnsettingFree, func);
479 }
480 if ((gtid >= 0) && (__kmp_get_futex_lock_owner(lck) >= 0) &&
481 (__kmp_get_futex_lock_owner(lck) != gtid)) {
482 KMP_FATAL(LockUnsettingSetByAnother, func);
483 }
484 return __kmp_release_futex_lock(lck, gtid);
485 }
486
__kmp_init_futex_lock(kmp_futex_lock_t * lck)487 void __kmp_init_futex_lock(kmp_futex_lock_t *lck) {
488 TCW_4(lck->lk.poll, KMP_LOCK_FREE(futex));
489 }
490
__kmp_destroy_futex_lock(kmp_futex_lock_t * lck)491 void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck) { lck->lk.poll = 0; }
492
__kmp_destroy_futex_lock_with_checks(kmp_futex_lock_t * lck)493 static void __kmp_destroy_futex_lock_with_checks(kmp_futex_lock_t *lck) {
494 char const *const func = "omp_destroy_lock";
495 if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
496 __kmp_is_futex_lock_nestable(lck)) {
497 KMP_FATAL(LockNestableUsedAsSimple, func);
498 }
499 if (__kmp_get_futex_lock_owner(lck) != -1) {
500 KMP_FATAL(LockStillOwned, func);
501 }
502 __kmp_destroy_futex_lock(lck);
503 }
504
505 // nested futex locks
506
__kmp_acquire_nested_futex_lock(kmp_futex_lock_t * lck,kmp_int32 gtid)507 int __kmp_acquire_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
508 KMP_DEBUG_ASSERT(gtid >= 0);
509
510 if (__kmp_get_futex_lock_owner(lck) == gtid) {
511 lck->lk.depth_locked += 1;
512 return KMP_LOCK_ACQUIRED_NEXT;
513 } else {
514 __kmp_acquire_futex_lock_timed_template(lck, gtid);
515 ANNOTATE_FUTEX_ACQUIRED(lck);
516 lck->lk.depth_locked = 1;
517 return KMP_LOCK_ACQUIRED_FIRST;
518 }
519 }
520
__kmp_acquire_nested_futex_lock_with_checks(kmp_futex_lock_t * lck,kmp_int32 gtid)521 static int __kmp_acquire_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
522 kmp_int32 gtid) {
523 char const *const func = "omp_set_nest_lock";
524 if (!__kmp_is_futex_lock_nestable(lck)) {
525 KMP_FATAL(LockSimpleUsedAsNestable, func);
526 }
527 return __kmp_acquire_nested_futex_lock(lck, gtid);
528 }
529
__kmp_test_nested_futex_lock(kmp_futex_lock_t * lck,kmp_int32 gtid)530 int __kmp_test_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
531 int retval;
532
533 KMP_DEBUG_ASSERT(gtid >= 0);
534
535 if (__kmp_get_futex_lock_owner(lck) == gtid) {
536 retval = ++lck->lk.depth_locked;
537 } else if (!__kmp_test_futex_lock(lck, gtid)) {
538 retval = 0;
539 } else {
540 KMP_MB();
541 retval = lck->lk.depth_locked = 1;
542 }
543 return retval;
544 }
545
__kmp_test_nested_futex_lock_with_checks(kmp_futex_lock_t * lck,kmp_int32 gtid)546 static int __kmp_test_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
547 kmp_int32 gtid) {
548 char const *const func = "omp_test_nest_lock";
549 if (!__kmp_is_futex_lock_nestable(lck)) {
550 KMP_FATAL(LockSimpleUsedAsNestable, func);
551 }
552 return __kmp_test_nested_futex_lock(lck, gtid);
553 }
554
__kmp_release_nested_futex_lock(kmp_futex_lock_t * lck,kmp_int32 gtid)555 int __kmp_release_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
556 KMP_DEBUG_ASSERT(gtid >= 0);
557
558 KMP_MB();
559 if (--(lck->lk.depth_locked) == 0) {
560 __kmp_release_futex_lock(lck, gtid);
561 return KMP_LOCK_RELEASED;
562 }
563 return KMP_LOCK_STILL_HELD;
564 }
565
__kmp_release_nested_futex_lock_with_checks(kmp_futex_lock_t * lck,kmp_int32 gtid)566 static int __kmp_release_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
567 kmp_int32 gtid) {
568 char const *const func = "omp_unset_nest_lock";
569 KMP_MB(); /* in case another processor initialized lock */
570 if (!__kmp_is_futex_lock_nestable(lck)) {
571 KMP_FATAL(LockSimpleUsedAsNestable, func);
572 }
573 if (__kmp_get_futex_lock_owner(lck) == -1) {
574 KMP_FATAL(LockUnsettingFree, func);
575 }
576 if (__kmp_get_futex_lock_owner(lck) != gtid) {
577 KMP_FATAL(LockUnsettingSetByAnother, func);
578 }
579 return __kmp_release_nested_futex_lock(lck, gtid);
580 }
581
__kmp_init_nested_futex_lock(kmp_futex_lock_t * lck)582 void __kmp_init_nested_futex_lock(kmp_futex_lock_t *lck) {
583 __kmp_init_futex_lock(lck);
584 lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
585 }
586
__kmp_destroy_nested_futex_lock(kmp_futex_lock_t * lck)587 void __kmp_destroy_nested_futex_lock(kmp_futex_lock_t *lck) {
588 __kmp_destroy_futex_lock(lck);
589 lck->lk.depth_locked = 0;
590 }
591
__kmp_destroy_nested_futex_lock_with_checks(kmp_futex_lock_t * lck)592 static void __kmp_destroy_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) {
593 char const *const func = "omp_destroy_nest_lock";
594 if (!__kmp_is_futex_lock_nestable(lck)) {
595 KMP_FATAL(LockSimpleUsedAsNestable, func);
596 }
597 if (__kmp_get_futex_lock_owner(lck) != -1) {
598 KMP_FATAL(LockStillOwned, func);
599 }
600 __kmp_destroy_nested_futex_lock(lck);
601 }
602
603 #endif // KMP_USE_FUTEX
604
605 /* ------------------------------------------------------------------------ */
606 /* ticket (bakery) locks */
607
__kmp_get_ticket_lock_owner(kmp_ticket_lock_t * lck)608 static kmp_int32 __kmp_get_ticket_lock_owner(kmp_ticket_lock_t *lck) {
609 return std::atomic_load_explicit(&lck->lk.owner_id,
610 std::memory_order_relaxed) -
611 1;
612 }
613
__kmp_is_ticket_lock_nestable(kmp_ticket_lock_t * lck)614 static inline bool __kmp_is_ticket_lock_nestable(kmp_ticket_lock_t *lck) {
615 return std::atomic_load_explicit(&lck->lk.depth_locked,
616 std::memory_order_relaxed) != -1;
617 }
618
__kmp_bakery_check(void * now_serving,kmp_uint32 my_ticket)619 static kmp_uint32 __kmp_bakery_check(void *now_serving, kmp_uint32 my_ticket) {
620 return std::atomic_load_explicit((std::atomic<unsigned> *)now_serving,
621 std::memory_order_acquire) == my_ticket;
622 }
623
624 __forceinline static int
__kmp_acquire_ticket_lock_timed_template(kmp_ticket_lock_t * lck,kmp_int32 gtid)625 __kmp_acquire_ticket_lock_timed_template(kmp_ticket_lock_t *lck,
626 kmp_int32 gtid) {
627 kmp_uint32 my_ticket = std::atomic_fetch_add_explicit(
628 &lck->lk.next_ticket, 1U, std::memory_order_relaxed);
629
630 #ifdef USE_LOCK_PROFILE
631 if (std::atomic_load_explicit(&lck->lk.now_serving,
632 std::memory_order_relaxed) != my_ticket)
633 __kmp_printf("LOCK CONTENTION: %p\n", lck);
634 /* else __kmp_printf( "." );*/
635 #endif /* USE_LOCK_PROFILE */
636
637 if (std::atomic_load_explicit(&lck->lk.now_serving,
638 std::memory_order_acquire) == my_ticket) {
639 return KMP_LOCK_ACQUIRED_FIRST;
640 }
641 KMP_WAIT_PTR(&lck->lk.now_serving, my_ticket, __kmp_bakery_check, lck);
642 return KMP_LOCK_ACQUIRED_FIRST;
643 }
644
__kmp_acquire_ticket_lock(kmp_ticket_lock_t * lck,kmp_int32 gtid)645 int __kmp_acquire_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
646 int retval = __kmp_acquire_ticket_lock_timed_template(lck, gtid);
647 ANNOTATE_TICKET_ACQUIRED(lck);
648 return retval;
649 }
650
__kmp_acquire_ticket_lock_with_checks(kmp_ticket_lock_t * lck,kmp_int32 gtid)651 static int __kmp_acquire_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
652 kmp_int32 gtid) {
653 char const *const func = "omp_set_lock";
654
655 if (!std::atomic_load_explicit(&lck->lk.initialized,
656 std::memory_order_relaxed)) {
657 KMP_FATAL(LockIsUninitialized, func);
658 }
659 if (lck->lk.self != lck) {
660 KMP_FATAL(LockIsUninitialized, func);
661 }
662 if (__kmp_is_ticket_lock_nestable(lck)) {
663 KMP_FATAL(LockNestableUsedAsSimple, func);
664 }
665 if ((gtid >= 0) && (__kmp_get_ticket_lock_owner(lck) == gtid)) {
666 KMP_FATAL(LockIsAlreadyOwned, func);
667 }
668
669 __kmp_acquire_ticket_lock(lck, gtid);
670
671 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
672 std::memory_order_relaxed);
673 return KMP_LOCK_ACQUIRED_FIRST;
674 }
675
__kmp_test_ticket_lock(kmp_ticket_lock_t * lck,kmp_int32 gtid)676 int __kmp_test_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
677 kmp_uint32 my_ticket = std::atomic_load_explicit(&lck->lk.next_ticket,
678 std::memory_order_relaxed);
679
680 if (std::atomic_load_explicit(&lck->lk.now_serving,
681 std::memory_order_relaxed) == my_ticket) {
682 kmp_uint32 next_ticket = my_ticket + 1;
683 if (std::atomic_compare_exchange_strong_explicit(
684 &lck->lk.next_ticket, &my_ticket, next_ticket,
685 std::memory_order_acquire, std::memory_order_acquire)) {
686 return TRUE;
687 }
688 }
689 return FALSE;
690 }
691
__kmp_test_ticket_lock_with_checks(kmp_ticket_lock_t * lck,kmp_int32 gtid)692 static int __kmp_test_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
693 kmp_int32 gtid) {
694 char const *const func = "omp_test_lock";
695
696 if (!std::atomic_load_explicit(&lck->lk.initialized,
697 std::memory_order_relaxed)) {
698 KMP_FATAL(LockIsUninitialized, func);
699 }
700 if (lck->lk.self != lck) {
701 KMP_FATAL(LockIsUninitialized, func);
702 }
703 if (__kmp_is_ticket_lock_nestable(lck)) {
704 KMP_FATAL(LockNestableUsedAsSimple, func);
705 }
706
707 int retval = __kmp_test_ticket_lock(lck, gtid);
708
709 if (retval) {
710 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
711 std::memory_order_relaxed);
712 }
713 return retval;
714 }
715
__kmp_release_ticket_lock(kmp_ticket_lock_t * lck,kmp_int32 gtid)716 int __kmp_release_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
717 kmp_uint32 distance = std::atomic_load_explicit(&lck->lk.next_ticket,
718 std::memory_order_relaxed) -
719 std::atomic_load_explicit(&lck->lk.now_serving,
720 std::memory_order_relaxed);
721
722 ANNOTATE_TICKET_RELEASED(lck);
723 std::atomic_fetch_add_explicit(&lck->lk.now_serving, 1U,
724 std::memory_order_release);
725
726 KMP_YIELD(distance >
727 (kmp_uint32)(__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
728 return KMP_LOCK_RELEASED;
729 }
730
__kmp_release_ticket_lock_with_checks(kmp_ticket_lock_t * lck,kmp_int32 gtid)731 static int __kmp_release_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
732 kmp_int32 gtid) {
733 char const *const func = "omp_unset_lock";
734
735 if (!std::atomic_load_explicit(&lck->lk.initialized,
736 std::memory_order_relaxed)) {
737 KMP_FATAL(LockIsUninitialized, func);
738 }
739 if (lck->lk.self != lck) {
740 KMP_FATAL(LockIsUninitialized, func);
741 }
742 if (__kmp_is_ticket_lock_nestable(lck)) {
743 KMP_FATAL(LockNestableUsedAsSimple, func);
744 }
745 if (__kmp_get_ticket_lock_owner(lck) == -1) {
746 KMP_FATAL(LockUnsettingFree, func);
747 }
748 if ((gtid >= 0) && (__kmp_get_ticket_lock_owner(lck) >= 0) &&
749 (__kmp_get_ticket_lock_owner(lck) != gtid)) {
750 KMP_FATAL(LockUnsettingSetByAnother, func);
751 }
752 std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
753 return __kmp_release_ticket_lock(lck, gtid);
754 }
755
__kmp_init_ticket_lock(kmp_ticket_lock_t * lck)756 void __kmp_init_ticket_lock(kmp_ticket_lock_t *lck) {
757 lck->lk.location = NULL;
758 lck->lk.self = lck;
759 std::atomic_store_explicit(&lck->lk.next_ticket, 0U,
760 std::memory_order_relaxed);
761 std::atomic_store_explicit(&lck->lk.now_serving, 0U,
762 std::memory_order_relaxed);
763 std::atomic_store_explicit(
764 &lck->lk.owner_id, 0,
765 std::memory_order_relaxed); // no thread owns the lock.
766 std::atomic_store_explicit(
767 &lck->lk.depth_locked, -1,
768 std::memory_order_relaxed); // -1 => not a nested lock.
769 std::atomic_store_explicit(&lck->lk.initialized, true,
770 std::memory_order_release);
771 }
772
__kmp_destroy_ticket_lock(kmp_ticket_lock_t * lck)773 void __kmp_destroy_ticket_lock(kmp_ticket_lock_t *lck) {
774 std::atomic_store_explicit(&lck->lk.initialized, false,
775 std::memory_order_release);
776 lck->lk.self = NULL;
777 lck->lk.location = NULL;
778 std::atomic_store_explicit(&lck->lk.next_ticket, 0U,
779 std::memory_order_relaxed);
780 std::atomic_store_explicit(&lck->lk.now_serving, 0U,
781 std::memory_order_relaxed);
782 std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
783 std::atomic_store_explicit(&lck->lk.depth_locked, -1,
784 std::memory_order_relaxed);
785 }
786
__kmp_destroy_ticket_lock_with_checks(kmp_ticket_lock_t * lck)787 static void __kmp_destroy_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
788 char const *const func = "omp_destroy_lock";
789
790 if (!std::atomic_load_explicit(&lck->lk.initialized,
791 std::memory_order_relaxed)) {
792 KMP_FATAL(LockIsUninitialized, func);
793 }
794 if (lck->lk.self != lck) {
795 KMP_FATAL(LockIsUninitialized, func);
796 }
797 if (__kmp_is_ticket_lock_nestable(lck)) {
798 KMP_FATAL(LockNestableUsedAsSimple, func);
799 }
800 if (__kmp_get_ticket_lock_owner(lck) != -1) {
801 KMP_FATAL(LockStillOwned, func);
802 }
803 __kmp_destroy_ticket_lock(lck);
804 }
805
806 // nested ticket locks
807
__kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t * lck,kmp_int32 gtid)808 int __kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
809 KMP_DEBUG_ASSERT(gtid >= 0);
810
811 if (__kmp_get_ticket_lock_owner(lck) == gtid) {
812 std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1,
813 std::memory_order_relaxed);
814 return KMP_LOCK_ACQUIRED_NEXT;
815 } else {
816 __kmp_acquire_ticket_lock_timed_template(lck, gtid);
817 ANNOTATE_TICKET_ACQUIRED(lck);
818 std::atomic_store_explicit(&lck->lk.depth_locked, 1,
819 std::memory_order_relaxed);
820 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
821 std::memory_order_relaxed);
822 return KMP_LOCK_ACQUIRED_FIRST;
823 }
824 }
825
__kmp_acquire_nested_ticket_lock_with_checks(kmp_ticket_lock_t * lck,kmp_int32 gtid)826 static int __kmp_acquire_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
827 kmp_int32 gtid) {
828 char const *const func = "omp_set_nest_lock";
829
830 if (!std::atomic_load_explicit(&lck->lk.initialized,
831 std::memory_order_relaxed)) {
832 KMP_FATAL(LockIsUninitialized, func);
833 }
834 if (lck->lk.self != lck) {
835 KMP_FATAL(LockIsUninitialized, func);
836 }
837 if (!__kmp_is_ticket_lock_nestable(lck)) {
838 KMP_FATAL(LockSimpleUsedAsNestable, func);
839 }
840 return __kmp_acquire_nested_ticket_lock(lck, gtid);
841 }
842
__kmp_test_nested_ticket_lock(kmp_ticket_lock_t * lck,kmp_int32 gtid)843 int __kmp_test_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
844 int retval;
845
846 KMP_DEBUG_ASSERT(gtid >= 0);
847
848 if (__kmp_get_ticket_lock_owner(lck) == gtid) {
849 retval = std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1,
850 std::memory_order_relaxed) +
851 1;
852 } else if (!__kmp_test_ticket_lock(lck, gtid)) {
853 retval = 0;
854 } else {
855 std::atomic_store_explicit(&lck->lk.depth_locked, 1,
856 std::memory_order_relaxed);
857 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
858 std::memory_order_relaxed);
859 retval = 1;
860 }
861 return retval;
862 }
863
__kmp_test_nested_ticket_lock_with_checks(kmp_ticket_lock_t * lck,kmp_int32 gtid)864 static int __kmp_test_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
865 kmp_int32 gtid) {
866 char const *const func = "omp_test_nest_lock";
867
868 if (!std::atomic_load_explicit(&lck->lk.initialized,
869 std::memory_order_relaxed)) {
870 KMP_FATAL(LockIsUninitialized, func);
871 }
872 if (lck->lk.self != lck) {
873 KMP_FATAL(LockIsUninitialized, func);
874 }
875 if (!__kmp_is_ticket_lock_nestable(lck)) {
876 KMP_FATAL(LockSimpleUsedAsNestable, func);
877 }
878 return __kmp_test_nested_ticket_lock(lck, gtid);
879 }
880
__kmp_release_nested_ticket_lock(kmp_ticket_lock_t * lck,kmp_int32 gtid)881 int __kmp_release_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
882 KMP_DEBUG_ASSERT(gtid >= 0);
883
884 if ((std::atomic_fetch_add_explicit(&lck->lk.depth_locked, -1,
885 std::memory_order_relaxed) -
886 1) == 0) {
887 std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
888 __kmp_release_ticket_lock(lck, gtid);
889 return KMP_LOCK_RELEASED;
890 }
891 return KMP_LOCK_STILL_HELD;
892 }
893
__kmp_release_nested_ticket_lock_with_checks(kmp_ticket_lock_t * lck,kmp_int32 gtid)894 static int __kmp_release_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
895 kmp_int32 gtid) {
896 char const *const func = "omp_unset_nest_lock";
897
898 if (!std::atomic_load_explicit(&lck->lk.initialized,
899 std::memory_order_relaxed)) {
900 KMP_FATAL(LockIsUninitialized, func);
901 }
902 if (lck->lk.self != lck) {
903 KMP_FATAL(LockIsUninitialized, func);
904 }
905 if (!__kmp_is_ticket_lock_nestable(lck)) {
906 KMP_FATAL(LockSimpleUsedAsNestable, func);
907 }
908 if (__kmp_get_ticket_lock_owner(lck) == -1) {
909 KMP_FATAL(LockUnsettingFree, func);
910 }
911 if (__kmp_get_ticket_lock_owner(lck) != gtid) {
912 KMP_FATAL(LockUnsettingSetByAnother, func);
913 }
914 return __kmp_release_nested_ticket_lock(lck, gtid);
915 }
916
__kmp_init_nested_ticket_lock(kmp_ticket_lock_t * lck)917 void __kmp_init_nested_ticket_lock(kmp_ticket_lock_t *lck) {
918 __kmp_init_ticket_lock(lck);
919 std::atomic_store_explicit(&lck->lk.depth_locked, 0,
920 std::memory_order_relaxed);
921 // >= 0 for nestable locks, -1 for simple locks
922 }
923
__kmp_destroy_nested_ticket_lock(kmp_ticket_lock_t * lck)924 void __kmp_destroy_nested_ticket_lock(kmp_ticket_lock_t *lck) {
925 __kmp_destroy_ticket_lock(lck);
926 std::atomic_store_explicit(&lck->lk.depth_locked, 0,
927 std::memory_order_relaxed);
928 }
929
930 static void
__kmp_destroy_nested_ticket_lock_with_checks(kmp_ticket_lock_t * lck)931 __kmp_destroy_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
932 char const *const func = "omp_destroy_nest_lock";
933
934 if (!std::atomic_load_explicit(&lck->lk.initialized,
935 std::memory_order_relaxed)) {
936 KMP_FATAL(LockIsUninitialized, func);
937 }
938 if (lck->lk.self != lck) {
939 KMP_FATAL(LockIsUninitialized, func);
940 }
941 if (!__kmp_is_ticket_lock_nestable(lck)) {
942 KMP_FATAL(LockSimpleUsedAsNestable, func);
943 }
944 if (__kmp_get_ticket_lock_owner(lck) != -1) {
945 KMP_FATAL(LockStillOwned, func);
946 }
947 __kmp_destroy_nested_ticket_lock(lck);
948 }
949
950 // access functions to fields which don't exist for all lock kinds.
951
__kmp_get_ticket_lock_location(kmp_ticket_lock_t * lck)952 static const ident_t *__kmp_get_ticket_lock_location(kmp_ticket_lock_t *lck) {
953 return lck->lk.location;
954 }
955
__kmp_set_ticket_lock_location(kmp_ticket_lock_t * lck,const ident_t * loc)956 static void __kmp_set_ticket_lock_location(kmp_ticket_lock_t *lck,
957 const ident_t *loc) {
958 lck->lk.location = loc;
959 }
960
__kmp_get_ticket_lock_flags(kmp_ticket_lock_t * lck)961 static kmp_lock_flags_t __kmp_get_ticket_lock_flags(kmp_ticket_lock_t *lck) {
962 return lck->lk.flags;
963 }
964
__kmp_set_ticket_lock_flags(kmp_ticket_lock_t * lck,kmp_lock_flags_t flags)965 static void __kmp_set_ticket_lock_flags(kmp_ticket_lock_t *lck,
966 kmp_lock_flags_t flags) {
967 lck->lk.flags = flags;
968 }
969
970 /* ------------------------------------------------------------------------ */
971 /* queuing locks */
972
973 /* First the states
974 (head,tail) = 0, 0 means lock is unheld, nobody on queue
975 UINT_MAX or -1, 0 means lock is held, nobody on queue
976 h, h means lock held or about to transition,
977 1 element on queue
978 h, t h <> t, means lock is held or about to
979 transition, >1 elements on queue
980
981 Now the transitions
982 Acquire(0,0) = -1 ,0
983 Release(0,0) = Error
984 Acquire(-1,0) = h ,h h > 0
985 Release(-1,0) = 0 ,0
986 Acquire(h,h) = h ,t h > 0, t > 0, h <> t
987 Release(h,h) = -1 ,0 h > 0
988 Acquire(h,t) = h ,t' h > 0, t > 0, t' > 0, h <> t, h <> t', t <> t'
989 Release(h,t) = h',t h > 0, t > 0, h <> t, h <> h', h' maybe = t
990
991 And pictorially
992
993 +-----+
994 | 0, 0|------- release -------> Error
995 +-----+
996 | ^
997 acquire| |release
998 | |
999 | |
1000 v |
1001 +-----+
1002 |-1, 0|
1003 +-----+
1004 | ^
1005 acquire| |release
1006 | |
1007 | |
1008 v |
1009 +-----+
1010 | h, h|
1011 +-----+
1012 | ^
1013 acquire| |release
1014 | |
1015 | |
1016 v |
1017 +-----+
1018 | h, t|----- acquire, release loopback ---+
1019 +-----+ |
1020 ^ |
1021 | |
1022 +------------------------------------+
1023 */
1024
1025 #ifdef DEBUG_QUEUING_LOCKS
1026
1027 /* Stuff for circular trace buffer */
1028 #define TRACE_BUF_ELE 1024
1029 static char traces[TRACE_BUF_ELE][128] = {0};
1030 static int tc = 0;
1031 #define TRACE_LOCK(X, Y) \
1032 KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s\n", X, Y);
1033 #define TRACE_LOCK_T(X, Y, Z) \
1034 KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s%d\n", X, Y, Z);
1035 #define TRACE_LOCK_HT(X, Y, Z, Q) \
1036 KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s %d,%d\n", X, Y, \
1037 Z, Q);
1038
__kmp_dump_queuing_lock(kmp_info_t * this_thr,kmp_int32 gtid,kmp_queuing_lock_t * lck,kmp_int32 head_id,kmp_int32 tail_id)1039 static void __kmp_dump_queuing_lock(kmp_info_t *this_thr, kmp_int32 gtid,
1040 kmp_queuing_lock_t *lck, kmp_int32 head_id,
1041 kmp_int32 tail_id) {
1042 kmp_int32 t, i;
1043
1044 __kmp_printf_no_lock("\n__kmp_dump_queuing_lock: TRACE BEGINS HERE! \n");
1045
1046 i = tc % TRACE_BUF_ELE;
1047 __kmp_printf_no_lock("%s\n", traces[i]);
1048 i = (i + 1) % TRACE_BUF_ELE;
1049 while (i != (tc % TRACE_BUF_ELE)) {
1050 __kmp_printf_no_lock("%s", traces[i]);
1051 i = (i + 1) % TRACE_BUF_ELE;
1052 }
1053 __kmp_printf_no_lock("\n");
1054
1055 __kmp_printf_no_lock("\n__kmp_dump_queuing_lock: gtid+1:%d, spin_here:%d, "
1056 "next_wait:%d, head_id:%d, tail_id:%d\n",
1057 gtid + 1, this_thr->th.th_spin_here,
1058 this_thr->th.th_next_waiting, head_id, tail_id);
1059
1060 __kmp_printf_no_lock("\t\thead: %d ", lck->lk.head_id);
1061
1062 if (lck->lk.head_id >= 1) {
1063 t = __kmp_threads[lck->lk.head_id - 1]->th.th_next_waiting;
1064 while (t > 0) {
1065 __kmp_printf_no_lock("-> %d ", t);
1066 t = __kmp_threads[t - 1]->th.th_next_waiting;
1067 }
1068 }
1069 __kmp_printf_no_lock("; tail: %d ", lck->lk.tail_id);
1070 __kmp_printf_no_lock("\n\n");
1071 }
1072
1073 #endif /* DEBUG_QUEUING_LOCKS */
1074
__kmp_get_queuing_lock_owner(kmp_queuing_lock_t * lck)1075 static kmp_int32 __kmp_get_queuing_lock_owner(kmp_queuing_lock_t *lck) {
1076 return TCR_4(lck->lk.owner_id) - 1;
1077 }
1078
__kmp_is_queuing_lock_nestable(kmp_queuing_lock_t * lck)1079 static inline bool __kmp_is_queuing_lock_nestable(kmp_queuing_lock_t *lck) {
1080 return lck->lk.depth_locked != -1;
1081 }
1082
1083 /* Acquire a lock using a the queuing lock implementation */
1084 template <bool takeTime>
1085 /* [TLW] The unused template above is left behind because of what BEB believes
1086 is a potential compiler problem with __forceinline. */
1087 __forceinline static int
__kmp_acquire_queuing_lock_timed_template(kmp_queuing_lock_t * lck,kmp_int32 gtid)1088 __kmp_acquire_queuing_lock_timed_template(kmp_queuing_lock_t *lck,
1089 kmp_int32 gtid) {
1090 kmp_info_t *this_thr = __kmp_thread_from_gtid(gtid);
1091 volatile kmp_int32 *head_id_p = &lck->lk.head_id;
1092 volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;
1093 volatile kmp_uint32 *spin_here_p;
1094 kmp_int32 need_mf = 1;
1095
1096 #if OMPT_SUPPORT
1097 ompt_state_t prev_state = ompt_state_undefined;
1098 #endif
1099
1100 KA_TRACE(1000,
1101 ("__kmp_acquire_queuing_lock: lck:%p, T#%d entering\n", lck, gtid));
1102
1103 KMP_FSYNC_PREPARE(lck);
1104 KMP_DEBUG_ASSERT(this_thr != NULL);
1105 spin_here_p = &this_thr->th.th_spin_here;
1106
1107 #ifdef DEBUG_QUEUING_LOCKS
1108 TRACE_LOCK(gtid + 1, "acq ent");
1109 if (*spin_here_p)
1110 __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
1111 if (this_thr->th.th_next_waiting != 0)
1112 __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
1113 #endif
1114 KMP_DEBUG_ASSERT(!*spin_here_p);
1115 KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
1116
1117 /* The following st.rel to spin_here_p needs to precede the cmpxchg.acq to
1118 head_id_p that may follow, not just in execution order, but also in
1119 visibility order. This way, when a releasing thread observes the changes to
1120 the queue by this thread, it can rightly assume that spin_here_p has
1121 already been set to TRUE, so that when it sets spin_here_p to FALSE, it is
1122 not premature. If the releasing thread sets spin_here_p to FALSE before
1123 this thread sets it to TRUE, this thread will hang. */
1124 *spin_here_p = TRUE; /* before enqueuing to prevent race */
1125
1126 while (1) {
1127 kmp_int32 enqueued;
1128 kmp_int32 head;
1129 kmp_int32 tail;
1130
1131 head = *head_id_p;
1132
1133 switch (head) {
1134
1135 case -1: {
1136 #ifdef DEBUG_QUEUING_LOCKS
1137 tail = *tail_id_p;
1138 TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
1139 #endif
1140 tail = 0; /* to make sure next link asynchronously read is not set
1141 accidentally; this assignment prevents us from entering the
1142 if ( t > 0 ) condition in the enqueued case below, which is not
1143 necessary for this state transition */
1144
1145 need_mf = 0;
1146 /* try (-1,0)->(tid,tid) */
1147 enqueued = KMP_COMPARE_AND_STORE_ACQ64((volatile kmp_int64 *)tail_id_p,
1148 KMP_PACK_64(-1, 0),
1149 KMP_PACK_64(gtid + 1, gtid + 1));
1150 #ifdef DEBUG_QUEUING_LOCKS
1151 if (enqueued)
1152 TRACE_LOCK(gtid + 1, "acq enq: (-1,0)->(tid,tid)");
1153 #endif
1154 } break;
1155
1156 default: {
1157 tail = *tail_id_p;
1158 KMP_DEBUG_ASSERT(tail != gtid + 1);
1159
1160 #ifdef DEBUG_QUEUING_LOCKS
1161 TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
1162 #endif
1163
1164 if (tail == 0) {
1165 enqueued = FALSE;
1166 } else {
1167 need_mf = 0;
1168 /* try (h,t) or (h,h)->(h,tid) */
1169 enqueued = KMP_COMPARE_AND_STORE_ACQ32(tail_id_p, tail, gtid + 1);
1170
1171 #ifdef DEBUG_QUEUING_LOCKS
1172 if (enqueued)
1173 TRACE_LOCK(gtid + 1, "acq enq: (h,t)->(h,tid)");
1174 #endif
1175 }
1176 } break;
1177
1178 case 0: /* empty queue */
1179 {
1180 kmp_int32 grabbed_lock;
1181
1182 #ifdef DEBUG_QUEUING_LOCKS
1183 tail = *tail_id_p;
1184 TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
1185 #endif
1186 /* try (0,0)->(-1,0) */
1187
1188 /* only legal transition out of head = 0 is head = -1 with no change to
1189 * tail */
1190 grabbed_lock = KMP_COMPARE_AND_STORE_ACQ32(head_id_p, 0, -1);
1191
1192 if (grabbed_lock) {
1193
1194 *spin_here_p = FALSE;
1195
1196 KA_TRACE(
1197 1000,
1198 ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: no queuing\n",
1199 lck, gtid));
1200 #ifdef DEBUG_QUEUING_LOCKS
1201 TRACE_LOCK_HT(gtid + 1, "acq exit: ", head, 0);
1202 #endif
1203
1204 #if OMPT_SUPPORT
1205 if (ompt_enabled.enabled && prev_state != ompt_state_undefined) {
1206 /* change the state before clearing wait_id */
1207 this_thr->th.ompt_thread_info.state = prev_state;
1208 this_thr->th.ompt_thread_info.wait_id = 0;
1209 }
1210 #endif
1211
1212 KMP_FSYNC_ACQUIRED(lck);
1213 return KMP_LOCK_ACQUIRED_FIRST; /* lock holder cannot be on queue */
1214 }
1215 enqueued = FALSE;
1216 } break;
1217 }
1218
1219 #if OMPT_SUPPORT
1220 if (ompt_enabled.enabled && prev_state == ompt_state_undefined) {
1221 /* this thread will spin; set wait_id before entering wait state */
1222 prev_state = this_thr->th.ompt_thread_info.state;
1223 this_thr->th.ompt_thread_info.wait_id = (uint64_t)lck;
1224 this_thr->th.ompt_thread_info.state = ompt_state_wait_lock;
1225 }
1226 #endif
1227
1228 if (enqueued) {
1229 if (tail > 0) {
1230 kmp_info_t *tail_thr = __kmp_thread_from_gtid(tail - 1);
1231 KMP_ASSERT(tail_thr != NULL);
1232 tail_thr->th.th_next_waiting = gtid + 1;
1233 /* corresponding wait for this write in release code */
1234 }
1235 KA_TRACE(1000,
1236 ("__kmp_acquire_queuing_lock: lck:%p, T#%d waiting for lock\n",
1237 lck, gtid));
1238
1239 KMP_MB();
1240 // ToDo: Use __kmp_wait_sleep or similar when blocktime != inf
1241 KMP_WAIT(spin_here_p, FALSE, KMP_EQ, lck);
1242 // Synchronize writes to both runtime thread structures
1243 // and writes in user code.
1244 KMP_MB();
1245
1246 #ifdef DEBUG_QUEUING_LOCKS
1247 TRACE_LOCK(gtid + 1, "acq spin");
1248
1249 if (this_thr->th.th_next_waiting != 0)
1250 __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
1251 #endif
1252 KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
1253 KA_TRACE(1000, ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: after "
1254 "waiting on queue\n",
1255 lck, gtid));
1256
1257 #ifdef DEBUG_QUEUING_LOCKS
1258 TRACE_LOCK(gtid + 1, "acq exit 2");
1259 #endif
1260
1261 #if OMPT_SUPPORT
1262 /* change the state before clearing wait_id */
1263 this_thr->th.ompt_thread_info.state = prev_state;
1264 this_thr->th.ompt_thread_info.wait_id = 0;
1265 #endif
1266
1267 /* got lock, we were dequeued by the thread that released lock */
1268 return KMP_LOCK_ACQUIRED_FIRST;
1269 }
1270
1271 /* Yield if number of threads > number of logical processors */
1272 /* ToDo: Not sure why this should only be in oversubscription case,
1273 maybe should be traditional YIELD_INIT/YIELD_WHEN loop */
1274 KMP_YIELD_OVERSUB();
1275
1276 #ifdef DEBUG_QUEUING_LOCKS
1277 TRACE_LOCK(gtid + 1, "acq retry");
1278 #endif
1279 }
1280 KMP_ASSERT2(0, "should not get here");
1281 return KMP_LOCK_ACQUIRED_FIRST;
1282 }
1283
__kmp_acquire_queuing_lock(kmp_queuing_lock_t * lck,kmp_int32 gtid)1284 int __kmp_acquire_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1285 KMP_DEBUG_ASSERT(gtid >= 0);
1286
1287 int retval = __kmp_acquire_queuing_lock_timed_template<false>(lck, gtid);
1288 ANNOTATE_QUEUING_ACQUIRED(lck);
1289 return retval;
1290 }
1291
__kmp_acquire_queuing_lock_with_checks(kmp_queuing_lock_t * lck,kmp_int32 gtid)1292 static int __kmp_acquire_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
1293 kmp_int32 gtid) {
1294 char const *const func = "omp_set_lock";
1295 if (lck->lk.initialized != lck) {
1296 KMP_FATAL(LockIsUninitialized, func);
1297 }
1298 if (__kmp_is_queuing_lock_nestable(lck)) {
1299 KMP_FATAL(LockNestableUsedAsSimple, func);
1300 }
1301 if (__kmp_get_queuing_lock_owner(lck) == gtid) {
1302 KMP_FATAL(LockIsAlreadyOwned, func);
1303 }
1304
1305 __kmp_acquire_queuing_lock(lck, gtid);
1306
1307 lck->lk.owner_id = gtid + 1;
1308 return KMP_LOCK_ACQUIRED_FIRST;
1309 }
1310
__kmp_test_queuing_lock(kmp_queuing_lock_t * lck,kmp_int32 gtid)1311 int __kmp_test_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1312 volatile kmp_int32 *head_id_p = &lck->lk.head_id;
1313 kmp_int32 head;
1314 #ifdef KMP_DEBUG
1315 kmp_info_t *this_thr;
1316 #endif
1317
1318 KA_TRACE(1000, ("__kmp_test_queuing_lock: T#%d entering\n", gtid));
1319 KMP_DEBUG_ASSERT(gtid >= 0);
1320 #ifdef KMP_DEBUG
1321 this_thr = __kmp_thread_from_gtid(gtid);
1322 KMP_DEBUG_ASSERT(this_thr != NULL);
1323 KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here);
1324 #endif
1325
1326 head = *head_id_p;
1327
1328 if (head == 0) { /* nobody on queue, nobody holding */
1329 /* try (0,0)->(-1,0) */
1330 if (KMP_COMPARE_AND_STORE_ACQ32(head_id_p, 0, -1)) {
1331 KA_TRACE(1000,
1332 ("__kmp_test_queuing_lock: T#%d exiting: holding lock\n", gtid));
1333 KMP_FSYNC_ACQUIRED(lck);
1334 ANNOTATE_QUEUING_ACQUIRED(lck);
1335 return TRUE;
1336 }
1337 }
1338
1339 KA_TRACE(1000,
1340 ("__kmp_test_queuing_lock: T#%d exiting: without lock\n", gtid));
1341 return FALSE;
1342 }
1343
__kmp_test_queuing_lock_with_checks(kmp_queuing_lock_t * lck,kmp_int32 gtid)1344 static int __kmp_test_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
1345 kmp_int32 gtid) {
1346 char const *const func = "omp_test_lock";
1347 if (lck->lk.initialized != lck) {
1348 KMP_FATAL(LockIsUninitialized, func);
1349 }
1350 if (__kmp_is_queuing_lock_nestable(lck)) {
1351 KMP_FATAL(LockNestableUsedAsSimple, func);
1352 }
1353
1354 int retval = __kmp_test_queuing_lock(lck, gtid);
1355
1356 if (retval) {
1357 lck->lk.owner_id = gtid + 1;
1358 }
1359 return retval;
1360 }
1361
__kmp_release_queuing_lock(kmp_queuing_lock_t * lck,kmp_int32 gtid)1362 int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1363 kmp_info_t *this_thr;
1364 volatile kmp_int32 *head_id_p = &lck->lk.head_id;
1365 volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;
1366
1367 KA_TRACE(1000,
1368 ("__kmp_release_queuing_lock: lck:%p, T#%d entering\n", lck, gtid));
1369 KMP_DEBUG_ASSERT(gtid >= 0);
1370 this_thr = __kmp_thread_from_gtid(gtid);
1371 KMP_DEBUG_ASSERT(this_thr != NULL);
1372 #ifdef DEBUG_QUEUING_LOCKS
1373 TRACE_LOCK(gtid + 1, "rel ent");
1374
1375 if (this_thr->th.th_spin_here)
1376 __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
1377 if (this_thr->th.th_next_waiting != 0)
1378 __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
1379 #endif
1380 KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here);
1381 KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
1382
1383 KMP_FSYNC_RELEASING(lck);
1384 ANNOTATE_QUEUING_RELEASED(lck);
1385
1386 while (1) {
1387 kmp_int32 dequeued;
1388 kmp_int32 head;
1389 kmp_int32 tail;
1390
1391 head = *head_id_p;
1392
1393 #ifdef DEBUG_QUEUING_LOCKS
1394 tail = *tail_id_p;
1395 TRACE_LOCK_HT(gtid + 1, "rel read: ", head, tail);
1396 if (head == 0)
1397 __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
1398 #endif
1399 KMP_DEBUG_ASSERT(head !=
1400 0); /* holding the lock, head must be -1 or queue head */
1401
1402 if (head == -1) { /* nobody on queue */
1403 /* try (-1,0)->(0,0) */
1404 if (KMP_COMPARE_AND_STORE_REL32(head_id_p, -1, 0)) {
1405 KA_TRACE(
1406 1000,
1407 ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: queue empty\n",
1408 lck, gtid));
1409 #ifdef DEBUG_QUEUING_LOCKS
1410 TRACE_LOCK_HT(gtid + 1, "rel exit: ", 0, 0);
1411 #endif
1412
1413 #if OMPT_SUPPORT
1414 /* nothing to do - no other thread is trying to shift blame */
1415 #endif
1416 return KMP_LOCK_RELEASED;
1417 }
1418 dequeued = FALSE;
1419 } else {
1420 KMP_MB();
1421 tail = *tail_id_p;
1422 if (head == tail) { /* only one thread on the queue */
1423 #ifdef DEBUG_QUEUING_LOCKS
1424 if (head <= 0)
1425 __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
1426 #endif
1427 KMP_DEBUG_ASSERT(head > 0);
1428
1429 /* try (h,h)->(-1,0) */
1430 dequeued = KMP_COMPARE_AND_STORE_REL64(
1431 RCAST(volatile kmp_int64 *, tail_id_p), KMP_PACK_64(head, head),
1432 KMP_PACK_64(-1, 0));
1433 #ifdef DEBUG_QUEUING_LOCKS
1434 TRACE_LOCK(gtid + 1, "rel deq: (h,h)->(-1,0)");
1435 #endif
1436
1437 } else {
1438 volatile kmp_int32 *waiting_id_p;
1439 kmp_info_t *head_thr = __kmp_thread_from_gtid(head - 1);
1440 KMP_DEBUG_ASSERT(head_thr != NULL);
1441 waiting_id_p = &head_thr->th.th_next_waiting;
1442
1443 /* Does this require synchronous reads? */
1444 #ifdef DEBUG_QUEUING_LOCKS
1445 if (head <= 0 || tail <= 0)
1446 __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
1447 #endif
1448 KMP_DEBUG_ASSERT(head > 0 && tail > 0);
1449
1450 /* try (h,t)->(h',t) or (t,t) */
1451 KMP_MB();
1452 /* make sure enqueuing thread has time to update next waiting thread
1453 * field */
1454 *head_id_p =
1455 KMP_WAIT((volatile kmp_uint32 *)waiting_id_p, 0, KMP_NEQ, NULL);
1456 #ifdef DEBUG_QUEUING_LOCKS
1457 TRACE_LOCK(gtid + 1, "rel deq: (h,t)->(h',t)");
1458 #endif
1459 dequeued = TRUE;
1460 }
1461 }
1462
1463 if (dequeued) {
1464 kmp_info_t *head_thr = __kmp_thread_from_gtid(head - 1);
1465 KMP_DEBUG_ASSERT(head_thr != NULL);
1466
1467 /* Does this require synchronous reads? */
1468 #ifdef DEBUG_QUEUING_LOCKS
1469 if (head <= 0 || tail <= 0)
1470 __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
1471 #endif
1472 KMP_DEBUG_ASSERT(head > 0 && tail > 0);
1473
1474 /* For clean code only. Thread not released until next statement prevents
1475 race with acquire code. */
1476 head_thr->th.th_next_waiting = 0;
1477 #ifdef DEBUG_QUEUING_LOCKS
1478 TRACE_LOCK_T(gtid + 1, "rel nw=0 for t=", head);
1479 #endif
1480
1481 KMP_MB();
1482 /* reset spin value */
1483 head_thr->th.th_spin_here = FALSE;
1484
1485 KA_TRACE(1000, ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: after "
1486 "dequeuing\n",
1487 lck, gtid));
1488 #ifdef DEBUG_QUEUING_LOCKS
1489 TRACE_LOCK(gtid + 1, "rel exit 2");
1490 #endif
1491 return KMP_LOCK_RELEASED;
1492 }
1493 /* KMP_CPU_PAUSE(); don't want to make releasing thread hold up acquiring
1494 threads */
1495
1496 #ifdef DEBUG_QUEUING_LOCKS
1497 TRACE_LOCK(gtid + 1, "rel retry");
1498 #endif
1499
1500 } /* while */
1501 KMP_ASSERT2(0, "should not get here");
1502 return KMP_LOCK_RELEASED;
1503 }
1504
__kmp_release_queuing_lock_with_checks(kmp_queuing_lock_t * lck,kmp_int32 gtid)1505 static int __kmp_release_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
1506 kmp_int32 gtid) {
1507 char const *const func = "omp_unset_lock";
1508 KMP_MB(); /* in case another processor initialized lock */
1509 if (lck->lk.initialized != lck) {
1510 KMP_FATAL(LockIsUninitialized, func);
1511 }
1512 if (__kmp_is_queuing_lock_nestable(lck)) {
1513 KMP_FATAL(LockNestableUsedAsSimple, func);
1514 }
1515 if (__kmp_get_queuing_lock_owner(lck) == -1) {
1516 KMP_FATAL(LockUnsettingFree, func);
1517 }
1518 if (__kmp_get_queuing_lock_owner(lck) != gtid) {
1519 KMP_FATAL(LockUnsettingSetByAnother, func);
1520 }
1521 lck->lk.owner_id = 0;
1522 return __kmp_release_queuing_lock(lck, gtid);
1523 }
1524
__kmp_init_queuing_lock(kmp_queuing_lock_t * lck)1525 void __kmp_init_queuing_lock(kmp_queuing_lock_t *lck) {
1526 lck->lk.location = NULL;
1527 lck->lk.head_id = 0;
1528 lck->lk.tail_id = 0;
1529 lck->lk.next_ticket = 0;
1530 lck->lk.now_serving = 0;
1531 lck->lk.owner_id = 0; // no thread owns the lock.
1532 lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks.
1533 lck->lk.initialized = lck;
1534
1535 KA_TRACE(1000, ("__kmp_init_queuing_lock: lock %p initialized\n", lck));
1536 }
1537
__kmp_destroy_queuing_lock(kmp_queuing_lock_t * lck)1538 void __kmp_destroy_queuing_lock(kmp_queuing_lock_t *lck) {
1539 lck->lk.initialized = NULL;
1540 lck->lk.location = NULL;
1541 lck->lk.head_id = 0;
1542 lck->lk.tail_id = 0;
1543 lck->lk.next_ticket = 0;
1544 lck->lk.now_serving = 0;
1545 lck->lk.owner_id = 0;
1546 lck->lk.depth_locked = -1;
1547 }
1548
__kmp_destroy_queuing_lock_with_checks(kmp_queuing_lock_t * lck)1549 static void __kmp_destroy_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
1550 char const *const func = "omp_destroy_lock";
1551 if (lck->lk.initialized != lck) {
1552 KMP_FATAL(LockIsUninitialized, func);
1553 }
1554 if (__kmp_is_queuing_lock_nestable(lck)) {
1555 KMP_FATAL(LockNestableUsedAsSimple, func);
1556 }
1557 if (__kmp_get_queuing_lock_owner(lck) != -1) {
1558 KMP_FATAL(LockStillOwned, func);
1559 }
1560 __kmp_destroy_queuing_lock(lck);
1561 }
1562
1563 // nested queuing locks
1564
__kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t * lck,kmp_int32 gtid)1565 int __kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1566 KMP_DEBUG_ASSERT(gtid >= 0);
1567
1568 if (__kmp_get_queuing_lock_owner(lck) == gtid) {
1569 lck->lk.depth_locked += 1;
1570 return KMP_LOCK_ACQUIRED_NEXT;
1571 } else {
1572 __kmp_acquire_queuing_lock_timed_template<false>(lck, gtid);
1573 ANNOTATE_QUEUING_ACQUIRED(lck);
1574 KMP_MB();
1575 lck->lk.depth_locked = 1;
1576 KMP_MB();
1577 lck->lk.owner_id = gtid + 1;
1578 return KMP_LOCK_ACQUIRED_FIRST;
1579 }
1580 }
1581
1582 static int
__kmp_acquire_nested_queuing_lock_with_checks(kmp_queuing_lock_t * lck,kmp_int32 gtid)1583 __kmp_acquire_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
1584 kmp_int32 gtid) {
1585 char const *const func = "omp_set_nest_lock";
1586 if (lck->lk.initialized != lck) {
1587 KMP_FATAL(LockIsUninitialized, func);
1588 }
1589 if (!__kmp_is_queuing_lock_nestable(lck)) {
1590 KMP_FATAL(LockSimpleUsedAsNestable, func);
1591 }
1592 return __kmp_acquire_nested_queuing_lock(lck, gtid);
1593 }
1594
1595 int __kmp_test_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1596 int retval;
1597
1598 KMP_DEBUG_ASSERT(gtid >= 0);
1599
1600 if (__kmp_get_queuing_lock_owner(lck) == gtid) {
1601 retval = ++lck->lk.depth_locked;
1602 } else if (!__kmp_test_queuing_lock(lck, gtid)) {
1603 retval = 0;
1604 } else {
1605 KMP_MB();
1606 retval = lck->lk.depth_locked = 1;
1607 KMP_MB();
1608 lck->lk.owner_id = gtid + 1;
1609 }
1610 return retval;
1611 }
1612
1613 static int __kmp_test_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
1614 kmp_int32 gtid) {
1615 char const *const func = "omp_test_nest_lock";
1616 if (lck->lk.initialized != lck) {
1617 KMP_FATAL(LockIsUninitialized, func);
1618 }
1619 if (!__kmp_is_queuing_lock_nestable(lck)) {
1620 KMP_FATAL(LockSimpleUsedAsNestable, func);
1621 }
1622 return __kmp_test_nested_queuing_lock(lck, gtid);
1623 }
1624
1625 int __kmp_release_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1626 KMP_DEBUG_ASSERT(gtid >= 0);
1627
1628 KMP_MB();
1629 if (--(lck->lk.depth_locked) == 0) {
1630 KMP_MB();
1631 lck->lk.owner_id = 0;
1632 __kmp_release_queuing_lock(lck, gtid);
1633 return KMP_LOCK_RELEASED;
1634 }
1635 return KMP_LOCK_STILL_HELD;
1636 }
1637
1638 static int
1639 __kmp_release_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
1640 kmp_int32 gtid) {
1641 char const *const func = "omp_unset_nest_lock";
1642 KMP_MB(); /* in case another processor initialized lock */
1643 if (lck->lk.initialized != lck) {
1644 KMP_FATAL(LockIsUninitialized, func);
1645 }
1646 if (!__kmp_is_queuing_lock_nestable(lck)) {
1647 KMP_FATAL(LockSimpleUsedAsNestable, func);
1648 }
1649 if (__kmp_get_queuing_lock_owner(lck) == -1) {
1650 KMP_FATAL(LockUnsettingFree, func);
1651 }
1652 if (__kmp_get_queuing_lock_owner(lck) != gtid) {
1653 KMP_FATAL(LockUnsettingSetByAnother, func);
1654 }
1655 return __kmp_release_nested_queuing_lock(lck, gtid);
1656 }
1657
1658 void __kmp_init_nested_queuing_lock(kmp_queuing_lock_t *lck) {
1659 __kmp_init_queuing_lock(lck);
1660 lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
1661 }
1662
1663 void __kmp_destroy_nested_queuing_lock(kmp_queuing_lock_t *lck) {
1664 __kmp_destroy_queuing_lock(lck);
1665 lck->lk.depth_locked = 0;
1666 }
1667
1668 static void
1669 __kmp_destroy_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
1670 char const *const func = "omp_destroy_nest_lock";
1671 if (lck->lk.initialized != lck) {
1672 KMP_FATAL(LockIsUninitialized, func);
1673 }
1674 if (!__kmp_is_queuing_lock_nestable(lck)) {
1675 KMP_FATAL(LockSimpleUsedAsNestable, func);
1676 }
1677 if (__kmp_get_queuing_lock_owner(lck) != -1) {
1678 KMP_FATAL(LockStillOwned, func);
1679 }
1680 __kmp_destroy_nested_queuing_lock(lck);
1681 }
1682
1683 // Access functions to fields which don't exist for all lock kinds.
1684
1685 static const ident_t *__kmp_get_queuing_lock_location(kmp_queuing_lock_t *lck) {
1686 return lck->lk.location;
1687 }
1688
1689 static void __kmp_set_queuing_lock_location(kmp_queuing_lock_t *lck,
1690 const ident_t *loc) {
1691 lck->lk.location = loc;
1692 }
1693
1694 static kmp_lock_flags_t __kmp_get_queuing_lock_flags(kmp_queuing_lock_t *lck) {
1695 return lck->lk.flags;
1696 }
1697
1698 static void __kmp_set_queuing_lock_flags(kmp_queuing_lock_t *lck,
1699 kmp_lock_flags_t flags) {
1700 lck->lk.flags = flags;
1701 }
1702
1703 #if KMP_USE_ADAPTIVE_LOCKS
1704
1705 /* RTM Adaptive locks */
1706
1707 #if KMP_HAVE_RTM_INTRINSICS
1708 #include <immintrin.h>
1709 #define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT)
1710
1711 #else
1712
1713 // Values from the status register after failed speculation.
1714 #define _XBEGIN_STARTED (~0u)
1715 #define _XABORT_EXPLICIT (1 << 0)
1716 #define _XABORT_RETRY (1 << 1)
1717 #define _XABORT_CONFLICT (1 << 2)
1718 #define _XABORT_CAPACITY (1 << 3)
1719 #define _XABORT_DEBUG (1 << 4)
1720 #define _XABORT_NESTED (1 << 5)
1721 #define _XABORT_CODE(x) ((unsigned char)(((x) >> 24) & 0xFF))
1722
1723 // Aborts for which it's worth trying again immediately
1724 #define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT)
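// Note that _XABORT_CAPACITY is deliberately not in this mask, so a capacity
// abort is treated as a hard failure and speculation on the lock is abandoned
// (see __kmp_test_adaptive_lock_only below).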
1725
1726 #define STRINGIZE_INTERNAL(arg) #arg
1727 #define STRINGIZE(arg) STRINGIZE_INTERNAL(arg)
1728
1729 // Access to RTM instructions
1730 /* A version of XBegin which returns -1 on successful speculation, and the value of EAX on
1731 an abort. This is the same definition as the compiler intrinsic that will be
1732 supported at some point. */
1733 static __inline int _xbegin() {
1734 int res = -1;
1735
1736 #if KMP_OS_WINDOWS
1737 #if KMP_ARCH_X86_64
1738 _asm {
1739 _emit 0xC7
1740 _emit 0xF8
1741 _emit 2
1742 _emit 0
1743 _emit 0
1744 _emit 0
1745 jmp L2
1746 mov res, eax
1747 L2:
1748 }
1749 #else /* IA32 */
1750 _asm {
1751 _emit 0xC7
1752 _emit 0xF8
1753 _emit 2
1754 _emit 0
1755 _emit 0
1756 _emit 0
1757 jmp L2
1758 mov res, eax
1759 L2:
1760 }
1761 #endif // KMP_ARCH_X86_64
1762 #else
1763 /* Note that %eax must be noted as killed (clobbered), because the XSR is
1764 returned in %eax(%rax) on abort. Other register values are restored, so
1765 don't need to be killed.
1766
1767 We must also mark 'res' as an input and an output, since otherwise
1768 'res=-1' may be dropped as being dead, whereas we do need the assignment on
1769 the successful (i.e., non-abort) path. */
1770 __asm__ volatile("1: .byte 0xC7; .byte 0xF8;\n"
1771 " .long 1f-1b-6\n"
1772 " jmp 2f\n"
1773 "1: movl %%eax,%0\n"
1774 "2:"
1775 : "+r"(res)::"memory", "%eax");
1776 #endif // KMP_OS_WINDOWS
1777 return res;
1778 }
1779
1780 /* Transaction end */
1781 static __inline void _xend() {
1782 #if KMP_OS_WINDOWS
1783 __asm {
1784 _emit 0x0f
1785 _emit 0x01
1786 _emit 0xd5
1787 }
1788 #else
1789 __asm__ volatile(".byte 0x0f; .byte 0x01; .byte 0xd5" ::: "memory");
1790 #endif
1791 }
1792
1793 /* This is a macro; the argument must be a single-byte constant which can be
1794 evaluated by the inline assembler, since it is emitted as a byte into the
1795 assembly code. */
1796 // clang-format off
1797 #if KMP_OS_WINDOWS
1798 #define _xabort(ARG) _asm _emit 0xc6 _asm _emit 0xf8 _asm _emit ARG
1799 #else
1800 #define _xabort(ARG) \
1801 __asm__ volatile(".byte 0xC6; .byte 0xF8; .byte " STRINGIZE(ARG):::"memory");
1802 #endif
1803 // clang-format on
1804 #endif // KMP_HAVE_RTM_INTRINSICS
1805
1806 // Statistics are collected for testing purposes.
1807 #if KMP_DEBUG_ADAPTIVE_LOCKS
1808
1809 // We accumulate speculative lock statistics when the lock is destroyed. We
1810 // keep locks that haven't been destroyed in the liveLocks list so that we can
1811 // grab their statistics too.
1812 static kmp_adaptive_lock_statistics_t destroyedStats;
1813
1814 // To hold the list of live locks.
1815 static kmp_adaptive_lock_info_t liveLocks;
1816
1817 // A lock so we can safely update the list of locks.
1818 static kmp_bootstrap_lock_t chain_lock =
1819 KMP_BOOTSTRAP_LOCK_INITIALIZER(chain_lock);
1820
1821 // Initialize the list of stats.
1822 void __kmp_init_speculative_stats() {
1823 kmp_adaptive_lock_info_t *lck = &liveLocks;
1824
1825 memset(CCAST(kmp_adaptive_lock_statistics_t *, &(lck->stats)), 0,
1826 sizeof(lck->stats));
1827 lck->stats.next = lck;
1828 lck->stats.prev = lck;
1829
1830 KMP_ASSERT(lck->stats.next->stats.prev == lck);
1831 KMP_ASSERT(lck->stats.prev->stats.next == lck);
1832
1833 __kmp_init_bootstrap_lock(&chain_lock);
1834 }
1835
1836 // Insert the lock into the circular list
1837 static void __kmp_remember_lock(kmp_adaptive_lock_info_t *lck) {
1838 __kmp_acquire_bootstrap_lock(&chain_lock);
1839
1840 lck->stats.next = liveLocks.stats.next;
1841 lck->stats.prev = &liveLocks;
1842
1843 liveLocks.stats.next = lck;
1844 lck->stats.next->stats.prev = lck;
1845
1846 KMP_ASSERT(lck->stats.next->stats.prev == lck);
1847 KMP_ASSERT(lck->stats.prev->stats.next == lck);
1848
1849 __kmp_release_bootstrap_lock(&chain_lock);
1850 }
1851
1852 static void __kmp_forget_lock(kmp_adaptive_lock_info_t *lck) {
1853 KMP_ASSERT(lck->stats.next->stats.prev == lck);
1854 KMP_ASSERT(lck->stats.prev->stats.next == lck);
1855
1856 kmp_adaptive_lock_info_t *n = lck->stats.next;
1857 kmp_adaptive_lock_info_t *p = lck->stats.prev;
1858
1859 n->stats.prev = p;
1860 p->stats.next = n;
1861 }
1862
1863 static void __kmp_zero_speculative_stats(kmp_adaptive_lock_info_t *lck) {
1864 memset(CCAST(kmp_adaptive_lock_statistics_t *, &lck->stats), 0,
1865 sizeof(lck->stats));
1866 __kmp_remember_lock(lck);
1867 }
1868
1869 static void __kmp_add_stats(kmp_adaptive_lock_statistics_t *t,
1870 kmp_adaptive_lock_info_t *lck) {
1871 kmp_adaptive_lock_statistics_t volatile *s = &lck->stats;
1872
1873 t->nonSpeculativeAcquireAttempts += lck->acquire_attempts;
1874 t->successfulSpeculations += s->successfulSpeculations;
1875 t->hardFailedSpeculations += s->hardFailedSpeculations;
1876 t->softFailedSpeculations += s->softFailedSpeculations;
1877 t->nonSpeculativeAcquires += s->nonSpeculativeAcquires;
1878 t->lemmingYields += s->lemmingYields;
1879 }
1880
1881 static void __kmp_accumulate_speculative_stats(kmp_adaptive_lock_info_t *lck) {
1882 __kmp_acquire_bootstrap_lock(&chain_lock);
1883
1884 __kmp_add_stats(&destroyedStats, lck);
1885 __kmp_forget_lock(lck);
1886
1887 __kmp_release_bootstrap_lock(&chain_lock);
1888 }
1889
1890 static float percent(kmp_uint32 count, kmp_uint32 total) {
1891 return (total == 0) ? 0.0 : (100.0 * count) / total;
1892 }
1893
1894 static FILE *__kmp_open_stats_file() {
1895 if (strcmp(__kmp_speculative_statsfile, "-") == 0)
1896 return stdout;
1897
1898 size_t buffLen = KMP_STRLEN(__kmp_speculative_statsfile) + 20;
1899 char buffer[buffLen];
1900 KMP_SNPRINTF(&buffer[0], buffLen, __kmp_speculative_statsfile,
1901 (kmp_int32)getpid());
1902 FILE *result = fopen(&buffer[0], "w");
1903
1904 // Maybe we should issue a warning here...
1905 return result ? result : stdout;
1906 }
1907
1908 void __kmp_print_speculative_stats() {
1909 kmp_adaptive_lock_statistics_t total = destroyedStats;
1910 kmp_adaptive_lock_info_t *lck;
1911
1912 for (lck = liveLocks.stats.next; lck != &liveLocks; lck = lck->stats.next) {
1913 __kmp_add_stats(&total, lck);
1914 }
1915 kmp_adaptive_lock_statistics_t *t = &total;
1916 kmp_uint32 totalSections =
1917 t->nonSpeculativeAcquires + t->successfulSpeculations;
1918 kmp_uint32 totalSpeculations = t->successfulSpeculations +
1919 t->hardFailedSpeculations +
1920 t->softFailedSpeculations;
1921 if (totalSections <= 0)
1922 return;
1923
1924 FILE *statsFile = __kmp_open_stats_file();
1925
1926 fprintf(statsFile, "Speculative lock statistics (all approximate!)\n");
1927 fprintf(statsFile, " Lock parameters: \n"
1928 " max_soft_retries : %10d\n"
1929 " max_badness : %10d\n",
1930 __kmp_adaptive_backoff_params.max_soft_retries,
1931 __kmp_adaptive_backoff_params.max_badness);
1932 fprintf(statsFile, " Non-speculative acquire attempts : %10d\n",
1933 t->nonSpeculativeAcquireAttempts);
1934 fprintf(statsFile, " Total critical sections : %10d\n",
1935 totalSections);
1936 fprintf(statsFile, " Successful speculations : %10d (%5.1f%%)\n",
1937 t->successfulSpeculations,
1938 percent(t->successfulSpeculations, totalSections));
1939 fprintf(statsFile, " Non-speculative acquires : %10d (%5.1f%%)\n",
1940 t->nonSpeculativeAcquires,
1941 percent(t->nonSpeculativeAcquires, totalSections));
1942 fprintf(statsFile, " Lemming yields : %10d\n\n",
1943 t->lemmingYields);
1944
1945 fprintf(statsFile, " Speculative acquire attempts : %10d\n",
1946 totalSpeculations);
1947 fprintf(statsFile, " Successes : %10d (%5.1f%%)\n",
1948 t->successfulSpeculations,
1949 percent(t->successfulSpeculations, totalSpeculations));
1950 fprintf(statsFile, " Soft failures : %10d (%5.1f%%)\n",
1951 t->softFailedSpeculations,
1952 percent(t->softFailedSpeculations, totalSpeculations));
1953 fprintf(statsFile, " Hard failures : %10d (%5.1f%%)\n",
1954 t->hardFailedSpeculations,
1955 percent(t->hardFailedSpeculations, totalSpeculations));
1956
1957 if (statsFile != stdout)
1958 fclose(statsFile);
1959 }
1960
1961 #define KMP_INC_STAT(lck, stat) (lck->lk.adaptive.stats.stat++)
1962 #else
1963 #define KMP_INC_STAT(lck, stat)
1964
1965 #endif // KMP_DEBUG_ADAPTIVE_LOCKS
1966
1967 static inline bool __kmp_is_unlocked_queuing_lock(kmp_queuing_lock_t *lck) {
1968 // It is enough to check that the head_id is zero.
1969 // We don't also need to check the tail.
1970 bool res = lck->lk.head_id == 0;
1971
1972 // We need a fence here, since we must ensure that no memory operations
1973 // from later in this thread float above that read.
1974 #if KMP_COMPILER_ICC
1975 _mm_mfence();
1976 #else
1977 __sync_synchronize();
1978 #endif
1979
1980 return res;
1981 }
1982
1983 // Functions for manipulating the badness
1984 static __inline void
1985 __kmp_update_badness_after_success(kmp_adaptive_lock_t *lck) {
1986 // Reset the badness to zero so we eagerly try to speculate again
1987 lck->lk.adaptive.badness = 0;
1988 KMP_INC_STAT(lck, successfulSpeculations);
1989 }
1990
1991 // Create a bit mask with one more set bit.
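// e.g. badness steps through 0 -> 1 -> 3 -> 7 -> 15 ... and stops growing
// once the next value would exceed max_badness.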
1992 static __inline void __kmp_step_badness(kmp_adaptive_lock_t *lck) {
1993 kmp_uint32 newBadness = (lck->lk.adaptive.badness << 1) | 1;
1994 if (newBadness > lck->lk.adaptive.max_badness) {
1995 return;
1996 } else {
1997 lck->lk.adaptive.badness = newBadness;
1998 }
1999 }
2000
2001 // Check whether speculation should be attempted.
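// Since badness is a mask of low-order set bits, with badness == 0x7 (for
// example) speculation is attempted only when the low three bits of
// acquire_attempts are zero, i.e. roughly one attempt in eight.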
2002 KMP_ATTRIBUTE_TARGET_RTM
2003 static __inline int __kmp_should_speculate(kmp_adaptive_lock_t *lck,
2004 kmp_int32 gtid) {
2005 kmp_uint32 badness = lck->lk.adaptive.badness;
2006 kmp_uint32 attempts = lck->lk.adaptive.acquire_attempts;
2007 int res = (attempts & badness) == 0;
2008 return res;
2009 }
2010
2011 // Attempt to acquire only the speculative lock.
2012 // Does not back off to the non-speculative lock.
2013 KMP_ATTRIBUTE_TARGET_RTM
2014 static int __kmp_test_adaptive_lock_only(kmp_adaptive_lock_t *lck,
2015 kmp_int32 gtid) {
2016 int retries = lck->lk.adaptive.max_soft_retries;
2017
2018 // We don't explicitly count the start of speculation; rather, we record the
2019 // results (success, hard fail, soft fail). The sum of all of those is the
2020 // total number of times we started speculation since all speculations must
2021 // end one of those ways.
2022 do {
2023 kmp_uint32 status = _xbegin();
2024 // Switch this in to disable actual speculation but exercise at least some
2025 // of the rest of the code. Useful for debugging...
2026 // kmp_uint32 status = _XABORT_NESTED;
2027
2028 if (status == _XBEGIN_STARTED) {
2029 /* We have successfully started speculation. Check that no-one acquired
2030 the lock for real between when we last looked and now. This also gets
2031 the lock cache line into our read-set, which we need so that we'll
2032 abort if anyone later claims it for real. */
2033 if (!__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
2034 // Lock is now visibly acquired, so someone beat us to it. Abort the
2035 // transaction so we'll restart from _xbegin with the failure status.
2036 _xabort(0x01);
2037 KMP_ASSERT2(0, "should not get here");
2038 }
2039 return 1; // Lock has been acquired (speculatively)
2040 } else {
2041 // We have aborted, update the statistics
2042 if (status & SOFT_ABORT_MASK) {
2043 KMP_INC_STAT(lck, softFailedSpeculations);
2044 // and loop round to retry.
2045 } else {
2046 KMP_INC_STAT(lck, hardFailedSpeculations);
2047 // Give up if we had a hard failure.
2048 break;
2049 }
2050 }
2051 } while (retries--); // Loop while we have retries, and didn't fail hard.
2052
2053 // Either we had a hard failure or we didn't succeed softly after
2054 // the full set of attempts, so back off the badness.
2055 __kmp_step_badness(lck);
2056 return 0;
2057 }
2058
2059 // Attempt to acquire the speculative lock, or back off to the non-speculative
2060 // one if the speculative lock cannot be acquired.
2061 // We can succeed speculatively, non-speculatively, or fail.
2062 static int __kmp_test_adaptive_lock(kmp_adaptive_lock_t *lck, kmp_int32 gtid) {
2063 // First try to acquire the lock speculatively
2064 if (__kmp_should_speculate(lck, gtid) &&
2065 __kmp_test_adaptive_lock_only(lck, gtid))
2066 return 1;
2067
2068 // Speculative acquisition failed, so try to acquire it non-speculatively.
2069 // Count the non-speculative acquire attempt
2070 lck->lk.adaptive.acquire_attempts++;
2071
2072 // Use base, non-speculative lock.
2073 if (__kmp_test_queuing_lock(GET_QLK_PTR(lck), gtid)) {
2074 KMP_INC_STAT(lck, nonSpeculativeAcquires);
2075 return 1; // Lock is acquired (non-speculatively)
2076 } else {
2077 return 0; // Failed to acquire the lock, it's already visibly locked.
2078 }
2079 }
2080
2081 static int __kmp_test_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
2082 kmp_int32 gtid) {
2083 char const *const func = "omp_test_lock";
2084 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2085 KMP_FATAL(LockIsUninitialized, func);
2086 }
2087
2088 int retval = __kmp_test_adaptive_lock(lck, gtid);
2089
2090 if (retval) {
2091 lck->lk.qlk.owner_id = gtid + 1;
2092 }
2093 return retval;
2094 }
2095
2096 // Block until we can acquire a speculative, adaptive lock. We check whether we
2097 // should be trying to speculate. If we should be, we check the real lock to see
2098 // if it is free, and, if not, pause without attempting to acquire it until it
2099 // is. Then we try the speculative acquire. This means that although we suffer
2100 // from lemmings a little (because we can't acquire the lock speculatively
2101 // until the queue of waiting threads has cleared), we don't get into a state
2102 // where we can never acquire the lock speculatively (because we force the queue
2103 // to clear by preventing new arrivals from entering the queue). This does mean
2104 // that when we're trying to break lemmings, the lock is no longer fair. However
2105 // OpenMP makes no guarantee that its locks are fair, so this isn't a real
2106 // problem.
2107 static void __kmp_acquire_adaptive_lock(kmp_adaptive_lock_t *lck,
2108 kmp_int32 gtid) {
2109 if (__kmp_should_speculate(lck, gtid)) {
2110 if (__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
2111 if (__kmp_test_adaptive_lock_only(lck, gtid))
2112 return;
2113 // We tried speculation and failed, so give up.
2114 } else {
2115 // We can't try speculation until the lock is free, so we pause here
2116 // (without suspending on the queuing lock) to allow it to drain, then
2117 // try again. All other threads will also see the same result for
2118 // __kmp_should_speculate, so they will be doing the same if they try to
2119 // claim the lock from now on.
2120 while (!__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
2121 KMP_INC_STAT(lck, lemmingYields);
2122 KMP_YIELD(TRUE);
2123 }
2124
2125 if (__kmp_test_adaptive_lock_only(lck, gtid))
2126 return;
2127 }
2128 }
2129
2130 // Speculative acquisition failed, so acquire it non-speculatively.
2131 // Count the non-speculative acquire attempt
2132 lck->lk.adaptive.acquire_attempts++;
2133
2134 __kmp_acquire_queuing_lock_timed_template<FALSE>(GET_QLK_PTR(lck), gtid);
2135 // We have acquired the base lock, so count that.
2136 KMP_INC_STAT(lck, nonSpeculativeAcquires);
2137 ANNOTATE_QUEUING_ACQUIRED(lck);
2138 }
2139
2140 static void __kmp_acquire_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
2141 kmp_int32 gtid) {
2142 char const *const func = "omp_set_lock";
2143 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2144 KMP_FATAL(LockIsUninitialized, func);
2145 }
2146 if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) == gtid) {
2147 KMP_FATAL(LockIsAlreadyOwned, func);
2148 }
2149
2150 __kmp_acquire_adaptive_lock(lck, gtid);
2151
2152 lck->lk.qlk.owner_id = gtid + 1;
2153 }
2154
2155 KMP_ATTRIBUTE_TARGET_RTM
2156 static int __kmp_release_adaptive_lock(kmp_adaptive_lock_t *lck,
2157 kmp_int32 gtid) {
2158 if (__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(
2159 lck))) { // If the lock doesn't look claimed we must be speculating.
2160 // (Or the user's code is buggy and they're releasing without locking;
2161 // if we had XTEST we'd be able to check that case...)
2162 _xend(); // Exit speculation
2163 __kmp_update_badness_after_success(lck);
2164 } else { // Since the lock *is* visibly locked we're not speculating,
2165 // so should use the underlying lock's release scheme.
2166 __kmp_release_queuing_lock(GET_QLK_PTR(lck), gtid);
2167 }
2168 return KMP_LOCK_RELEASED;
2169 }
2170
2171 static int __kmp_release_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
2172 kmp_int32 gtid) {
2173 char const *const func = "omp_unset_lock";
2174 KMP_MB(); /* in case another processor initialized lock */
2175 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2176 KMP_FATAL(LockIsUninitialized, func);
2177 }
2178 if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) == -1) {
2179 KMP_FATAL(LockUnsettingFree, func);
2180 }
2181 if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) != gtid) {
2182 KMP_FATAL(LockUnsettingSetByAnother, func);
2183 }
2184 lck->lk.qlk.owner_id = 0;
2185 __kmp_release_adaptive_lock(lck, gtid);
2186 return KMP_LOCK_RELEASED;
2187 }
2188
2189 static void __kmp_init_adaptive_lock(kmp_adaptive_lock_t *lck) {
2190 __kmp_init_queuing_lock(GET_QLK_PTR(lck));
2191 lck->lk.adaptive.badness = 0;
2192 lck->lk.adaptive.acquire_attempts = 0; // nonSpeculativeAcquireAttempts = 0;
2193 lck->lk.adaptive.max_soft_retries =
2194 __kmp_adaptive_backoff_params.max_soft_retries;
2195 lck->lk.adaptive.max_badness = __kmp_adaptive_backoff_params.max_badness;
2196 #if KMP_DEBUG_ADAPTIVE_LOCKS
2197 __kmp_zero_speculative_stats(&lck->lk.adaptive);
2198 #endif
2199 KA_TRACE(1000, ("__kmp_init_adaptive_lock: lock %p initialized\n", lck));
2200 }
2201
2202 static void __kmp_destroy_adaptive_lock(kmp_adaptive_lock_t *lck) {
2203 #if KMP_DEBUG_ADAPTIVE_LOCKS
2204 __kmp_accumulate_speculative_stats(&lck->lk.adaptive);
2205 #endif
2206 __kmp_destroy_queuing_lock(GET_QLK_PTR(lck));
2207 // Nothing needed for the speculative part.
2208 }
2209
2210 static void __kmp_destroy_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) {
2211 char const *const func = "omp_destroy_lock";
2212 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2213 KMP_FATAL(LockIsUninitialized, func);
2214 }
2215 if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) != -1) {
2216 KMP_FATAL(LockStillOwned, func);
2217 }
2218 __kmp_destroy_adaptive_lock(lck);
2219 }
2220
2221 #endif // KMP_USE_ADAPTIVE_LOCKS
2222
2223 /* ------------------------------------------------------------------------ */
2224 /* DRDPA ticket locks */
2225 /* "DRDPA" means Dynamically Reconfigurable Distributed Polling Area */
2226
2227 static kmp_int32 __kmp_get_drdpa_lock_owner(kmp_drdpa_lock_t *lck) {
2228 return lck->lk.owner_id - 1;
2229 }
2230
2231 static inline bool __kmp_is_drdpa_lock_nestable(kmp_drdpa_lock_t *lck) {
2232 return lck->lk.depth_locked != -1;
2233 }
2234
2235 __forceinline static int
2236 __kmp_acquire_drdpa_lock_timed_template(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2237 kmp_uint64 ticket = KMP_ATOMIC_INC(&lck->lk.next_ticket);
2238 kmp_uint64 mask = lck->lk.mask; // atomic load
2239 std::atomic<kmp_uint64> *polls = lck->lk.polls;
2240
2241 #ifdef USE_LOCK_PROFILE
2242 if (polls[ticket & mask] != ticket)
2243 __kmp_printf("LOCK CONTENTION: %p\n", lck);
2244 /* else __kmp_printf( "." );*/
2245 #endif /* USE_LOCK_PROFILE */
2246
2247 // Now spin-wait, but reload the polls pointer and mask, in case the
2248 // polling area has been reconfigured. Unless it is reconfigured, the
2249 // reloads stay in L1 cache and are cheap.
2250 //
2251 // Keep this code in sync with KMP_WAIT, in kmp_dispatch.cpp !!!
2252 // The current implementation of KMP_WAIT doesn't allow for mask
2253 // and poll to be re-read every spin iteration.
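// Illustrative example: with num_polls == 4 and mask == 3, the holder of
// ticket t spins on polls[t & 3] until the releasing thread stores t into
// that slot (see __kmp_release_drdpa_lock below).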
2254 kmp_uint32 spins;
2255 KMP_FSYNC_PREPARE(lck);
2256 KMP_INIT_YIELD(spins);
2257 while (polls[ticket & mask] < ticket) { // atomic load
2258 KMP_YIELD_OVERSUB_ELSE_SPIN(spins);
2259 // Re-read the mask and the poll pointer from the lock structure.
2260 //
2261 // Make certain that "mask" is read before "polls" !!!
2262 //
2263 // If another thread reconfigures the polling area and updates both
2264 // values, and we get the new value of mask but the old polls pointer, we
2265 // could access memory beyond the end of the old polling area.
2266 mask = lck->lk.mask; // atomic load
2267 polls = lck->lk.polls; // atomic load
2268 }
2269
2270 // Critical section starts here
2271 KMP_FSYNC_ACQUIRED(lck);
2272 KA_TRACE(1000, ("__kmp_acquire_drdpa_lock: ticket #%lld acquired lock %p\n",
2273 ticket, lck));
2274 lck->lk.now_serving = ticket; // non-volatile store
2275
2276 // Deallocate a garbage polling area if we know that we are the last
2277 // thread that could possibly access it.
2278 //
2279 // The >= check is in case __kmp_test_drdpa_lock() allocated the cleanup
2280 // ticket.
2281 if ((lck->lk.old_polls != NULL) && (ticket >= lck->lk.cleanup_ticket)) {
2282 __kmp_free(lck->lk.old_polls);
2283 lck->lk.old_polls = NULL;
2284 lck->lk.cleanup_ticket = 0;
2285 }
2286
2287 // Check to see if we should reconfigure the polling area.
2288 // If there is still a garbage polling area to be deallocated from a
2289 // previous reconfiguration, let a later thread reconfigure it.
2290 if (lck->lk.old_polls == NULL) {
2291 bool reconfigure = false;
2292 std::atomic<kmp_uint64> *old_polls = polls;
2293 kmp_uint32 num_polls = TCR_4(lck->lk.num_polls);
2294
2295 if (TCR_4(__kmp_nth) >
2296 (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {
2297 // We are in oversubscription mode. Contract the polling area
2298 // down to a single location, if that hasn't been done already.
2299 if (num_polls > 1) {
2300 reconfigure = true;
2301 num_polls = TCR_4(lck->lk.num_polls);
2302 mask = 0;
2303 num_polls = 1;
2304 polls = (std::atomic<kmp_uint64> *)__kmp_allocate(num_polls *
2305 sizeof(*polls));
2306 polls[0] = ticket;
2307 }
2308 } else {
2309 // We are in under/fully subscribed mode. Check the number of
2310 // threads waiting on the lock. The size of the polling area
2311 // should be at least the number of threads waiting.
2312 kmp_uint64 num_waiting = TCR_8(lck->lk.next_ticket) - ticket - 1;
2313 if (num_waiting > num_polls) {
2314 kmp_uint32 old_num_polls = num_polls;
2315 reconfigure = true;
2316 do {
2317 mask = (mask << 1) | 1;
2318 num_polls *= 2;
2319 } while (num_polls <= num_waiting);
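        // Illustrative: starting from num_polls == 2 (mask == 1) with 5
        // waiters, this loop produces num_polls == 8 and mask == 7.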
2320
2321 // Allocate the new polling area, and copy the relevant portion
2322 // of the old polling area to the new area. __kmp_allocate()
2323 // zeroes the memory it allocates, and most of the old area is
2324 // just zero padding, so we only copy the release counters.
2325 polls = (std::atomic<kmp_uint64> *)__kmp_allocate(num_polls *
2326 sizeof(*polls));
2327 kmp_uint32 i;
2328 for (i = 0; i < old_num_polls; i++) {
2329 polls[i].store(old_polls[i]);
2330 }
2331 }
2332 }
2333
2334 if (reconfigure) {
2335 // Now write the updated fields back to the lock structure.
2336 //
2337 // Make certain that "polls" is written before "mask" !!!
2338 //
2339 // If another thread picks up the new value of mask and the old polls
2340 // pointer, it could access memory beyond the end of the old polling
2341 // area.
2342 //
2343 // On x86, we need memory fences.
2344 KA_TRACE(1000, ("__kmp_acquire_drdpa_lock: ticket #%lld reconfiguring "
2345 "lock %p to %d polls\n",
2346 ticket, lck, num_polls));
2347
2348 lck->lk.old_polls = old_polls;
2349 lck->lk.polls = polls; // atomic store
2350
2351 KMP_MB();
2352
2353 lck->lk.num_polls = num_polls;
2354 lck->lk.mask = mask; // atomic store
2355
2356 KMP_MB();
2357
2358 // Only after the new polling area and mask have been flushed
2359 // to main memory can we update the cleanup ticket field.
2360 //
2361 // volatile load / non-volatile store
2362 lck->lk.cleanup_ticket = lck->lk.next_ticket;
2363 }
2364 }
2365 return KMP_LOCK_ACQUIRED_FIRST;
2366 }
2367
2368 int __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2369 int retval = __kmp_acquire_drdpa_lock_timed_template(lck, gtid);
2370 ANNOTATE_DRDPA_ACQUIRED(lck);
2371 return retval;
2372 }
2373
2374 static int __kmp_acquire_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2375 kmp_int32 gtid) {
2376 char const *const func = "omp_set_lock";
2377 if (lck->lk.initialized != lck) {
2378 KMP_FATAL(LockIsUninitialized, func);
2379 }
2380 if (__kmp_is_drdpa_lock_nestable(lck)) {
2381 KMP_FATAL(LockNestableUsedAsSimple, func);
2382 }
2383 if ((gtid >= 0) && (__kmp_get_drdpa_lock_owner(lck) == gtid)) {
2384 KMP_FATAL(LockIsAlreadyOwned, func);
2385 }
2386
2387 __kmp_acquire_drdpa_lock(lck, gtid);
2388
2389 lck->lk.owner_id = gtid + 1;
2390 return KMP_LOCK_ACQUIRED_FIRST;
2391 }
2392
2393 int __kmp_test_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2394 // First get a ticket, then read the polls pointer and the mask.
2395 // The polls pointer must be read before the mask!!! (See above)
2396 kmp_uint64 ticket = lck->lk.next_ticket; // atomic load
2397 std::atomic<kmp_uint64> *polls = lck->lk.polls;
2398 kmp_uint64 mask = lck->lk.mask; // atomic load
2399 if (polls[ticket & mask] == ticket) {
2400 kmp_uint64 next_ticket = ticket + 1;
2401 if (__kmp_atomic_compare_store_acq(&lck->lk.next_ticket, ticket,
2402 next_ticket)) {
2403 KMP_FSYNC_ACQUIRED(lck);
2404 KA_TRACE(1000, ("__kmp_test_drdpa_lock: ticket #%lld acquired lock %p\n",
2405 ticket, lck));
2406 lck->lk.now_serving = ticket; // non-volatile store
2407
2408 // Since no threads are waiting, there is no possibility that we would
2409 // want to reconfigure the polling area. We might have the cleanup ticket
2410 // value (which says that it is now safe to deallocate old_polls), but
2411 // we'll let a later thread which calls __kmp_acquire_lock do that - this
2412 // routine isn't supposed to block, and we would risk blocks if we called
2413 // __kmp_free() to do the deallocation.
2414 return TRUE;
2415 }
2416 }
2417 return FALSE;
2418 }
2419
2420 static int __kmp_test_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2421 kmp_int32 gtid) {
2422 char const *const func = "omp_test_lock";
2423 if (lck->lk.initialized != lck) {
2424 KMP_FATAL(LockIsUninitialized, func);
2425 }
2426 if (__kmp_is_drdpa_lock_nestable(lck)) {
2427 KMP_FATAL(LockNestableUsedAsSimple, func);
2428 }
2429
2430 int retval = __kmp_test_drdpa_lock(lck, gtid);
2431
2432 if (retval) {
2433 lck->lk.owner_id = gtid + 1;
2434 }
2435 return retval;
2436 }
2437
2438 int __kmp_release_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2439 // Read the ticket value from the lock data struct, then the polls pointer and
2440 // the mask. The polls pointer must be read before the mask!!! (See above)
2441 kmp_uint64 ticket = lck->lk.now_serving + 1; // non-atomic load
2442 std::atomic<kmp_uint64> *polls = lck->lk.polls; // atomic load
2443 kmp_uint64 mask = lck->lk.mask; // atomic load
2444 KA_TRACE(1000, ("__kmp_release_drdpa_lock: ticket #%lld released lock %p\n",
2445 ticket - 1, lck));
2446 KMP_FSYNC_RELEASING(lck);
2447 ANNOTATE_DRDPA_RELEASED(lck);
2448 polls[ticket & mask] = ticket; // atomic store
2449 return KMP_LOCK_RELEASED;
2450 }
2451
2452 static int __kmp_release_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2453 kmp_int32 gtid) {
2454 char const *const func = "omp_unset_lock";
2455 KMP_MB(); /* in case another processor initialized lock */
2456 if (lck->lk.initialized != lck) {
2457 KMP_FATAL(LockIsUninitialized, func);
2458 }
2459 if (__kmp_is_drdpa_lock_nestable(lck)) {
2460 KMP_FATAL(LockNestableUsedAsSimple, func);
2461 }
2462 if (__kmp_get_drdpa_lock_owner(lck) == -1) {
2463 KMP_FATAL(LockUnsettingFree, func);
2464 }
2465 if ((gtid >= 0) && (__kmp_get_drdpa_lock_owner(lck) >= 0) &&
2466 (__kmp_get_drdpa_lock_owner(lck) != gtid)) {
2467 KMP_FATAL(LockUnsettingSetByAnother, func);
2468 }
2469 lck->lk.owner_id = 0;
2470 return __kmp_release_drdpa_lock(lck, gtid);
2471 }
2472
2473 void __kmp_init_drdpa_lock(kmp_drdpa_lock_t *lck) {
2474 lck->lk.location = NULL;
2475 lck->lk.mask = 0;
2476 lck->lk.num_polls = 1;
2477 lck->lk.polls = (std::atomic<kmp_uint64> *)__kmp_allocate(
2478 lck->lk.num_polls * sizeof(*(lck->lk.polls)));
2479 lck->lk.cleanup_ticket = 0;
2480 lck->lk.old_polls = NULL;
2481 lck->lk.next_ticket = 0;
2482 lck->lk.now_serving = 0;
2483 lck->lk.owner_id = 0; // no thread owns the lock.
2484 lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks.
2485 lck->lk.initialized = lck;
2486
2487 KA_TRACE(1000, ("__kmp_init_drdpa_lock: lock %p initialized\n", lck));
2488 }
2489
2490 void __kmp_destroy_drdpa_lock(kmp_drdpa_lock_t *lck) {
2491 lck->lk.initialized = NULL;
2492 lck->lk.location = NULL;
2493 if (lck->lk.polls.load() != NULL) {
2494 __kmp_free(lck->lk.polls.load());
2495 lck->lk.polls = NULL;
2496 }
2497 if (lck->lk.old_polls != NULL) {
2498 __kmp_free(lck->lk.old_polls);
2499 lck->lk.old_polls = NULL;
2500 }
2501 lck->lk.mask = 0;
2502 lck->lk.num_polls = 0;
2503 lck->lk.cleanup_ticket = 0;
2504 lck->lk.next_ticket = 0;
2505 lck->lk.now_serving = 0;
2506 lck->lk.owner_id = 0;
2507 lck->lk.depth_locked = -1;
2508 }
2509
2510 static void __kmp_destroy_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
2511 char const *const func = "omp_destroy_lock";
2512 if (lck->lk.initialized != lck) {
2513 KMP_FATAL(LockIsUninitialized, func);
2514 }
2515 if (__kmp_is_drdpa_lock_nestable(lck)) {
2516 KMP_FATAL(LockNestableUsedAsSimple, func);
2517 }
2518 if (__kmp_get_drdpa_lock_owner(lck) != -1) {
2519 KMP_FATAL(LockStillOwned, func);
2520 }
2521 __kmp_destroy_drdpa_lock(lck);
2522 }
2523
2524 // nested drdpa ticket locks
2525
2526 int __kmp_acquire_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2527 KMP_DEBUG_ASSERT(gtid >= 0);
2528
2529 if (__kmp_get_drdpa_lock_owner(lck) == gtid) {
2530 lck->lk.depth_locked += 1;
2531 return KMP_LOCK_ACQUIRED_NEXT;
2532 } else {
2533 __kmp_acquire_drdpa_lock_timed_template(lck, gtid);
2534 ANNOTATE_DRDPA_ACQUIRED(lck);
2535 KMP_MB();
2536 lck->lk.depth_locked = 1;
2537 KMP_MB();
2538 lck->lk.owner_id = gtid + 1;
2539 return KMP_LOCK_ACQUIRED_FIRST;
2540 }
2541 }
2542
2543 static void __kmp_acquire_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2544 kmp_int32 gtid) {
2545 char const *const func = "omp_set_nest_lock";
2546 if (lck->lk.initialized != lck) {
2547 KMP_FATAL(LockIsUninitialized, func);
2548 }
2549 if (!__kmp_is_drdpa_lock_nestable(lck)) {
2550 KMP_FATAL(LockSimpleUsedAsNestable, func);
2551 }
2552 __kmp_acquire_nested_drdpa_lock(lck, gtid);
2553 }
2554
2555 int __kmp_test_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2556 int retval;
2557
2558 KMP_DEBUG_ASSERT(gtid >= 0);
2559
2560 if (__kmp_get_drdpa_lock_owner(lck) == gtid) {
2561 retval = ++lck->lk.depth_locked;
2562 } else if (!__kmp_test_drdpa_lock(lck, gtid)) {
2563 retval = 0;
2564 } else {
2565 KMP_MB();
2566 retval = lck->lk.depth_locked = 1;
2567 KMP_MB();
2568 lck->lk.owner_id = gtid + 1;
2569 }
2570 return retval;
2571 }
2572
2573 static int __kmp_test_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2574 kmp_int32 gtid) {
2575 char const *const func = "omp_test_nest_lock";
2576 if (lck->lk.initialized != lck) {
2577 KMP_FATAL(LockIsUninitialized, func);
2578 }
2579 if (!__kmp_is_drdpa_lock_nestable(lck)) {
2580 KMP_FATAL(LockSimpleUsedAsNestable, func);
2581 }
2582 return __kmp_test_nested_drdpa_lock(lck, gtid);
2583 }
2584
2585 int __kmp_release_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2586 KMP_DEBUG_ASSERT(gtid >= 0);
2587
2588 KMP_MB();
2589 if (--(lck->lk.depth_locked) == 0) {
2590 KMP_MB();
2591 lck->lk.owner_id = 0;
2592 __kmp_release_drdpa_lock(lck, gtid);
2593 return KMP_LOCK_RELEASED;
2594 }
2595 return KMP_LOCK_STILL_HELD;
2596 }
2597
2598 static int __kmp_release_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2599 kmp_int32 gtid) {
2600 char const *const func = "omp_unset_nest_lock";
2601 KMP_MB(); /* in case another processor initialized lock */
2602 if (lck->lk.initialized != lck) {
2603 KMP_FATAL(LockIsUninitialized, func);
2604 }
2605 if (!__kmp_is_drdpa_lock_nestable(lck)) {
2606 KMP_FATAL(LockSimpleUsedAsNestable, func);
2607 }
2608 if (__kmp_get_drdpa_lock_owner(lck) == -1) {
2609 KMP_FATAL(LockUnsettingFree, func);
2610 }
2611 if (__kmp_get_drdpa_lock_owner(lck) != gtid) {
2612 KMP_FATAL(LockUnsettingSetByAnother, func);
2613 }
2614 return __kmp_release_nested_drdpa_lock(lck, gtid);
2615 }
2616
2617 void __kmp_init_nested_drdpa_lock(kmp_drdpa_lock_t *lck) {
2618 __kmp_init_drdpa_lock(lck);
2619 lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
2620 }
2621
2622 void __kmp_destroy_nested_drdpa_lock(kmp_drdpa_lock_t *lck) {
2623 __kmp_destroy_drdpa_lock(lck);
2624 lck->lk.depth_locked = 0;
2625 }
2626
2627 static void __kmp_destroy_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
2628 char const *const func = "omp_destroy_nest_lock";
2629 if (lck->lk.initialized != lck) {
2630 KMP_FATAL(LockIsUninitialized, func);
2631 }
2632 if (!__kmp_is_drdpa_lock_nestable(lck)) {
2633 KMP_FATAL(LockSimpleUsedAsNestable, func);
2634 }
2635 if (__kmp_get_drdpa_lock_owner(lck) != -1) {
2636 KMP_FATAL(LockStillOwned, func);
2637 }
2638 __kmp_destroy_nested_drdpa_lock(lck);
2639 }
2640
2641 // Access functions to fields which don't exist for all lock kinds.
2642
2643 static const ident_t *__kmp_get_drdpa_lock_location(kmp_drdpa_lock_t *lck) {
2644 return lck->lk.location;
2645 }
2646
2647 static void __kmp_set_drdpa_lock_location(kmp_drdpa_lock_t *lck,
2648 const ident_t *loc) {
2649 lck->lk.location = loc;
2650 }
2651
2652 static kmp_lock_flags_t __kmp_get_drdpa_lock_flags(kmp_drdpa_lock_t *lck) {
2653 return lck->lk.flags;
2654 }
2655
2656 static void __kmp_set_drdpa_lock_flags(kmp_drdpa_lock_t *lck,
2657 kmp_lock_flags_t flags) {
2658 lck->lk.flags = flags;
2659 }
2660
2661 // Time stamp counter
2662 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2663 #define __kmp_tsc() __kmp_hardware_timestamp()
2664 // Runtime's default backoff parameters
2665 kmp_backoff_t __kmp_spin_backoff_params = {1, 4096, 100};
2666 #else
2667 // Use nanoseconds for other platforms
2668 extern kmp_uint64 __kmp_now_nsec();
2669 kmp_backoff_t __kmp_spin_backoff_params = {1, 256, 100};
2670 #define __kmp_tsc() __kmp_now_nsec()
2671 #endif
2672
2673 // A useful predicate for dealing with timestamps that may wrap.
2674 // Is a before b? Since the timestamps may wrap, this is asking whether it's
2675 // shorter to go clockwise from a to b around the clock-face, or anti-clockwise.
2676 // Times where going clockwise is less distance than going anti-clockwise
2677 // are in the future, others are in the past. E.g. with a = MAX-1, b = MAX+1 (=0),
2678 // a > b (true) does not mean a reached b; whereas signed(a) = -2 and
2679 // signed(b) = 0 capture the actual difference.
2680 static inline bool before(kmp_uint64 a, kmp_uint64 b) {
2681 return ((kmp_int64)b - (kmp_int64)a) > 0;
2682 }
2683
2684 // Truncated binary exponential backoff function
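// Sketch of the step progression (assuming max_backoff is a power of two, as
// in the defaults above): step grows 1 -> 3 -> 7 -> ... and saturates at
// max_backoff - 1 because of the mask applied in the final update.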
2685 void __kmp_spin_backoff(kmp_backoff_t *boff) {
2686 // We could flatten this loop, but making it a nested loop gives better results
2687 kmp_uint32 i;
2688 for (i = boff->step; i > 0; i--) {
2689 kmp_uint64 goal = __kmp_tsc() + boff->min_tick;
2690 do {
2691 KMP_CPU_PAUSE();
2692 } while (before(__kmp_tsc(), goal));
2693 }
2694 boff->step = (boff->step << 1 | 1) & (boff->max_backoff - 1);
2695 }
2696
2697 #if KMP_USE_DYNAMIC_LOCK
2698
2699 // Direct lock initializer. It simply writes a tag to the low 8 bits of the
2700 // lock word.
2701 static void __kmp_init_direct_lock(kmp_dyna_lock_t *lck,
2702 kmp_dyna_lockseq_t seq) {
2703 TCW_4(*lck, KMP_GET_D_TAG(seq));
2704 KA_TRACE(
2705 20,
2706 ("__kmp_init_direct_lock: initialized direct lock with type#%d\n", seq));
2707 }
2708
2709 #if KMP_USE_TSX
2710
2711 // HLE lock functions - imported from the testbed runtime.
2712 #define HLE_ACQUIRE ".byte 0xf2;"
2713 #define HLE_RELEASE ".byte 0xf3;"
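// 0xf2 and 0xf3 are the XACQUIRE/XRELEASE prefixes. On hardware without HLE
// they decode as the legacy REPNE/REPE prefixes, which are ignored on these
// instructions, so the code remains backward compatible.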
2714
2715 static inline kmp_uint32 swap4(kmp_uint32 volatile *p, kmp_uint32 v) {
2716 __asm__ volatile(HLE_ACQUIRE "xchg %1,%0" : "+r"(v), "+m"(*p) : : "memory");
2717 return v;
2718 }
2719
2720 static void __kmp_destroy_hle_lock(kmp_dyna_lock_t *lck) { TCW_4(*lck, 0); }
2721
2722 static void __kmp_destroy_hle_lock_with_checks(kmp_dyna_lock_t *lck) {
2723 TCW_4(*lck, 0);
2724 }
2725
2726 static void __kmp_acquire_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
2727 // Use gtid for KMP_LOCK_BUSY if necessary
2728 if (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle)) {
2729 int delay = 1;
2730 do {
2731 while (*(kmp_uint32 volatile *)lck != KMP_LOCK_FREE(hle)) {
2732 for (int i = delay; i != 0; --i)
2733 KMP_CPU_PAUSE();
2734 delay = ((delay << 1) | 1) & 7;
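        // delay grows 1 -> 3 -> 7 and is then capped at 7 pause iterations
        // by the & 7 mask.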
2735 }
2736 } while (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle));
2737 }
2738 }
2739
2740 static void __kmp_acquire_hle_lock_with_checks(kmp_dyna_lock_t *lck,
2741 kmp_int32 gtid) {
2742 __kmp_acquire_hle_lock(lck, gtid); // TODO: add checks
2743 }
2744
2745 static int __kmp_release_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
2746 __asm__ volatile(HLE_RELEASE "movl %1,%0"
2747 : "=m"(*lck)
2748 : "r"(KMP_LOCK_FREE(hle))
2749 : "memory");
2750 return KMP_LOCK_RELEASED;
2751 }
2752
2753 static int __kmp_release_hle_lock_with_checks(kmp_dyna_lock_t *lck,
2754 kmp_int32 gtid) {
2755 return __kmp_release_hle_lock(lck, gtid); // TODO: add checks
2756 }
2757
2758 static int __kmp_test_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
2759 return swap4(lck, KMP_LOCK_BUSY(1, hle)) == KMP_LOCK_FREE(hle);
2760 }
2761
2762 static int __kmp_test_hle_lock_with_checks(kmp_dyna_lock_t *lck,
2763 kmp_int32 gtid) {
2764 return __kmp_test_hle_lock(lck, gtid); // TODO: add checks
2765 }
2766
2767 static void __kmp_init_rtm_lock(kmp_queuing_lock_t *lck) {
2768 __kmp_init_queuing_lock(lck);
2769 }
2770
2771 static void __kmp_destroy_rtm_lock(kmp_queuing_lock_t *lck) {
2772 __kmp_destroy_queuing_lock(lck);
2773 }
2774
2775 static void __kmp_destroy_rtm_lock_with_checks(kmp_queuing_lock_t *lck) {
2776 __kmp_destroy_queuing_lock_with_checks(lck);
2777 }
2778
2779 KMP_ATTRIBUTE_TARGET_RTM
2780 static void __kmp_acquire_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
2781 unsigned retries = 3, status;
2782 do {
2783 status = _xbegin();
2784 if (status == _XBEGIN_STARTED) {
2785 if (__kmp_is_unlocked_queuing_lock(lck))
2786 return;
2787 _xabort(0xff);
2788 }
2789 if ((status & _XABORT_EXPLICIT) && _XABORT_CODE(status) == 0xff) {
2790 // Wait until lock becomes free
2791 while (!__kmp_is_unlocked_queuing_lock(lck)) {
2792 KMP_YIELD(TRUE);
2793 }
2794 } else if (!(status & _XABORT_RETRY))
2795 break;
2796 } while (retries--);
2797
2798 // Fall-back non-speculative lock (xchg)
2799 __kmp_acquire_queuing_lock(lck, gtid);
2800 }
2801
2802 static void __kmp_acquire_rtm_lock_with_checks(kmp_queuing_lock_t *lck,
2803 kmp_int32 gtid) {
2804 __kmp_acquire_rtm_lock(lck, gtid);
2805 }
2806
2807 KMP_ATTRIBUTE_TARGET_RTM
2808 static int __kmp_release_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
2809 if (__kmp_is_unlocked_queuing_lock(lck)) {
2810 // Releasing from speculation
2811 _xend();
2812 } else {
2813 // Releasing from a real lock
2814 __kmp_release_queuing_lock(lck, gtid);
2815 }
2816 return KMP_LOCK_RELEASED;
2817 }
2818
2819 static int __kmp_release_rtm_lock_with_checks(kmp_queuing_lock_t *lck,
2820 kmp_int32 gtid) {
2821 return __kmp_release_rtm_lock(lck, gtid);
2822 }
2823
2824 KMP_ATTRIBUTE_TARGET_RTM
2825 static int __kmp_test_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
2826 unsigned retries = 3, status;
2827 do {
2828 status = _xbegin();
2829 if (status == _XBEGIN_STARTED && __kmp_is_unlocked_queuing_lock(lck)) {
2830 return 1;
2831 }
2832 if (!(status & _XABORT_RETRY))
2833 break;
2834 } while (retries--);
2835
2836 return (__kmp_is_unlocked_queuing_lock(lck)) ? 1 : 0;
2837 }
2838
2839 static int __kmp_test_rtm_lock_with_checks(kmp_queuing_lock_t *lck,
2840 kmp_int32 gtid) {
2841 return __kmp_test_rtm_lock(lck, gtid);
2842 }
2843
2844 #endif // KMP_USE_TSX
2845
2846 // Entry functions for indirect locks (first element of direct lock jump tables)
2847 static void __kmp_init_indirect_lock(kmp_dyna_lock_t *l,
2848 kmp_dyna_lockseq_t tag);
2849 static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock);
2850 static int __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
2851 static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
2852 static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
2853 static int __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
2854 kmp_int32);
2855 static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
2856 kmp_int32);
2857 static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
2858 kmp_int32);
2859
2860 // Lock function definitions for the union parameter type
2861 #define KMP_FOREACH_LOCK_KIND(m, a) m(ticket, a) m(queuing, a) m(drdpa, a)
2862
2863 #define expand1(lk, op) \
2864 static void __kmp_##op##_##lk##_##lock(kmp_user_lock_p lock) { \
2865 __kmp_##op##_##lk##_##lock(&lock->lk); \
2866 }
2867 #define expand2(lk, op) \
2868 static int __kmp_##op##_##lk##_##lock(kmp_user_lock_p lock, \
2869 kmp_int32 gtid) { \
2870 return __kmp_##op##_##lk##_##lock(&lock->lk, gtid); \
2871 }
2872 #define expand3(lk, op) \
2873 static void __kmp_set_##lk##_##lock_flags(kmp_user_lock_p lock, \
2874 kmp_lock_flags_t flags) { \
2875 __kmp_set_##lk##_lock_flags(&lock->lk, flags); \
2876 }
2877 #define expand4(lk, op) \
2878 static void __kmp_set_##lk##_##lock_location(kmp_user_lock_p lock, \
2879 const ident_t *loc) { \
2880 __kmp_set_##lk##_lock_location(&lock->lk, loc); \
2881 }
2882
2883 KMP_FOREACH_LOCK_KIND(expand1, init)
2884 KMP_FOREACH_LOCK_KIND(expand1, init_nested)
2885 KMP_FOREACH_LOCK_KIND(expand1, destroy)
2886 KMP_FOREACH_LOCK_KIND(expand1, destroy_nested)
2887 KMP_FOREACH_LOCK_KIND(expand2, acquire)
2888 KMP_FOREACH_LOCK_KIND(expand2, acquire_nested)
2889 KMP_FOREACH_LOCK_KIND(expand2, release)
2890 KMP_FOREACH_LOCK_KIND(expand2, release_nested)
2891 KMP_FOREACH_LOCK_KIND(expand2, test)
2892 KMP_FOREACH_LOCK_KIND(expand2, test_nested)
2893 KMP_FOREACH_LOCK_KIND(expand3, )
2894 KMP_FOREACH_LOCK_KIND(expand4, )
2895
2896 #undef expand1
2897 #undef expand2
2898 #undef expand3
2899 #undef expand4
2900
2901 // Jump tables for the indirect lock functions
2902 // Only fill in the odd entries; that avoids the need to shift out the low bit.
2903
2904 // init functions
2905 #define expand(l, op) 0, __kmp_init_direct_lock,
2906 void (*__kmp_direct_init[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t) = {
2907 __kmp_init_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, init)};
2908 #undef expand
2909
2910 // destroy functions
2911 #define expand(l, op) 0, (void (*)(kmp_dyna_lock_t *))__kmp_##op##_##l##_lock,
2912 static void (*direct_destroy[])(kmp_dyna_lock_t *) = {
2913 __kmp_destroy_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, destroy)};
2914 #undef expand
2915 #define expand(l, op) \
2916 0, (void (*)(kmp_dyna_lock_t *))__kmp_destroy_##l##_lock_with_checks,
2917 static void (*direct_destroy_check[])(kmp_dyna_lock_t *) = {
2918 __kmp_destroy_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, destroy)};
2919 #undef expand
2920
2921 // set/acquire functions
2922 #define expand(l, op) \
2923 0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock,
2924 static int (*direct_set[])(kmp_dyna_lock_t *, kmp_int32) = {
2925 __kmp_set_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, acquire)};
2926 #undef expand
2927 #define expand(l, op) \
2928 0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock_with_checks,
2929 static int (*direct_set_check[])(kmp_dyna_lock_t *, kmp_int32) = {
2930 __kmp_set_indirect_lock_with_checks, 0,
2931 KMP_FOREACH_D_LOCK(expand, acquire)};
2932 #undef expand
2933
2934 // unset/release and test functions
2935 #define expand(l, op) \
2936 0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock,
2937 static int (*direct_unset[])(kmp_dyna_lock_t *, kmp_int32) = {
2938 __kmp_unset_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, release)};
2939 static int (*direct_test[])(kmp_dyna_lock_t *, kmp_int32) = {
2940 __kmp_test_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, test)};
2941 #undef expand
2942 #define expand(l, op) \
2943 0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock_with_checks,
2944 static int (*direct_unset_check[])(kmp_dyna_lock_t *, kmp_int32) = {
2945 __kmp_unset_indirect_lock_with_checks, 0,
2946 KMP_FOREACH_D_LOCK(expand, release)};
2947 static int (*direct_test_check[])(kmp_dyna_lock_t *, kmp_int32) = {
2948 __kmp_test_indirect_lock_with_checks, 0, KMP_FOREACH_D_LOCK(expand, test)};
2949 #undef expand
2950
2951 // Exposes only one set of jump tables (*lock or *lock_with_checks).
2952 void (**__kmp_direct_destroy)(kmp_dyna_lock_t *) = 0;
2953 int (**__kmp_direct_set)(kmp_dyna_lock_t *, kmp_int32) = 0;
2954 int (**__kmp_direct_unset)(kmp_dyna_lock_t *, kmp_int32) = 0;
2955 int (**__kmp_direct_test)(kmp_dyna_lock_t *, kmp_int32) = 0;
2956
2957 // Jump tables for the indirect lock functions
2958 #define expand(l, op) (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock,
2959 void (*__kmp_indirect_init[])(kmp_user_lock_p) = {
2960 KMP_FOREACH_I_LOCK(expand, init)};
2961 #undef expand
2962
2963 #define expand(l, op) (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock,
2964 static void (*indirect_destroy[])(kmp_user_lock_p) = {
2965 KMP_FOREACH_I_LOCK(expand, destroy)};
2966 #undef expand
2967 #define expand(l, op) \
2968 (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock_with_checks,
2969 static void (*indirect_destroy_check[])(kmp_user_lock_p) = {
2970 KMP_FOREACH_I_LOCK(expand, destroy)};
2971 #undef expand
2972
2973 // set/acquire functions
2974 #define expand(l, op) \
2975 (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock,
2976 static int (*indirect_set[])(kmp_user_lock_p,
2977 kmp_int32) = {KMP_FOREACH_I_LOCK(expand, acquire)};
2978 #undef expand
2979 #define expand(l, op) \
2980 (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock_with_checks,
2981 static int (*indirect_set_check[])(kmp_user_lock_p, kmp_int32) = {
2982 KMP_FOREACH_I_LOCK(expand, acquire)};
2983 #undef expand
2984
2985 // unset/release and test functions
2986 #define expand(l, op) \
2987 (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock,
2988 static int (*indirect_unset[])(kmp_user_lock_p, kmp_int32) = {
2989 KMP_FOREACH_I_LOCK(expand, release)};
2990 static int (*indirect_test[])(kmp_user_lock_p,
2991 kmp_int32) = {KMP_FOREACH_I_LOCK(expand, test)};
2992 #undef expand
2993 #define expand(l, op) \
2994 (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock_with_checks,
2995 static int (*indirect_unset_check[])(kmp_user_lock_p, kmp_int32) = {
2996 KMP_FOREACH_I_LOCK(expand, release)};
2997 static int (*indirect_test_check[])(kmp_user_lock_p, kmp_int32) = {
2998 KMP_FOREACH_I_LOCK(expand, test)};
2999 #undef expand
3000
3001 // Exposes only one set of jump tables (*lock or *lock_with_checks).
3002 void (**__kmp_indirect_destroy)(kmp_user_lock_p) = 0;
3003 int (**__kmp_indirect_set)(kmp_user_lock_p, kmp_int32) = 0;
3004 int (**__kmp_indirect_unset)(kmp_user_lock_p, kmp_int32) = 0;
3005 int (**__kmp_indirect_test)(kmp_user_lock_p, kmp_int32) = 0;
3006
3007 // Lock index table.
3008 kmp_indirect_lock_table_t __kmp_i_lock_table;
3009
3010 // Size of indirect locks.
3011 static kmp_uint32 __kmp_indirect_lock_size[KMP_NUM_I_LOCKS] = {0};
3012
3013 // Jump tables for lock accessor/modifier.
3014 void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
3015 const ident_t *) = {0};
3016 void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
3017 kmp_lock_flags_t) = {0};
3018 const ident_t *(*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(
3019 kmp_user_lock_p) = {0};
3020 kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(
3021 kmp_user_lock_p) = {0};
3022
3023 // Use different lock pools for different lock types.
3024 static kmp_indirect_lock_t *__kmp_indirect_lock_pool[KMP_NUM_I_LOCKS] = {0};
3025
3026 // User lock allocator for dynamically dispatched indirect locks. Every entry of
3027 // the indirect lock table holds the address and type of the allocated indirect
3028 // lock (kmp_indirect_lock_t), and the size of the table doubles when it is
3029 // full. A destroyed indirect lock object is returned to the reusable pool of
3030 // locks, unique to each lock type.
3031 kmp_indirect_lock_t *__kmp_allocate_indirect_lock(void **user_lock,
3032 kmp_int32 gtid,
3033 kmp_indirect_locktag_t tag) {
3034 kmp_indirect_lock_t *lck;
3035 kmp_lock_index_t idx;
3036
3037 __kmp_acquire_lock(&__kmp_global_lock, gtid);
3038
3039 if (__kmp_indirect_lock_pool[tag] != NULL) {
3040 // Reuse the allocated and destroyed lock object
3041 lck = __kmp_indirect_lock_pool[tag];
3042 if (OMP_LOCK_T_SIZE < sizeof(void *))
3043 idx = lck->lock->pool.index;
3044 __kmp_indirect_lock_pool[tag] = (kmp_indirect_lock_t *)lck->lock->pool.next;
3045 KA_TRACE(20, ("__kmp_allocate_indirect_lock: reusing an existing lock %p\n",
3046 lck));
3047 } else {
3048 idx = __kmp_i_lock_table.next;
3049 // Check capacity and double the size if it is full
3050 if (idx == __kmp_i_lock_table.size) {
3051 // Double up the space for block pointers
3052 int row = __kmp_i_lock_table.size / KMP_I_LOCK_CHUNK;
3053 kmp_indirect_lock_t **new_table = (kmp_indirect_lock_t **)__kmp_allocate(
3054 2 * row * sizeof(kmp_indirect_lock_t *));
3055 KMP_MEMCPY(new_table, __kmp_i_lock_table.table,
3056 row * sizeof(kmp_indirect_lock_t *));
3057 kmp_indirect_lock_t **old_table = __kmp_i_lock_table.table;
3058 __kmp_i_lock_table.table = new_table;
3059 __kmp_free(old_table);
3060 // Allocate new objects in the new blocks
3061 for (int i = row; i < 2 * row; ++i)
3062 *(__kmp_i_lock_table.table + i) = (kmp_indirect_lock_t *)__kmp_allocate(
3063 KMP_I_LOCK_CHUNK * sizeof(kmp_indirect_lock_t));
3064 __kmp_i_lock_table.size = 2 * idx;
3065 }
3066 __kmp_i_lock_table.next++;
3067 lck = KMP_GET_I_LOCK(idx);
3068 // Allocate a new base lock object
3069 lck->lock = (kmp_user_lock_p)__kmp_allocate(__kmp_indirect_lock_size[tag]);
3070 KA_TRACE(20,
3071 ("__kmp_allocate_indirect_lock: allocated a new lock %p\n", lck));
3072 }
3073
3074 __kmp_release_lock(&__kmp_global_lock, gtid);
3075
3076 lck->type = tag;
3077
3078 if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3079 *((kmp_lock_index_t *)user_lock) = idx
3080 << 1; // indirect lock word must be even
3081 } else {
3082 *((kmp_indirect_lock_t **)user_lock) = lck;
3083 }
3084
3085 return lck;
3086 }
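
// Illustrative sketch (not compiled), assuming the KMP_EXTRACT_I_INDEX and
// KMP_GET_I_LOCK macros in kmp_lock.h follow the chunked-table layout used
// above: when OMP_LOCK_T_SIZE < sizeof(void *), the user's lock word stores
// (idx << 1), so it is always even and can be distinguished from a direct lock.
//
//   kmp_lock_index_t word = *(kmp_lock_index_t *)user_lock; // even => indirect
//   kmp_lock_index_t idx = word >> 1;                       // table index
//   kmp_indirect_lock_t *l =
//       __kmp_i_lock_table.table[idx / KMP_I_LOCK_CHUNK] + idx % KMP_I_LOCK_CHUNK;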
3087
3088 // User lock lookup for dynamically dispatched locks.
3089 static __forceinline kmp_indirect_lock_t *
3090 __kmp_lookup_indirect_lock(void **user_lock, const char *func) {
3091 if (__kmp_env_consistency_check) {
3092 kmp_indirect_lock_t *lck = NULL;
3093 if (user_lock == NULL) {
3094 KMP_FATAL(LockIsUninitialized, func);
3095 }
3096 if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3097 kmp_lock_index_t idx = KMP_EXTRACT_I_INDEX(user_lock);
3098 if (idx >= __kmp_i_lock_table.size) {
3099 KMP_FATAL(LockIsUninitialized, func);
3100 }
3101 lck = KMP_GET_I_LOCK(idx);
3102 } else {
3103 lck = *((kmp_indirect_lock_t **)user_lock);
3104 }
3105 if (lck == NULL) {
3106 KMP_FATAL(LockIsUninitialized, func);
3107 }
3108 return lck;
3109 } else {
3110 if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3111 return KMP_GET_I_LOCK(KMP_EXTRACT_I_INDEX(user_lock));
3112 } else {
3113 return *((kmp_indirect_lock_t **)user_lock);
3114 }
3115 }
3116 }
3117
3118 static void __kmp_init_indirect_lock(kmp_dyna_lock_t *lock,
3119 kmp_dyna_lockseq_t seq) {
3120 #if KMP_USE_ADAPTIVE_LOCKS
3121 if (seq == lockseq_adaptive && !__kmp_cpuinfo.rtm) {
3122 KMP_WARNING(AdaptiveNotSupported, "kmp_lockseq_t", "adaptive");
3123 seq = lockseq_queuing;
3124 }
3125 #endif
3126 #if KMP_USE_TSX
3127 if (seq == lockseq_rtm && !__kmp_cpuinfo.rtm) {
3128 seq = lockseq_queuing;
3129 }
3130 #endif
3131 kmp_indirect_locktag_t tag = KMP_GET_I_TAG(seq);
3132 kmp_indirect_lock_t *l =
3133 __kmp_allocate_indirect_lock((void **)lock, __kmp_entry_gtid(), tag);
3134 KMP_I_LOCK_FUNC(l, init)(l->lock);
3135 KA_TRACE(
3136 20, ("__kmp_init_indirect_lock: initialized indirect lock with type#%d\n",
3137 seq));
3138 }
3139
3140 static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock) {
3141 kmp_uint32 gtid = __kmp_entry_gtid();
3142 kmp_indirect_lock_t *l =
3143 __kmp_lookup_indirect_lock((void **)lock, "omp_destroy_lock");
3144 KMP_I_LOCK_FUNC(l, destroy)(l->lock);
3145 kmp_indirect_locktag_t tag = l->type;
3146
3147 __kmp_acquire_lock(&__kmp_global_lock, gtid);
3148
3149 // Use the base lock's space to keep the pool chain.
3150 l->lock->pool.next = (kmp_user_lock_p)__kmp_indirect_lock_pool[tag];
3151 if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3152 l->lock->pool.index = KMP_EXTRACT_I_INDEX(lock);
3153 }
3154 __kmp_indirect_lock_pool[tag] = l;
3155
3156 __kmp_release_lock(&__kmp_global_lock, gtid);
3157 }
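
// Illustrative sketch (not compiled) of the per-tag free list maintained above:
// destroy pushes the object onto __kmp_indirect_lock_pool[tag] by reusing the
// base lock's storage as a "pool" node, and __kmp_allocate_indirect_lock pops
// it again on the next allocation of the same tag.
//
//   // push (destroy):  l->lock->pool.next = pool[tag];  pool[tag] = l;
//   // pop  (allocate): l = pool[tag];
//   //                  pool[tag] = (kmp_indirect_lock_t *)l->lock->pool.next;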
3158
3159 static int __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
3160 kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
3161 return KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
3162 }
3163
3164 static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
3165 kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
3166 return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid);
3167 }
3168
3169 static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
3170 kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
3171 return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
3172 }
3173
3174 static int __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
3175 kmp_int32 gtid) {
3176 kmp_indirect_lock_t *l =
3177 __kmp_lookup_indirect_lock((void **)lock, "omp_set_lock");
3178 return KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
3179 }
3180
3181 static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
3182 kmp_int32 gtid) {
3183 kmp_indirect_lock_t *l =
3184 __kmp_lookup_indirect_lock((void **)lock, "omp_unset_lock");
3185 return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid);
3186 }
3187
3188 static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
3189 kmp_int32 gtid) {
3190 kmp_indirect_lock_t *l =
3191 __kmp_lookup_indirect_lock((void **)lock, "omp_test_lock");
3192 return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
3193 }
3194
3195 kmp_dyna_lockseq_t __kmp_user_lock_seq = lockseq_queuing;
3196
3197 // This is used only in kmp_error.cpp when consistency checking is on.
3198 kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p lck, kmp_uint32 seq) {
3199 switch (seq) {
3200 case lockseq_tas:
3201 case lockseq_nested_tas:
3202 return __kmp_get_tas_lock_owner((kmp_tas_lock_t *)lck);
3203 #if KMP_USE_FUTEX
3204 case lockseq_futex:
3205 case lockseq_nested_futex:
3206 return __kmp_get_futex_lock_owner((kmp_futex_lock_t *)lck);
3207 #endif
3208 case lockseq_ticket:
3209 case lockseq_nested_ticket:
3210 return __kmp_get_ticket_lock_owner((kmp_ticket_lock_t *)lck);
3211 case lockseq_queuing:
3212 case lockseq_nested_queuing:
3213 #if KMP_USE_ADAPTIVE_LOCKS
3214 case lockseq_adaptive:
3215 #endif
3216 return __kmp_get_queuing_lock_owner((kmp_queuing_lock_t *)lck);
3217 case lockseq_drdpa:
3218 case lockseq_nested_drdpa:
3219 return __kmp_get_drdpa_lock_owner((kmp_drdpa_lock_t *)lck);
3220 default:
3221 return 0;
3222 }
3223 }
3224
3225 // Initializes data for dynamic user locks.
3226 void __kmp_init_dynamic_user_locks() {
3227 // Initialize jump table for the lock functions
3228 if (__kmp_env_consistency_check) {
3229 __kmp_direct_set = direct_set_check;
3230 __kmp_direct_unset = direct_unset_check;
3231 __kmp_direct_test = direct_test_check;
3232 __kmp_direct_destroy = direct_destroy_check;
3233 __kmp_indirect_set = indirect_set_check;
3234 __kmp_indirect_unset = indirect_unset_check;
3235 __kmp_indirect_test = indirect_test_check;
3236 __kmp_indirect_destroy = indirect_destroy_check;
3237 } else {
3238 __kmp_direct_set = direct_set;
3239 __kmp_direct_unset = direct_unset;
3240 __kmp_direct_test = direct_test;
3241 __kmp_direct_destroy = direct_destroy;
3242 __kmp_indirect_set = indirect_set;
3243 __kmp_indirect_unset = indirect_unset;
3244 __kmp_indirect_test = indirect_test;
3245 __kmp_indirect_destroy = indirect_destroy;
3246 }
3247 // If the user locks have already been initialized, then return. Allow the
3248 // switch between different KMP_CONSISTENCY_CHECK values, but do not allocate
3249 // new lock tables if they have already been allocated.
3250 if (__kmp_init_user_locks)
3251 return;
3252
3253 // Initialize lock index table
3254 __kmp_i_lock_table.size = KMP_I_LOCK_CHUNK;
3255 __kmp_i_lock_table.table =
3256 (kmp_indirect_lock_t **)__kmp_allocate(sizeof(kmp_indirect_lock_t *));
3257 *(__kmp_i_lock_table.table) = (kmp_indirect_lock_t *)__kmp_allocate(
3258 KMP_I_LOCK_CHUNK * sizeof(kmp_indirect_lock_t));
3259 __kmp_i_lock_table.next = 0;
3260
3261 // Indirect lock size
3262 __kmp_indirect_lock_size[locktag_ticket] = sizeof(kmp_ticket_lock_t);
3263 __kmp_indirect_lock_size[locktag_queuing] = sizeof(kmp_queuing_lock_t);
3264 #if KMP_USE_ADAPTIVE_LOCKS
3265 __kmp_indirect_lock_size[locktag_adaptive] = sizeof(kmp_adaptive_lock_t);
3266 #endif
3267 __kmp_indirect_lock_size[locktag_drdpa] = sizeof(kmp_drdpa_lock_t);
3268 #if KMP_USE_TSX
3269 __kmp_indirect_lock_size[locktag_rtm] = sizeof(kmp_queuing_lock_t);
3270 #endif
3271 __kmp_indirect_lock_size[locktag_nested_tas] = sizeof(kmp_tas_lock_t);
3272 #if KMP_USE_FUTEX
3273 __kmp_indirect_lock_size[locktag_nested_futex] = sizeof(kmp_futex_lock_t);
3274 #endif
3275 __kmp_indirect_lock_size[locktag_nested_ticket] = sizeof(kmp_ticket_lock_t);
3276 __kmp_indirect_lock_size[locktag_nested_queuing] = sizeof(kmp_queuing_lock_t);
3277 __kmp_indirect_lock_size[locktag_nested_drdpa] = sizeof(kmp_drdpa_lock_t);
3278
3279 // Initialize lock accessor/modifier
3280 #define fill_jumps(table, expand, sep) \
3281 { \
3282 table[locktag##sep##ticket] = expand(ticket); \
3283 table[locktag##sep##queuing] = expand(queuing); \
3284 table[locktag##sep##drdpa] = expand(drdpa); \
3285 }
3286
3287 #if KMP_USE_ADAPTIVE_LOCKS
3288 #define fill_table(table, expand) \
3289 { \
3290 fill_jumps(table, expand, _); \
3291 table[locktag_adaptive] = expand(queuing); \
3292 fill_jumps(table, expand, _nested_); \
3293 }
3294 #else
3295 #define fill_table(table, expand) \
3296 { \
3297 fill_jumps(table, expand, _); \
3298 fill_jumps(table, expand, _nested_); \
3299 }
3300 #endif // KMP_USE_ADAPTIVE_LOCKS
3301
3302 #define expand(l) \
3303 (void (*)(kmp_user_lock_p, const ident_t *)) __kmp_set_##l##_lock_location
3304 fill_table(__kmp_indirect_set_location, expand);
3305 #undef expand
3306 #define expand(l) \
3307 (void (*)(kmp_user_lock_p, kmp_lock_flags_t)) __kmp_set_##l##_lock_flags
3308 fill_table(__kmp_indirect_set_flags, expand);
3309 #undef expand
3310 #define expand(l) \
3311 (const ident_t *(*)(kmp_user_lock_p)) __kmp_get_##l##_lock_location
3312 fill_table(__kmp_indirect_get_location, expand);
3313 #undef expand
3314 #define expand(l) \
3315 (kmp_lock_flags_t(*)(kmp_user_lock_p)) __kmp_get_##l##_lock_flags
3316 fill_table(__kmp_indirect_get_flags, expand);
3317 #undef expand
3318
3319 __kmp_init_user_locks = TRUE;
3320 }
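
// Illustrative sketch (not compiled) of the index table initialized above:
// __kmp_i_lock_table.table is an array of block pointers, each block holding
// KMP_I_LOCK_CHUNK kmp_indirect_lock_t objects. Only the block-pointer array is
// reallocated when the table doubles, so existing lock addresses stay stable.
//
//   table -> [ block0 ] -> [ lock 0 .. lock CHUNK-1 ]
//            [ block1 ] -> [ lock CHUNK .. lock 2*CHUNK-1 ]
//            ...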
3321
3322 // Clean up the lock table.
3323 void __kmp_cleanup_indirect_user_locks() {
3324 kmp_lock_index_t i;
3325 int k;
3326
3327 // Clean up locks in the pools first (they were already destroyed before going
3328 // into the pools).
3329 for (k = 0; k < KMP_NUM_I_LOCKS; ++k) {
3330 kmp_indirect_lock_t *l = __kmp_indirect_lock_pool[k];
3331 while (l != NULL) {
3332 kmp_indirect_lock_t *ll = l;
3333 l = (kmp_indirect_lock_t *)l->lock->pool.next;
3334 KA_TRACE(20, ("__kmp_cleanup_indirect_user_locks: freeing %p from pool\n",
3335 ll));
3336 __kmp_free(ll->lock);
3337 ll->lock = NULL;
3338 }
3339 __kmp_indirect_lock_pool[k] = NULL;
3340 }
3341 // Clean up the remaining undestroyed locks.
3342 for (i = 0; i < __kmp_i_lock_table.next; i++) {
3343 kmp_indirect_lock_t *l = KMP_GET_I_LOCK(i);
3344 if (l->lock != NULL) {
3345 // Locks not destroyed explicitly need to be destroyed here.
3346 KMP_I_LOCK_FUNC(l, destroy)(l->lock);
3347 KA_TRACE(
3348 20,
3349 ("__kmp_cleanup_indirect_user_locks: destroy/freeing %p from table\n",
3350 l));
3351 __kmp_free(l->lock);
3352 }
3353 }
3354 // Free the table
3355 for (i = 0; i < __kmp_i_lock_table.size / KMP_I_LOCK_CHUNK; i++)
3356 __kmp_free(__kmp_i_lock_table.table[i]);
3357 __kmp_free(__kmp_i_lock_table.table);
3358
3359 __kmp_init_user_locks = FALSE;
3360 }
3361
3362 enum kmp_lock_kind __kmp_user_lock_kind = lk_default;
3363 int __kmp_num_locks_in_block = 1; // FIXME - tune this value
3364
3365 #else // KMP_USE_DYNAMIC_LOCK
3366
3367 static void __kmp_init_tas_lock_with_checks(kmp_tas_lock_t *lck) {
3368 __kmp_init_tas_lock(lck);
3369 }
3370
3371 static void __kmp_init_nested_tas_lock_with_checks(kmp_tas_lock_t *lck) {
3372 __kmp_init_nested_tas_lock(lck);
3373 }
3374
3375 #if KMP_USE_FUTEX
3376 static void __kmp_init_futex_lock_with_checks(kmp_futex_lock_t *lck) {
3377 __kmp_init_futex_lock(lck);
3378 }
3379
3380 static void __kmp_init_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) {
3381 __kmp_init_nested_futex_lock(lck);
3382 }
3383 #endif
3384
3385 static int __kmp_is_ticket_lock_initialized(kmp_ticket_lock_t *lck) {
3386 return lck == lck->lk.self;
3387 }
3388
3389 static void __kmp_init_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
3390 __kmp_init_ticket_lock(lck);
3391 }
3392
3393 static void __kmp_init_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
3394 __kmp_init_nested_ticket_lock(lck);
3395 }
3396
3397 static int __kmp_is_queuing_lock_initialized(kmp_queuing_lock_t *lck) {
3398 return lck == lck->lk.initialized;
3399 }
3400
3401 static void __kmp_init_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
3402 __kmp_init_queuing_lock(lck);
3403 }
3404
3405 static void
3406 __kmp_init_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
3407 __kmp_init_nested_queuing_lock(lck);
3408 }
3409
3410 #if KMP_USE_ADAPTIVE_LOCKS
3411 static void __kmp_init_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) {
3412 __kmp_init_adaptive_lock(lck);
3413 }
3414 #endif
3415
3416 static int __kmp_is_drdpa_lock_initialized(kmp_drdpa_lock_t *lck) {
3417 return lck == lck->lk.initialized;
3418 }
3419
3420 static void __kmp_init_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
3421 __kmp_init_drdpa_lock(lck);
3422 }
3423
3424 static void __kmp_init_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
3425 __kmp_init_nested_drdpa_lock(lck);
3426 }
3427
3428 /* user locks
3429 * They are implemented as a table of function pointers which are set to the
3430 * lock functions of the appropriate kind, once that has been determined. */
3431
3432 enum kmp_lock_kind __kmp_user_lock_kind = lk_default;
3433
3434 size_t __kmp_base_user_lock_size = 0;
3435 size_t __kmp_user_lock_size = 0;
3436
3437 kmp_int32 (*__kmp_get_user_lock_owner_)(kmp_user_lock_p lck) = NULL;
3438 int (*__kmp_acquire_user_lock_with_checks_)(kmp_user_lock_p lck,
3439 kmp_int32 gtid) = NULL;
3440
3441 int (*__kmp_test_user_lock_with_checks_)(kmp_user_lock_p lck,
3442 kmp_int32 gtid) = NULL;
3443 int (*__kmp_release_user_lock_with_checks_)(kmp_user_lock_p lck,
3444 kmp_int32 gtid) = NULL;
3445 void (*__kmp_init_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
3446 void (*__kmp_destroy_user_lock_)(kmp_user_lock_p lck) = NULL;
3447 void (*__kmp_destroy_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
3448 int (*__kmp_acquire_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
3449 kmp_int32 gtid) = NULL;
3450
3451 int (*__kmp_test_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
3452 kmp_int32 gtid) = NULL;
3453 int (*__kmp_release_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
3454 kmp_int32 gtid) = NULL;
3455 void (*__kmp_init_nested_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
3456 void (*__kmp_destroy_nested_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
3457
3458 int (*__kmp_is_user_lock_initialized_)(kmp_user_lock_p lck) = NULL;
3459 const ident_t *(*__kmp_get_user_lock_location_)(kmp_user_lock_p lck) = NULL;
3460 void (*__kmp_set_user_lock_location_)(kmp_user_lock_p lck,
3461 const ident_t *loc) = NULL;
3462 kmp_lock_flags_t (*__kmp_get_user_lock_flags_)(kmp_user_lock_p lck) = NULL;
3463 void (*__kmp_set_user_lock_flags_)(kmp_user_lock_p lck,
3464 kmp_lock_flags_t flags) = NULL;
3465
3466 void __kmp_set_user_lock_vptrs(kmp_lock_kind_t user_lock_kind) {
3467 switch (user_lock_kind) {
3468 case lk_default:
3469 default:
3470 KMP_ASSERT(0);
3471
3472 case lk_tas: {
3473 __kmp_base_user_lock_size = sizeof(kmp_base_tas_lock_t);
3474 __kmp_user_lock_size = sizeof(kmp_tas_lock_t);
3475
3476 __kmp_get_user_lock_owner_ =
3477 (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_tas_lock_owner);
3478
3479 if (__kmp_env_consistency_check) {
3480 KMP_BIND_USER_LOCK_WITH_CHECKS(tas);
3481 KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(tas);
3482 } else {
3483 KMP_BIND_USER_LOCK(tas);
3484 KMP_BIND_NESTED_USER_LOCK(tas);
3485 }
3486
3487 __kmp_destroy_user_lock_ =
3488 (void (*)(kmp_user_lock_p))(&__kmp_destroy_tas_lock);
3489
3490 __kmp_is_user_lock_initialized_ = (int (*)(kmp_user_lock_p))NULL;
3491
3492 __kmp_get_user_lock_location_ = (const ident_t *(*)(kmp_user_lock_p))NULL;
3493
3494 __kmp_set_user_lock_location_ =
3495 (void (*)(kmp_user_lock_p, const ident_t *))NULL;
3496
3497 __kmp_get_user_lock_flags_ = (kmp_lock_flags_t(*)(kmp_user_lock_p))NULL;
3498
3499 __kmp_set_user_lock_flags_ =
3500 (void (*)(kmp_user_lock_p, kmp_lock_flags_t))NULL;
3501 } break;
3502
3503 #if KMP_USE_FUTEX
3504
3505 case lk_futex: {
3506 __kmp_base_user_lock_size = sizeof(kmp_base_futex_lock_t);
3507 __kmp_user_lock_size = sizeof(kmp_futex_lock_t);
3508
3509 __kmp_get_user_lock_owner_ =
3510 (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_futex_lock_owner);
3511
3512 if (__kmp_env_consistency_check) {
3513 KMP_BIND_USER_LOCK_WITH_CHECKS(futex);
3514 KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(futex);
3515 } else {
3516 KMP_BIND_USER_LOCK(futex);
3517 KMP_BIND_NESTED_USER_LOCK(futex);
3518 }
3519
3520 __kmp_destroy_user_lock_ =
3521 (void (*)(kmp_user_lock_p))(&__kmp_destroy_futex_lock);
3522
3523 __kmp_is_user_lock_initialized_ = (int (*)(kmp_user_lock_p))NULL;
3524
3525 __kmp_get_user_lock_location_ = (const ident_t *(*)(kmp_user_lock_p))NULL;
3526
3527 __kmp_set_user_lock_location_ =
3528 (void (*)(kmp_user_lock_p, const ident_t *))NULL;
3529
3530 __kmp_get_user_lock_flags_ = (kmp_lock_flags_t(*)(kmp_user_lock_p))NULL;
3531
3532 __kmp_set_user_lock_flags_ =
3533 (void (*)(kmp_user_lock_p, kmp_lock_flags_t))NULL;
3534 } break;
3535
3536 #endif // KMP_USE_FUTEX
3537
3538 case lk_ticket: {
3539 __kmp_base_user_lock_size = sizeof(kmp_base_ticket_lock_t);
3540 __kmp_user_lock_size = sizeof(kmp_ticket_lock_t);
3541
3542 __kmp_get_user_lock_owner_ =
3543 (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_owner);
3544
3545 if (__kmp_env_consistency_check) {
3546 KMP_BIND_USER_LOCK_WITH_CHECKS(ticket);
3547 KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(ticket);
3548 } else {
3549 KMP_BIND_USER_LOCK(ticket);
3550 KMP_BIND_NESTED_USER_LOCK(ticket);
3551 }
3552
3553 __kmp_destroy_user_lock_ =
3554 (void (*)(kmp_user_lock_p))(&__kmp_destroy_ticket_lock);
3555
3556 __kmp_is_user_lock_initialized_ =
3557 (int (*)(kmp_user_lock_p))(&__kmp_is_ticket_lock_initialized);
3558
3559 __kmp_get_user_lock_location_ =
3560 (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_location);
3561
3562 __kmp_set_user_lock_location_ = (void (*)(
3563 kmp_user_lock_p, const ident_t *))(&__kmp_set_ticket_lock_location);
3564
3565 __kmp_get_user_lock_flags_ =
3566 (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_flags);
3567
3568 __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
3569 &__kmp_set_ticket_lock_flags);
3570 } break;
3571
3572 case lk_queuing: {
3573 __kmp_base_user_lock_size = sizeof(kmp_base_queuing_lock_t);
3574 __kmp_user_lock_size = sizeof(kmp_queuing_lock_t);
3575
3576 __kmp_get_user_lock_owner_ =
3577 (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_owner);
3578
3579 if (__kmp_env_consistency_check) {
3580 KMP_BIND_USER_LOCK_WITH_CHECKS(queuing);
3581 KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(queuing);
3582 } else {
3583 KMP_BIND_USER_LOCK(queuing);
3584 KMP_BIND_NESTED_USER_LOCK(queuing);
3585 }
3586
3587 __kmp_destroy_user_lock_ =
3588 (void (*)(kmp_user_lock_p))(&__kmp_destroy_queuing_lock);
3589
3590 __kmp_is_user_lock_initialized_ =
3591 (int (*)(kmp_user_lock_p))(&__kmp_is_queuing_lock_initialized);
3592
3593 __kmp_get_user_lock_location_ =
3594 (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_location);
3595
3596 __kmp_set_user_lock_location_ = (void (*)(
3597 kmp_user_lock_p, const ident_t *))(&__kmp_set_queuing_lock_location);
3598
3599 __kmp_get_user_lock_flags_ =
3600 (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_flags);
3601
3602 __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
3603 &__kmp_set_queuing_lock_flags);
3604 } break;
3605
3606 #if KMP_USE_ADAPTIVE_LOCKS
3607 case lk_adaptive: {
3608 __kmp_base_user_lock_size = sizeof(kmp_base_adaptive_lock_t);
3609 __kmp_user_lock_size = sizeof(kmp_adaptive_lock_t);
3610
3611 __kmp_get_user_lock_owner_ =
3612 (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_owner);
3613
3614 if (__kmp_env_consistency_check) {
3615 KMP_BIND_USER_LOCK_WITH_CHECKS(adaptive);
3616 } else {
3617 KMP_BIND_USER_LOCK(adaptive);
3618 }
3619
3620 __kmp_destroy_user_lock_ =
3621 (void (*)(kmp_user_lock_p))(&__kmp_destroy_adaptive_lock);
3622
3623 __kmp_is_user_lock_initialized_ =
3624 (int (*)(kmp_user_lock_p))(&__kmp_is_queuing_lock_initialized);
3625
3626 __kmp_get_user_lock_location_ =
3627 (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_location);
3628
3629 __kmp_set_user_lock_location_ = (void (*)(
3630 kmp_user_lock_p, const ident_t *))(&__kmp_set_queuing_lock_location);
3631
3632 __kmp_get_user_lock_flags_ =
3633 (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_flags);
3634
3635 __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
3636 &__kmp_set_queuing_lock_flags);
3637
3638 } break;
3639 #endif // KMP_USE_ADAPTIVE_LOCKS
3640
3641 case lk_drdpa: {
3642 __kmp_base_user_lock_size = sizeof(kmp_base_drdpa_lock_t);
3643 __kmp_user_lock_size = sizeof(kmp_drdpa_lock_t);
3644
3645 __kmp_get_user_lock_owner_ =
3646 (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_owner);
3647
3648 if (__kmp_env_consistency_check) {
3649 KMP_BIND_USER_LOCK_WITH_CHECKS(drdpa);
3650 KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(drdpa);
3651 } else {
3652 KMP_BIND_USER_LOCK(drdpa);
3653 KMP_BIND_NESTED_USER_LOCK(drdpa);
3654 }
3655
3656 __kmp_destroy_user_lock_ =
3657 (void (*)(kmp_user_lock_p))(&__kmp_destroy_drdpa_lock);
3658
3659 __kmp_is_user_lock_initialized_ =
3660 (int (*)(kmp_user_lock_p))(&__kmp_is_drdpa_lock_initialized);
3661
3662 __kmp_get_user_lock_location_ =
3663 (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_location);
3664
3665 __kmp_set_user_lock_location_ = (void (*)(
3666 kmp_user_lock_p, const ident_t *))(&__kmp_set_drdpa_lock_location);
3667
3668 __kmp_get_user_lock_flags_ =
3669 (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_flags);
3670
3671 __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
3672 &__kmp_set_drdpa_lock_flags);
3673 } break;
3674 }
3675 }
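
// A hedged sketch of what the KMP_BIND_USER_LOCK* macros used above are assumed
// to do (the real definitions live in kmp_lock.h): bind the generic function
// pointers to one lock kind's implementations, e.g. for "ticket" with checks:
//
//   __kmp_acquire_user_lock_with_checks_ =
//       (int (*)(kmp_user_lock_p, kmp_int32))__kmp_acquire_ticket_lock_with_checks;
//   __kmp_release_user_lock_with_checks_ =
//       (int (*)(kmp_user_lock_p, kmp_int32))__kmp_release_ticket_lock_with_checks;
//   ... (test, init, destroy, and the nested variants follow the same pattern)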
3676
3677 // ----------------------------------------------------------------------------
3678 // User lock table & lock allocation
3679
3680 kmp_lock_table_t __kmp_user_lock_table = {1, 0, NULL};
3681 kmp_user_lock_p __kmp_lock_pool = NULL;
3682
3683 // Lock block-allocation support.
3684 kmp_block_of_locks *__kmp_lock_blocks = NULL;
3685 int __kmp_num_locks_in_block = 1; // FIXME - tune this value
3686
3687 static kmp_lock_index_t __kmp_lock_table_insert(kmp_user_lock_p lck) {
3688 // Assume that kmp_global_lock is held upon entry/exit.
3689 kmp_lock_index_t index;
3690 if (__kmp_user_lock_table.used >= __kmp_user_lock_table.allocated) {
3691 kmp_lock_index_t size;
3692 kmp_user_lock_p *table;
3693 // Reallocate lock table.
3694 if (__kmp_user_lock_table.allocated == 0) {
3695 size = 1024;
3696 } else {
3697 size = __kmp_user_lock_table.allocated * 2;
3698 }
3699 table = (kmp_user_lock_p *)__kmp_allocate(sizeof(kmp_user_lock_p) * size);
3700 KMP_MEMCPY(table + 1, __kmp_user_lock_table.table + 1,
3701 sizeof(kmp_user_lock_p) * (__kmp_user_lock_table.used - 1));
3702 table[0] = (kmp_user_lock_p)__kmp_user_lock_table.table;
3703 // We cannot free the previous table now, since it may be in use by other
3704 // threads. Instead, save the pointer to the previous table in the first
3705 // element of the new table. All the tables are thus organized into a list,
3706 // and can be freed when the library is shutting down.
3707 __kmp_user_lock_table.table = table;
3708 __kmp_user_lock_table.allocated = size;
3709 }
3710 KMP_DEBUG_ASSERT(__kmp_user_lock_table.used <
3711 __kmp_user_lock_table.allocated);
3712 index = __kmp_user_lock_table.used;
3713 __kmp_user_lock_table.table[index] = lck;
3714 ++__kmp_user_lock_table.used;
3715 return index;
3716 }
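
// Illustrative sketch (not compiled) of the resulting table chain: element [0]
// of each table points to the previous, smaller table, and entries [1..used-1]
// hold lock pointers. __kmp_cleanup_user_locks() walks this chain to free the
// tables at shutdown.
//
//   current.table[0] -> older.table[0] -> ... -> NULL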
3717
3718 static kmp_user_lock_p __kmp_lock_block_allocate() {
3719 // Assume that kmp_global_lock is held upon entry/exit.
3720 static int last_index = 0;
3721 if ((last_index >= __kmp_num_locks_in_block) || (__kmp_lock_blocks == NULL)) {
3722 // Restart the index.
3723 last_index = 0;
3724 // Need to allocate a new block.
3725 KMP_DEBUG_ASSERT(__kmp_user_lock_size > 0);
3726 size_t space_for_locks = __kmp_user_lock_size * __kmp_num_locks_in_block;
3727 char *buffer =
3728 (char *)__kmp_allocate(space_for_locks + sizeof(kmp_block_of_locks));
3729 // Set up the new block.
3730 kmp_block_of_locks *new_block =
3731 (kmp_block_of_locks *)(&buffer[space_for_locks]);
3732 new_block->next_block = __kmp_lock_blocks;
3733 new_block->locks = (void *)buffer;
3734 // Publish the new block.
3735 KMP_MB();
3736 __kmp_lock_blocks = new_block;
3737 }
3738 kmp_user_lock_p ret = (kmp_user_lock_p)(&(
3739 ((char *)(__kmp_lock_blocks->locks))[last_index * __kmp_user_lock_size]));
3740 last_index++;
3741 return ret;
3742 }
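
// Illustrative sketch (not compiled) of one allocation made above: the lock
// storage and its kmp_block_of_locks descriptor share a single buffer, with the
// descriptor placed after the locks, which is why only block_ptr->locks is
// freed explicitly in __kmp_cleanup_user_locks().
//
//   buffer: [ lock 0 | lock 1 | ... | lock N-1 | kmp_block_of_locks ]
//            ^ new_block->locks                  ^ new_block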
3743
3744 // Get memory for a lock. It may be freshly allocated memory or reused memory
3745 // from the lock pool.
3746 kmp_user_lock_p __kmp_user_lock_allocate(void **user_lock, kmp_int32 gtid,
3747 kmp_lock_flags_t flags) {
3748 kmp_user_lock_p lck;
3749 kmp_lock_index_t index;
3750 KMP_DEBUG_ASSERT(user_lock);
3751
3752 __kmp_acquire_lock(&__kmp_global_lock, gtid);
3753
3754 if (__kmp_lock_pool == NULL) {
3755 // Lock pool is empty. Allocate new memory.
3756
3757 // ANNOTATION: Found no good way to express the synchronisation
3758 // between allocation and usage, so ignore the allocation
3759 ANNOTATE_IGNORE_WRITES_BEGIN();
3760 if (__kmp_num_locks_in_block <= 1) { // Tune this cutoff point.
3761 lck = (kmp_user_lock_p)__kmp_allocate(__kmp_user_lock_size);
3762 } else {
3763 lck = __kmp_lock_block_allocate();
3764 }
3765 ANNOTATE_IGNORE_WRITES_END();
3766
3767 // Insert lock in the table so that it can be freed in __kmp_cleanup,
3768 // and the debugger has info on all allocated locks.
3769 index = __kmp_lock_table_insert(lck);
3770 } else {
3771 // Pick up lock from pool.
3772 lck = __kmp_lock_pool;
3773 index = __kmp_lock_pool->pool.index;
3774 __kmp_lock_pool = __kmp_lock_pool->pool.next;
3775 }
3776
3777 // We could potentially differentiate between nested and regular locks
3778 // here, and do the lock table lookup for regular locks only.
3779 if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3780 *((kmp_lock_index_t *)user_lock) = index;
3781 } else {
3782 *((kmp_user_lock_p *)user_lock) = lck;
3783 }
3784
3785 // Mark the lock if it is a critical section lock.
3786 __kmp_set_user_lock_flags(lck, flags);
3787
3788 __kmp_release_lock(&__kmp_global_lock, gtid); // AC: TODO move this line up
3789
3790 return lck;
3791 }
3792
3793 // Return the lock's memory to the pool for reuse.
3794 void __kmp_user_lock_free(void **user_lock, kmp_int32 gtid,
3795 kmp_user_lock_p lck) {
3796 KMP_DEBUG_ASSERT(user_lock != NULL);
3797 KMP_DEBUG_ASSERT(lck != NULL);
3798
3799 __kmp_acquire_lock(&__kmp_global_lock, gtid);
3800
3801 lck->pool.next = __kmp_lock_pool;
3802 __kmp_lock_pool = lck;
3803 if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3804 kmp_lock_index_t index = *((kmp_lock_index_t *)user_lock);
3805 KMP_DEBUG_ASSERT(0 < index && index <= __kmp_user_lock_table.used);
3806 lck->pool.index = index;
3807 }
3808
3809 __kmp_release_lock(&__kmp_global_lock, gtid);
3810 }
3811
3812 kmp_user_lock_p __kmp_lookup_user_lock(void **user_lock, char const *func) {
3813 kmp_user_lock_p lck = NULL;
3814
3815 if (__kmp_env_consistency_check) {
3816 if (user_lock == NULL) {
3817 KMP_FATAL(LockIsUninitialized, func);
3818 }
3819 }
3820
3821 if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3822 kmp_lock_index_t index = *((kmp_lock_index_t *)user_lock);
3823 if (__kmp_env_consistency_check) {
3824 if (!(0 < index && index < __kmp_user_lock_table.used)) {
3825 KMP_FATAL(LockIsUninitialized, func);
3826 }
3827 }
3828 KMP_DEBUG_ASSERT(0 < index && index < __kmp_user_lock_table.used);
3829 KMP_DEBUG_ASSERT(__kmp_user_lock_size > 0);
3830 lck = __kmp_user_lock_table.table[index];
3831 } else {
3832 lck = *((kmp_user_lock_p *)user_lock);
3833 }
3834
3835 if (__kmp_env_consistency_check) {
3836 if (lck == NULL) {
3837 KMP_FATAL(LockIsUninitialized, func);
3838 }
3839 }
3840
3841 return lck;
3842 }
3843
3844 void __kmp_cleanup_user_locks(void) {
3845 // Reset the lock pool. Don't worry about locks in the pool -- we will free them
3846 // when iterating through the lock table (it includes all the locks, dead or alive).
3847 __kmp_lock_pool = NULL;
3848
3849 #define IS_CRITICAL(lck) \
3850 ((__kmp_get_user_lock_flags_ != NULL) && \
3851 ((*__kmp_get_user_lock_flags_)(lck)&kmp_lf_critical_section))
3852
3853 // Loop through lock table, free all locks.
3854 // Do not free item [0]; it is reserved for the lock tables list.
3855 //
3856 // FIXME - we are iterating through a list of (pointers to) objects of type
3857 // union kmp_user_lock, but we have no way of knowing whether the base type is
3858 // currently "pool" or whatever the global user lock type is.
3859 //
3860 // We are relying on the fact that for all of the user lock types
3861 // (except "tas"), the first field in the lock struct is the "initialized"
3862 // field, which is set to the address of the lock object itself when
3863 // the lock is initialized. When the union is of type "pool", the
3864 // first field is a pointer to the next object in the free list, which
3865 // will not be the same address as the object itself.
3866 //
3867 // This means that the check (*__kmp_is_user_lock_initialized_)(lck) will fail
3868 // for "pool" objects on the free list. This must happen as the "location"
3869 // field of real user locks overlaps the "index" field of "pool" objects.
3870 //
3871 // It would be better to run through the free list, and remove all "pool"
3872 // objects from the lock table before executing this loop. However,
3873 // "pool" objects do not always have their index field set (only on
3874 // lin_32e), and I don't want to search the lock table for the address
3875 // of every "pool" object on the free list.
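// Illustrative sketch (not compiled) of the distinction relied on above,
// assuming the layout described in this comment: for a live (non-tas) lock the
// first word points back at the lock itself, while for a pooled object it is
// the free-list link, which is never the object's own address.
//
//   live lock:   lck->lk.initialized == lck          -> destroy, then free
//   pool object: lck->pool.next      != (void *)lck  -> just free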
3876 while (__kmp_user_lock_table.used > 1) {
3877 const ident *loc;
3878
3879 // Reduce __kmp_user_lock_table.used before freeing the lock,
3880 // so that the state of the locks stays consistent.
3881 kmp_user_lock_p lck =
3882 __kmp_user_lock_table.table[--__kmp_user_lock_table.used];
3883
3884 if ((__kmp_is_user_lock_initialized_ != NULL) &&
3885 (*__kmp_is_user_lock_initialized_)(lck)) {
3886 // Issue a warning if: KMP_CONSISTENCY_CHECK is on AND the lock is initialized
3887 // AND it is NOT a critical section (the user is not responsible for destroying
3888 // criticals) AND we know the source location to report.
3889 if (__kmp_env_consistency_check && (!IS_CRITICAL(lck)) &&
3890 ((loc = __kmp_get_user_lock_location(lck)) != NULL) &&
3891 (loc->psource != NULL)) {
3892 kmp_str_loc_t str_loc = __kmp_str_loc_init(loc->psource, false);
3893 KMP_WARNING(CnsLockNotDestroyed, str_loc.file, str_loc.line);
3894 __kmp_str_loc_free(&str_loc);
3895 }
3896
3897 #ifdef KMP_DEBUG
3898 if (IS_CRITICAL(lck)) {
3899 KA_TRACE(
3900 20,
3901 ("__kmp_cleanup_user_locks: free critical section lock %p (%p)\n",
3902 lck, *(void **)lck));
3903 } else {
3904 KA_TRACE(20, ("__kmp_cleanup_user_locks: free lock %p (%p)\n", lck,
3905 *(void **)lck));
3906 }
3907 #endif // KMP_DEBUG
3908
3909 // Clean up the lock's internal dynamic resources (particularly for drdpa locks).
3910 __kmp_destroy_user_lock(lck);
3911 }
3912
3913 // Free the lock if block allocation of locks is not used.
3914 if (__kmp_lock_blocks == NULL) {
3915 __kmp_free(lck);
3916 }
3917 }
3918
3919 #undef IS_CRITICAL
3920
3921 // Delete the lock table(s).
3922 kmp_user_lock_p *table_ptr = __kmp_user_lock_table.table;
3923 __kmp_user_lock_table.table = NULL;
3924 __kmp_user_lock_table.allocated = 0;
3925
3926 while (table_ptr != NULL) {
3927 // In the first element we saved the pointer to the previous
3928 // (smaller) lock table.
3929 kmp_user_lock_p *next = (kmp_user_lock_p *)(table_ptr[0]);
3930 __kmp_free(table_ptr);
3931 table_ptr = next;
3932 }
3933
3934 // Free buffers allocated for blocks of locks.
3935 kmp_block_of_locks_t *block_ptr = __kmp_lock_blocks;
3936 __kmp_lock_blocks = NULL;
3937
3938 while (block_ptr != NULL) {
3939 kmp_block_of_locks_t *next = block_ptr->next_block;
3940 __kmp_free(block_ptr->locks);
3941 // *block_ptr itself was allocated at the end of the locks vector.
3942 block_ptr = next;
3943 }
3944
3945 TCW_4(__kmp_init_user_locks, FALSE);
3946 }
3947
3948 #endif // KMP_USE_DYNAMIC_LOCK
3949