Lines matching refs:gtid (LLVM OpenMP runtime, openmp/runtime/src/kmp_lock.h). gtid is the caller's global thread id; every lock entry point takes it so the runtime can check and record lock ownership.
144 extern int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
145 extern int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
146 extern int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
150 extern int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
151 extern int __kmp_test_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
152 extern int __kmp_release_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
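
The protocol behind these test-and-set (TAS) entry points is visible in the macro bodies further down this listing (lines 646-659): poll == 0 means the lock is free, poll == gtid + 1 means it is held by thread gtid (the +1 keeps gtid 0 distinct from "free"). A minimal self-contained sketch of that discipline; all toy_* names are illustrative, not the runtime's:

#include <atomic>

struct toy_tas_lock {
  std::atomic<int> poll{0}; // 0 = free, gtid + 1 = held by gtid
};

// non-blocking attempt, analogous to __kmp_test_tas_lock
static bool toy_test_tas(toy_tas_lock *l, int gtid) {
  int expected = 0; // succeeds only if currently free
  return l->poll.compare_exchange_strong(expected, gtid + 1,
                                         std::memory_order_acquire);
}

// blocking acquire, analogous to __kmp_acquire_tas_lock
static void toy_acquire_tas(toy_tas_lock *l, int gtid) {
  while (!toy_test_tas(l, gtid)) {
  } // spin until the 0 -> gtid + 1 CAS succeeds
}

// analogous to __kmp_release_tas_lock
static void toy_release_tas(toy_tas_lock *l, int /*gtid*/) {
  l->poll.store(0, std::memory_order_release); // back to free
}

The later sketches in this listing reuse these toy_* helpers.
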
203 extern int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
204 extern int __kmp_test_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
205 extern int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
210 kmp_int32 gtid); /* cont. of the __kmp_acquire_nested_futex_lock decl */
211 extern int __kmp_test_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
213 kmp_int32 gtid); /* cont. of the __kmp_release_nested_futex_lock decl */
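
Lines 209-213 add the nested (reentrant) futex variants; futex locks are compiled only where KMP_USE_FUTEX holds (Linux-family targets). The nested discipline is the same for every lock family here: the owner may re-acquire without blocking, and the lock is freed only when the acquire/release depth returns to zero. A sketch reusing the toy TAS helpers above; owner and depth are illustrative stand-ins for the runtime's per-lock state:

struct toy_nested_lock {
  toy_tas_lock inner;
  std::atomic<int> owner{-1}; // gtid of the holder, -1 if none
  int depth = 0;              // written only while inner is held
};

static int toy_acquire_nested(toy_nested_lock *l, int gtid) {
  if (l->owner.load(std::memory_order_relaxed) == gtid)
    return ++l->depth; // owner re-acquires: bump depth, no blocking
  toy_acquire_tas(&l->inner, gtid);
  l->owner.store(gtid, std::memory_order_relaxed);
  return l->depth = 1;
}

static int toy_release_nested(toy_nested_lock *l, int gtid) {
  if (--l->depth > 0)
    return 0; // still held, cf. the runtime's KMP_LOCK_STILL_HELD
  l->owner.store(-1, std::memory_order_relaxed);
  toy_release_tas(&l->inner, gtid);
  return 1; // fully released, cf. KMP_LOCK_RELEASED
}
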
286 extern int __kmp_acquire_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
287 extern int __kmp_test_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
289 kmp_int32 gtid); /* cont. of the __kmp_test_ticket_lock_with_cheks decl */
290 extern int __kmp_release_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
295 kmp_int32 gtid); /* cont. of the __kmp_acquire_nested_ticket_lock decl */
297 kmp_int32 gtid); /* cont. of the __kmp_test_nested_ticket_lock decl */
299 kmp_int32 gtid); /* cont. of the __kmp_release_nested_ticket_lock decl */
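
The ticket entry points (286-299) implement a FIFO lock: each acquirer draws a ticket and waits until the "now serving" counter reaches it, so waiters are served in arrival order. A self-contained sketch of the idea (the real kmp_ticket_lock_t carries additional state):

struct toy_ticket_lock {
  std::atomic<unsigned> next_ticket{0}; // next ticket to hand out
  std::atomic<unsigned> now_serving{0}; // ticket currently admitted
};

static void toy_acquire_ticket(toy_ticket_lock *l) {
  unsigned my = l->next_ticket.fetch_add(1, std::memory_order_relaxed);
  while (l->now_serving.load(std::memory_order_acquire) != my) {
  } // strict FIFO: tickets are served in order
}

// non-blocking attempt: draw a ticket only if it would be served now
static bool toy_test_ticket(toy_ticket_lock *l) {
  unsigned expected = l->now_serving.load(std::memory_order_relaxed);
  return l->next_ticket.compare_exchange_strong(expected, expected + 1,
                                                std::memory_order_acquire);
}

static void toy_release_ticket(toy_ticket_lock *l) {
  l->now_serving.fetch_add(1, std::memory_order_release);
}
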
394 extern int __kmp_acquire_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
395 extern int __kmp_test_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
396 extern int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
401 kmp_int32 gtid); /* cont. of the __kmp_acquire_nested_queuing_lock decl */
403 kmp_int32 gtid); /* cont. of the __kmp_test_nested_queuing_lock decl */
405 kmp_int32 gtid); /* cont. of the __kmp_release_nested_queuing_lock decl */
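
The queuing entry points (394-405) keep contending threads on a queue so each one spins on its own location instead of one shared word. A self-contained MCS-style sketch of that idea; the real kmp_queuing_lock_t links waiting thread ids through per-thread runtime structures rather than caller-provided nodes, so treat this as an analogy, not the runtime's algorithm:

struct toy_qnode {
  std::atomic<toy_qnode *> next{nullptr};
  std::atomic<bool> locked{false};
};
struct toy_queuing_lock {
  std::atomic<toy_qnode *> tail{nullptr};
};

static void toy_acquire_queuing(toy_queuing_lock *l, toy_qnode *me) {
  me->next.store(nullptr, std::memory_order_relaxed);
  toy_qnode *prev = l->tail.exchange(me, std::memory_order_acq_rel);
  if (prev != nullptr) { // someone ahead of us: enqueue and wait
    me->locked.store(true, std::memory_order_relaxed);
    prev->next.store(me, std::memory_order_release);
    while (me->locked.load(std::memory_order_acquire)) {
    } // spin on our own node only: no shared-line contention
  }
}

static void toy_release_queuing(toy_queuing_lock *l, toy_qnode *me) {
  toy_qnode *succ = me->next.load(std::memory_order_acquire);
  if (succ == nullptr) {
    toy_qnode *expected = me;
    if (l->tail.compare_exchange_strong(expected, nullptr,
                                        std::memory_order_acq_rel))
      return; // no waiter: lock is now free
    while ((succ = me->next.load(std::memory_order_acquire)) == nullptr) {
    } // a successor is mid-enqueue; wait for the link
  }
  succ->locked.store(false, std::memory_order_release); // hand off
}
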
491 extern int __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
492 extern int __kmp_test_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
493 extern int __kmp_release_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
498 kmp_int32 gtid); /* cont. of the __kmp_acquire_nested_drdpa_lock decl */
499 extern int __kmp_test_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
501 kmp_int32 gtid); /* cont. of the __kmp_release_nested_drdpa_lock decl */
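
The drdpa entry points (491-501) are, roughly, a ticket lock whose waiters poll distinct slots of an array, one cache line per waiter; the real lock can also reconfigure (grow) its polling area at runtime. A simplified fixed-size sketch of the distributed-polling idea (illustrative only):

struct toy_drdpa_lock {
  enum { N = 64 }; // fixed slot count; the real lock resizes this
  std::atomic<unsigned> next_ticket{0};
  std::atomic<unsigned> polls[N]; // polls[i] holds the ticket admitted there
  unsigned holder_ticket = 0;     // written only by the lock holder
  toy_drdpa_lock() {
    for (auto &p : polls)
      p.store(0, std::memory_order_relaxed);
  }
};

static void toy_acquire_drdpa(toy_drdpa_lock *l) {
  unsigned t = l->next_ticket.fetch_add(1, std::memory_order_relaxed);
  while (l->polls[t % toy_drdpa_lock::N].load(std::memory_order_acquire) != t) {
  } // each waiter spins on its own slot, not on a shared counter
  l->holder_ticket = t;
}

static void toy_release_drdpa(toy_drdpa_lock *l) {
  unsigned next = l->holder_ticket + 1;
  l->polls[next % toy_drdpa_lock::N].store(next, std::memory_order_release);
}
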
555 static inline int __kmp_acquire_lock(kmp_lock_t *lck, kmp_int32 gtid) { in __kmp_acquire_lock() argument
556 return __kmp_acquire_ticket_lock(lck, gtid); in __kmp_acquire_lock()
559 static inline int __kmp_test_lock(kmp_lock_t *lck, kmp_int32 gtid) { in __kmp_test_lock() argument
560 return __kmp_test_ticket_lock(lck, gtid); in __kmp_test_lock()
563 static inline void __kmp_release_lock(kmp_lock_t *lck, kmp_int32 gtid) { in __kmp_release_lock() argument
564 __kmp_release_ticket_lock(lck, gtid); in __kmp_release_lock()
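
Lines 555-564 define the wrappers for the runtime's internal generic lock, kmp_lock_t, and the listing shows them forwarding straight to the ticket entry points, so runtime-internal locking is always FIFO. A hedged usage sketch, meaningful only inside the runtime; __kmp_entry_gtid() and the __kmp_init_lock/__kmp_destroy_lock wrappers are assumptions here (they live elsewhere in the runtime headers):

kmp_lock_t internal_lock; // forwarded to the kmp_ticket_lock_t machinery
__kmp_init_lock(&internal_lock);

kmp_int32 gtid = __kmp_entry_gtid();        // caller's global thread id
if (!__kmp_test_lock(&internal_lock, gtid)) // non-blocking attempt failed
  __kmp_acquire_lock(&internal_lock, gtid); // block until acquired
/* ... runtime-internal critical section ... */
__kmp_release_lock(&internal_lock, gtid);

__kmp_destroy_lock(&internal_lock);
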
633 kmp_int32 gtid); /* cont. of the __kmp_acquire_user_lock_with_checks_ function-pointer decl */
638 #define __kmp_acquire_user_lock_with_checks(lck, gtid) \ argument
646 if ((gtid >= 0) && (lck->tas.lk.poll - 1 == gtid)) { \
651 !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)) { \
659 !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)); \
664 (*__kmp_acquire_user_lock_with_checks_)(lck, gtid); \
669 kmp_int32 gtid) { in __kmp_acquire_user_lock_with_checks() argument
671 return (*__kmp_acquire_user_lock_with_checks_)(lck, gtid); in __kmp_acquire_user_lock_with_checks()
676 kmp_int32 gtid); /* cont. of the __kmp_test_user_lock_with_checks_ function-pointer decl */
684 kmp_int32 gtid) { in __kmp_test_user_lock_with_checks() argument
694 __kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)); in __kmp_test_user_lock_with_checks()
697 return (*__kmp_test_user_lock_with_checks_)(lck, gtid); in __kmp_test_user_lock_with_checks()
702 kmp_int32 gtid) { in __kmp_test_user_lock_with_checks() argument
704 return (*__kmp_test_user_lock_with_checks_)(lck, gtid); in __kmp_test_user_lock_with_checks()
709 kmp_int32 gtid); /* cont. of the __kmp_release_user_lock_with_checks_ function-pointer decl */
712 kmp_int32 gtid) { in __kmp_release_user_lock_with_checks() argument
714 (*__kmp_release_user_lock_with_checks_)(lck, gtid); in __kmp_release_user_lock_with_checks()
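
The block from 633 to 714 is the "user lock with checks" layer behind the OMP lock API: one global function pointer per operation, plus macros that, on platforms where the header enables them, inline the TAS CAS fast path and fall back to the indirect call for every other lock kind. A self-contained sketch of that dispatch shape, reusing the toy TAS helpers above; toy_* names are illustrative:

enum toy_lock_kind { toy_lk_tas, toy_lk_other };
static toy_lock_kind toy_user_lock_kind = toy_lk_tas; // chosen at init time

// per-kind indirect entry, normally installed from a table of lock ops
static void (*toy_acquire_user_)(toy_tas_lock *, int) = toy_acquire_tas;

static void toy_acquire_user_lock(toy_tas_lock *lck, int gtid) {
  if (toy_user_lock_kind == toy_lk_tas) {
    int expected = 0; // inline fast path: one CAS, no call (cf. 651/659)
    if (lck->poll.compare_exchange_strong(expected, gtid + 1,
                                          std::memory_order_acquire))
      return;
    toy_acquire_tas(lck, gtid); // contended: fall back to the spin loop
  } else {
    (*toy_acquire_user_)(lck, gtid); // other kinds: indirect dispatch
  }
}
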
741 kmp_int32 gtid); /* cont. of the __kmp_acquire_nested_user_lock_with_checks_ function-pointer decl */
745 #define __kmp_acquire_nested_user_lock_with_checks(lck, gtid, depth) \ argument
754 if (lck->tas.lk.poll - 1 == gtid) { \
759 !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)) { \
767 !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)); \
775 *depth = (*__kmp_acquire_nested_user_lock_with_checks_)(lck, gtid); \
780 __kmp_acquire_nested_user_lock_with_checks(kmp_user_lock_p lck, kmp_int32 gtid, in __kmp_acquire_nested_user_lock_with_checks() argument
783 *depth = (*__kmp_acquire_nested_user_lock_with_checks_)(lck, gtid); in __kmp_acquire_nested_user_lock_with_checks()
788 kmp_int32 gtid); /* cont. of the __kmp_test_nested_user_lock_with_checks_ function-pointer decl */
792 kmp_int32 gtid) { in __kmp_test_nested_user_lock_with_checks() argument
802 KMP_DEBUG_ASSERT(gtid >= 0); in __kmp_test_nested_user_lock_with_checks()
804 gtid) { /* __kmp_get_tas_lock_owner( lck ) == gtid */ in __kmp_test_nested_user_lock_with_checks()
808 __kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)); in __kmp_test_nested_user_lock_with_checks()
816 return (*__kmp_test_nested_user_lock_with_checks_)(lck, gtid); in __kmp_test_nested_user_lock_with_checks()
821 kmp_int32 gtid) { in __kmp_test_nested_user_lock_with_checks() argument
823 return (*__kmp_test_nested_user_lock_with_checks_)(lck, gtid); in __kmp_test_nested_user_lock_with_checks()
828 kmp_int32 gtid); /* cont. of the __kmp_release_nested_user_lock_with_checks_ function-pointer decl */
832 kmp_int32 gtid) { in __kmp_release_nested_user_lock_with_checks() argument
834 return (*__kmp_release_nested_user_lock_with_checks_)(lck, gtid); in __kmp_release_nested_user_lock_with_checks()
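
The nested variant of this layer (741-834) puts a depth counter on top of the same TAS fast path: ownership is recognized by poll - 1 == gtid (lines 754 and 804), so an owner's re-acquisition touches no atomics at all. A sketch, with depth_locked standing in for the lk.depth_locked field the runtime keeps per nestable lock:

static int toy_acquire_nested_user(toy_tas_lock *lck, int *depth_locked,
                                   int gtid) {
  if (lck->poll.load(std::memory_order_relaxed) - 1 == gtid)
    return ++*depth_locked; // already the owner: just bump the depth
  toy_acquire_tas(lck, gtid); // first acquisition: take the lock
  return *depth_locked = 1;
}

static int toy_release_nested_user(toy_tas_lock *lck, int *depth_locked,
                                   int gtid) {
  if (--*depth_locked > 0)
    return 0; // still held by this gtid
  toy_release_tas(lck, gtid);
  return 1; // final release
}
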
973 kmp_int32 gtid, /* cont. of the __kmp_user_lock_allocate decl */
975 extern void __kmp_user_lock_free(void **user_lock, kmp_int32 gtid,
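
Lines 972-976 close the listing with the pooled allocation API: __kmp_user_lock_allocate binds the user program's lock word (*user_lock) to a runtime-managed kmp_user_lock_p, and __kmp_user_lock_free must hand it back with the same pointer pair. A hedged, runtime-internal usage sketch; the trailing flags parameter of the allocate call is not visible in this listing, and the 0 passed for it here is illustrative:

void *user_lock_word = nullptr; // the address held by user code
kmp_user_lock_p lck =
    __kmp_user_lock_allocate(&user_lock_word, gtid, 0 /* flags */);
/* ... initialize, then drive it through the with_checks entry points ... */
__kmp_user_lock_free(&user_lock_word, gtid, lck);
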