Searched refs:locks (Results 1 – 12 of 12) sorted by relevance
45 unmatched-locks = Rcu-lock \ domain(matched)
47 and unmatched = unmatched-locks | unmatched-unlocks
49 and unmatched-locks-to-unlocks =
50 [unmatched-locks] ; po ; [unmatched-unlocks]
51 and matched = matched | (unmatched-locks-to-unlocks \
61 unmatched-locks = Srcu-lock \ domain(matched)
63 and unmatched = unmatched-locks | unmatched-unlocks
65 and unmatched-locks-to-unlocks =
66 ([unmatched-locks] ; po ; [unmatched-unlocks]) & loc
67 and matched = matched | (unmatched-locks-to-unlocks \
17 * File "lock.cat" handles locks and is experimental.
18 * It can be replaced by include "cos.cat" for tests that do not use locks.
77 empty ([UNMATCHED-LKW] ; loc ; [UNMATCHED-LKW]) \ id as unmatched-locks
39 static struct rb_root locks = RB_ROOT; variable
103 struct rb_node **node = &locks.rb_node; in __get_lock_node()
206 rb_insert_color(&l->node, &locks); in __get_lock()
215 rb_erase(&lock->node, &locks); in __del_lock()
6 * This test shows that write-write ordering provided by locks
60 u32 locks; /* count of 'lock' transactions */ member
489 if (lock & P(LOCK, LOCKED)) stats->locks++; in c2c_decode_stats()
582 stats->locks += add->locks; in c2c_add_stats()
531 as reader-writer locks or sequence locks.
45 adding new "little kernel locks".
400 * Wait for pre-existing local locks. One at
499 Potentially infinite loops, such as those used to wait for locks to
562 attention only to those executions in which both locks are actually
2121 fprintf(out, " Locked Load/Store Operations : %10d\n", stats->locks); in print_c2c__display_stats()
2168 fprintf(out, " Locked Access on shared lines : %10d\n", stats->locks); in print_shared_cacheline_info()