/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#define MAX_LOCKDEP_SUBCLASSES		8UL

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+3*4)

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth
 * subclass are cached in lockdep_map. This optimization mainly
 * targets rq->lock: double_rq_lock() acquires it at single depth
 * under high contention, so caching that subclass avoids repeated
 * class lookups.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
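
/*
 * Example (an illustrative sketch, not part of this header; the names
 * my_driver_key and dev->lock are hypothetical): a driver that wants
 * all instances of one of its locks to share a single class would
 * typically declare a static key and assign it after init:
 *
 *	static struct lock_class_key my_driver_key;
 *
 *	spin_lock_init(&dev->lock);
 *	lockdep_set_class(&dev->lock, &my_driver_key);
 */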

extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 *     that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and
	 * bit 0 of it is reused to indicate whether the lock has been
	 * visited during BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in lookup_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base	    : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
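
/*
 * Example (an illustrative sketch; my_map and my_map_key are
 * hypothetical names): a global annotation-only map, in the style of
 * the workqueue code, would be declared as:
 *
 *	static struct lock_class_key my_map_key;
 *	static struct lockdep_map my_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_map", &my_map_key);
 */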

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub)	\
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
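
/*
 * Example (a sketch; myfs_key and obj->lock are hypothetical): a
 * subsystem whose use of a lock differs from the common case can
 * split it into its own class to avoid false positives:
 *
 *	static struct lock_class_key myfs_key;
 *	lockdep_set_class_and_name(&obj->lock, &myfs_key, "myfs_lock");
 */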

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
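
/*
 * Example (a sketch of what the wrappers further below expand to): an
 * exclusive, fully-checked, non-trylock acquire paired with a nested
 * release, as a spinlock implementation would emit:
 *
 *	lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *	...critical section...
 *	lock_release(&lock->dep_map, 1, _RET_IP_);
 */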

#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)

extern int lock_is_held(struct lockdep_map *lock);

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

struct pin_cookie { unsigned int val; };

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

# define INIT_LOCKDEP				.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)
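
/*
 * Example (a sketch; update_state() and foo->lock are hypothetical):
 * a function with a "caller must hold the lock" contract can both
 * document and enforce it:
 *
 *	static void update_state(struct foo *foo)
 *	{
 *		lockdep_assert_held(&foo->lock);
 *		...
 *	}
 */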

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
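
/*
 * Example (a sketch, in the style of the scheduler's use of these
 * hooks on rq->lock): pin a lock across a region where it must not
 * be dropped, then hand the cookie back when the region ends:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(lock);
 *	...region during which the lock must stay held...
 *	lockdep_unpin_lock(lock, cookie);
 */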

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_set_current_reclaim_state(g)	do { } while (0)
# define lockdep_clear_current_reclaim_state()	do { } while (0)
# define lockdep_trace_alloc(g)			do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case, since the result is not well defined and callers should
 * rather #ifdef the call sites themselves.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit() 			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

struct pin_cookie { };

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !CONFIG_LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})
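
/*
 * Example (a sketch of how a sleeping lock wires these up; this is
 * roughly what kernel/locking/rwsem.c does in down_read()): only if
 * the optimistic trylock fails is the contention event recorded,
 * followed by the real slowpath acquisition:
 *
 *	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
 */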

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
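
/*
 * Example (a sketch; a and b are hypothetical objects of the same
 * type): taking two locks of the same class in a well-defined order
 * needs the inner acquisition annotated with a subclass, or lockdep
 * would report a false self-deadlock:
 *
 *	mutex_lock(&a->lock);
 *	mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 */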

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)			lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)			lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)		lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)			lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)			lock_release(l, n, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
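
/*
 * Example (a sketch, in the style of the workqueue code): lock_map_*()
 * lets a subsystem feed a dependency that is not a real lock into the
 * validator, e.g. "running this work" vs. "flushing this work":
 *
 *	lock_map_acquire(&work->lockdep_map);
 *	...execute the work item...
 *	lock_map_release(&work->lockdep_map);
 */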

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
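
/*
 * Example (a sketch; foo->mutex is hypothetical): a fast path that
 * only takes a lock on rare slow paths can still let the validator
 * see the potential dependency on every call:
 *
 *	might_lock(&foo->mutex);
 */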

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */