/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit
{
	LOCK_USED = 0,
	LOCK_USED_IN_HARDIRQ,
	LOCK_USED_IN_SOFTIRQ,
	LOCK_ENABLED_SOFTIRQS,
	LOCK_ENABLED_HARDIRQS,
	LOCK_USED_IN_HARDIRQ_READ,
	LOCK_USED_IN_SOFTIRQ_READ,
	LOCK_ENABLED_SOFTIRQS_READ,
	LOCK_ENABLED_HARDIRQS_READ,
	LOCK_USAGE_STATES
};

/*
 * Usage-state bitmasks:
 */
#define LOCKF_USED			(1 << LOCK_USED)
#define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
#define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)

#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
#define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)

#define LOCKF_ENABLED_IRQS_READ \
		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
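
/*
 * Illustrative sketch (not part of this header): a subsystem that
 * wants a distinct class for its locks can declare a key in .data
 * and hand it to the map initializer via lockdep_set_class() below.
 * The identifiers "my_dev" and "my_dev_lock_key" are hypothetical:
 *
 *	static struct lock_class_key my_dev_lock_key;
 *
 *	spin_lock_init(&my_dev->lock);
 *	lockdep_set_class(&my_dev->lock, &my_dev_lock_key);
 */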

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, used during certain classes of graph
	 * walking to ensure that we check each node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
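
/*
 * Sketch of how a consumer (such as the /proc/lock_stat code) might
 * read and reset the per-class statistics; the surrounding seq-file
 * plumbing is omitted and the names are illustrative:
 *
 *	struct lock_class_stats stats = lock_stats(class);
 *
 *	if (stats.write_waittime.nr)
 *		seq_printf(m, "max waittime: %lld\n",
 *			   stats.write_waittime.max);
 *	clear_lock_stats(class);
 */
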
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};
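
/*
 * A lockdep_map is not used standalone: lock types embed one as a
 * "dep_map" member, which is what lockdep_set_class() and the
 * spin/rwlock/mutex/rwsem acquire/release helpers below operate on.
 * Simplified sketch of the pattern (real lock types differ in
 * detail):
 *
 *	typedef struct {
 *		raw_spinlock_t		raw_lock;
 *	#ifdef CONFIG_DEBUG_LOCK_ALLOC
 *		struct lockdep_map	dep_map;
 *	#endif
 *	} spinlock_t;
 */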

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;
	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:2;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
};
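
/*
 * The running hash itself is maintained by the validator core
 * (kernel/lockdep.c), which folds each newly taken class index into
 * the task's current chain key with a rotate-and-xor step, roughly:
 *
 *	#define iterate_chain_key(key1, key2) \
 *		(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
 *		 ((key1) >> (64 - MAX_LOCKDEP_KEYS_BITS)) ^ \
 *		 (key2))
 *
 * so the value depends on the whole chain taken so far, while
 * prev_chain_key above allows a release to unwind one step.
 */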

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
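
/*
 * Typical use is inside a lock type's own static initializer.  A
 * simplified, hypothetical sketch (exact macro names differ per lock
 * type; note that for a static lock the lock address itself serves
 * as the key, per the comment on lock-class keys above):
 *
 *	#define __MY_LOCK_UNLOCKED(lockname) \
 *		{ .raw_lock = __RAW_LOCK_UNLOCKED, \
 *		  .dep_map = STATIC_LOCKDEP_MAP_INIT(#lockname, &(lockname)) }
 */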

/*
 * Reinitialize a lock key - for cases where special locking or
 * special initialization of locks would otherwise make the validator
 * get the scope of dependencies wrong: either too broad (the class
 * needs to be split) or too narrow (it suffers from a false
 * class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub)	\
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
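
/*
 * Hedged example of a class-split (all names hypothetical): when one
 * common helper initializes locks that are used in distinct roles,
 * every instance lands in the same class and false deadlock reports
 * can result; giving each role its own key splits the class:
 *
 *	static struct lock_class_key rx_lock_key, tx_lock_key;
 *
 *	my_ring_init(&dev->rx_ring);
 *	lockdep_set_class(&dev->rx_ring.lock, &rx_lock_key);
 *	my_ring_init(&dev->tx_ring);
 *	lockdep_set_class(&dev->tx_ring.lock, &tx_lock_key);
 */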

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for "check":
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);
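
/*
 * Illustrative call, mirroring what the rwsem wrappers below expand
 * to: a non-recursive read-acquire (read == 1) with full validation
 * (check == 2), not a trylock, with no nest_lock:
 *
 *	lock_acquire(&sem->dep_map, 0, 0, 1, 2, NULL, _RET_IP_);
 */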

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

# define INIT_LOCKDEP				.lockdep_recursion = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)
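
/*
 * Roughly how a lock implementation uses this (simplified from the
 * generic spinlock code): the trylock fast path is attempted first,
 * and only when it fails is the contention recorded before blocking:
 *
 *	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 */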

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_GENERIC_HARDIRQS
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
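
/*
 * Example: taking two locks of the same class in a stable order, with
 * the inner acquisition annotated as one level of nesting so that the
 * validator does not report a false self-deadlock ("parent"/"child"
 * are hypothetical):
 *
 *	mutex_lock(&parent->mutex);
 *	mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 */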

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif
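
/*
 * Sketch of the release side (simplified from the generic spinlock
 * code): lockdep is told about the release before the lock is
 * actually dropped:
 *
 *	spin_release(&lock->dep_map, 1, _RET_IP_);
 *	_raw_spin_unlock(lock);
 *	preempt_enable();
 */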

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, NULL, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
# else
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
# endif
# define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
#else
# define lock_map_acquire(l)			do { } while (0)
# define lock_map_release(l)			do { } while (0)
#endif

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
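
/*
 * Hedged usage sketch: a function that takes a lock only on some
 * paths can still assert the dependency unconditionally, so that the
 * validator sees it on every call (names hypothetical):
 *
 *	static void *my_get_buffer(struct my_dev *dev)
 *	{
 *		might_lock(&dev->lock);
 *		if (dev->cached)
 *			return dev->cached;
 *		...
 *	}
 */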

#endif /* __LINUX_LOCKDEP_H */