1 /*
2  * kernel/lockdep.c
3  *
4  * Runtime locking correctness validator
5  *
6  * Started by Ingo Molnar:
7  *
8  *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
10  *
11  * this code maps all the lock dependencies as they occur in a live kernel
12  * and will warn about the following classes of locking bugs:
13  *
14  * - lock inversion scenarios
15  * - circular lock dependencies
16  * - hardirq/softirq safe/unsafe locking bugs
17  *
18  * Bugs are reported even if the current locking scenario does not cause
19  * any deadlock at this point.
20  *
21  * I.e. if anytime in the past two locks were taken in a different order,
22  * even if it happened for another task, even if those were different
23  * locks (but of the same class as this lock), this code will detect it.
24  *
25  * Thanks to Arjan van de Ven for coming up with the initial idea of
26  * mapping lock dependencies at runtime.
27  */
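/*
 * A minimal illustration of the first class of bug above (the lock
 * names A and B are hypothetical):
 *
 *	context 1:		context 2:
 *	  spin_lock(&A);	  spin_lock(&B);
 *	  spin_lock(&B);	  spin_lock(&A);
 *
 * Once both orderings have been observed - in any task, at any time -
 * the A -> B and B -> A dependencies form a cycle in the dependency
 * graph and a "possible circular locking dependency" report is
 * printed, even if the two contexts never actually raced.
 */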
28 #define DISABLE_BRANCH_PROFILING
29 #include <linux/mutex.h>
30 #include <linux/sched.h>
31 #include <linux/delay.h>
32 #include <linux/module.h>
33 #include <linux/proc_fs.h>
34 #include <linux/seq_file.h>
35 #include <linux/spinlock.h>
36 #include <linux/kallsyms.h>
37 #include <linux/interrupt.h>
38 #include <linux/stacktrace.h>
39 #include <linux/debug_locks.h>
40 #include <linux/irqflags.h>
41 #include <linux/utsname.h>
42 #include <linux/hash.h>
43 #include <linux/ftrace.h>
44 
45 #include <asm/sections.h>
46 
47 #include "lockdep_internals.h"
48 
49 #ifdef CONFIG_PROVE_LOCKING
50 int prove_locking = 1;
51 module_param(prove_locking, int, 0644);
52 #else
53 #define prove_locking 0
54 #endif
55 
56 #ifdef CONFIG_LOCK_STAT
57 int lock_stat = 1;
58 module_param(lock_stat, int, 0644);
59 #else
60 #define lock_stat 0
61 #endif
62 
63 /*
64  * lockdep_lock: protects the lockdep graph, the hashes and the
65  *               class/list/hash allocators.
66  *
67  * This is one of the rare exceptions where it's justified
68  * to use a raw spinlock - we really don't want the spinlock
69  * code to recurse back into the lockdep code...
70  */
71 static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
72 
73 static int graph_lock(void)
74 {
75 	__raw_spin_lock(&lockdep_lock);
76 	/*
77 	 * Make sure that if another CPU detected a bug while
78 	 * walking the graph we don't change it (while the other
79 	 * CPU is busy printing out stuff with the graph lock
80 	 * dropped already)
81 	 */
82 	if (!debug_locks) {
83 		__raw_spin_unlock(&lockdep_lock);
84 		return 0;
85 	}
86 	/* prevent any recursions within lockdep from causing deadlocks */
87 	current->lockdep_recursion++;
88 	return 1;
89 }
90 
91 static inline int graph_unlock(void)
92 {
93 	if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
94 		return DEBUG_LOCKS_WARN_ON(1);
95 
96 	current->lockdep_recursion--;
97 	__raw_spin_unlock(&lockdep_lock);
98 	return 0;
99 }
100 
101 /*
102  * Turn lock debugging off and return with 0 if it was off already,
103  * and also release the graph lock:
104  */
105 static inline int debug_locks_off_graph_unlock(void)
106 {
107 	int ret = debug_locks_off();
108 
109 	__raw_spin_unlock(&lockdep_lock);
110 
111 	return ret;
112 }
113 
114 static int lockdep_initialized;
115 
116 unsigned long nr_list_entries;
117 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
118 
119 /*
120  * All data structures here are protected by the global debug_lock.
121  *
122  * Mutex key structs only get allocated, once during bootup, and never
123  * get freed - this significantly simplifies the debugging code.
124  */
125 unsigned long nr_lock_classes;
126 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
127 
128 static inline struct lock_class *hlock_class(struct held_lock *hlock)
129 {
130 	if (!hlock->class_idx) {
131 		DEBUG_LOCKS_WARN_ON(1);
132 		return NULL;
133 	}
134 	return lock_classes + hlock->class_idx - 1;
135 }
136 
137 #ifdef CONFIG_LOCK_STAT
138 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
139 
140 static int lock_point(unsigned long points[], unsigned long ip)
141 {
142 	int i;
143 
144 	for (i = 0; i < LOCKSTAT_POINTS; i++) {
145 		if (points[i] == 0) {
146 			points[i] = ip;
147 			break;
148 		}
149 		if (points[i] == ip)
150 			break;
151 	}
152 
153 	return i;
154 }
155 
156 static void lock_time_inc(struct lock_time *lt, s64 time)
157 {
158 	if (time > lt->max)
159 		lt->max = time;
160 
161 	if (time < lt->min || !lt->min)
162 		lt->min = time;
163 
164 	lt->total += time;
165 	lt->nr++;
166 }
167 
168 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
169 {
170 	dst->min += src->min;
171 	dst->max += src->max;
172 	dst->total += src->total;
173 	dst->nr += src->nr;
174 }
175 
176 struct lock_class_stats lock_stats(struct lock_class *class)
177 {
178 	struct lock_class_stats stats;
179 	int cpu, i;
180 
181 	memset(&stats, 0, sizeof(struct lock_class_stats));
182 	for_each_possible_cpu(cpu) {
183 		struct lock_class_stats *pcs =
184 			&per_cpu(lock_stats, cpu)[class - lock_classes];
185 
186 		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
187 			stats.contention_point[i] += pcs->contention_point[i];
188 
189 		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
190 			stats.contending_point[i] += pcs->contending_point[i];
191 
192 		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
193 		lock_time_add(&pcs->write_waittime, &stats.write_waittime);
194 
195 		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
196 		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
197 
198 		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
199 			stats.bounces[i] += pcs->bounces[i];
200 	}
201 
202 	return stats;
203 }
204 
205 void clear_lock_stats(struct lock_class *class)
206 {
207 	int cpu;
208 
209 	for_each_possible_cpu(cpu) {
210 		struct lock_class_stats *cpu_stats =
211 			&per_cpu(lock_stats, cpu)[class - lock_classes];
212 
213 		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
214 	}
215 	memset(class->contention_point, 0, sizeof(class->contention_point));
216 	memset(class->contending_point, 0, sizeof(class->contending_point));
217 }
218 
219 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
220 {
221 	return &get_cpu_var(lock_stats)[class - lock_classes];
222 }
223 
224 static void put_lock_stats(struct lock_class_stats *stats)
225 {
226 	put_cpu_var(lock_stats);
227 }
228 
229 static void lock_release_holdtime(struct held_lock *hlock)
230 {
231 	struct lock_class_stats *stats;
232 	s64 holdtime;
233 
234 	if (!lock_stat)
235 		return;
236 
237 	holdtime = sched_clock() - hlock->holdtime_stamp;
238 
239 	stats = get_lock_stats(hlock_class(hlock));
240 	if (hlock->read)
241 		lock_time_inc(&stats->read_holdtime, holdtime);
242 	else
243 		lock_time_inc(&stats->write_holdtime, holdtime);
244 	put_lock_stats(stats);
245 }
246 #else
247 static inline void lock_release_holdtime(struct held_lock *hlock)
248 {
249 }
250 #endif
251 
252 /*
253  * We keep a global list of all lock classes. The list only grows,
254  * never shrinks. The list is only accessed with the lockdep
255  * spinlock lock held.
256  */
257 LIST_HEAD(all_lock_classes);
258 
259 /*
260  * The lockdep classes are in a hash-table as well, for fast lookup:
261  */
262 #define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
263 #define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
264 #define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
265 #define classhashentry(key)	(classhash_table + __classhashfn((key)))
266 
267 static struct list_head classhash_table[CLASSHASH_SIZE];
268 
269 /*
270  * We put the lock dependency chains into a hash-table as well, to cache
271  * their existence:
272  */
273 #define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
274 #define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
275 #define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
276 #define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))
277 
278 static struct list_head chainhash_table[CHAINHASH_SIZE];
279 
280 /*
281  * The hash key of the lock dependency chains is a hash itself too:
282  * it's a hash of all locks taken up to that lock, including that lock.
283  * It's a 64-bit hash, because it's important for the keys to be
284  * unique.
285  */
286 #define iterate_chain_key(key1, key2) \
287 	(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
288 	((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
289 	(key2))
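/*
 * A rough sketch of how this is used (id_A and id_B stand for
 * arbitrary class indices): starting from 0, each held lock's class
 * index is folded in by rotating the previous 64-bit key left by
 * MAX_LOCKDEP_KEYS_BITS and XOR-ing in the new index:
 *
 *	u64 chain_key = 0;
 *	chain_key = iterate_chain_key(chain_key, id_A);
 *	chain_key = iterate_chain_key(chain_key, id_B);
 *
 * so the same locks taken in a different order produce a different
 * chain key. check_chain_key() below recomputes the key from scratch
 * to validate the incrementally built value.
 */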
290 
291 void lockdep_off(void)
292 {
293 	current->lockdep_recursion++;
294 }
295 EXPORT_SYMBOL(lockdep_off);
296 
297 void lockdep_on(void)
298 {
299 	current->lockdep_recursion--;
300 }
301 EXPORT_SYMBOL(lockdep_on);
302 
303 /*
304  * Debugging switches:
305  */
306 
307 #define VERBOSE			0
308 #define VERY_VERBOSE		0
309 
310 #if VERBOSE
311 # define HARDIRQ_VERBOSE	1
312 # define SOFTIRQ_VERBOSE	1
313 #else
314 # define HARDIRQ_VERBOSE	0
315 # define SOFTIRQ_VERBOSE	0
316 #endif
317 
318 #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
319 /*
320  * Quick filtering for interesting events:
321  */
322 static int class_filter(struct lock_class *class)
323 {
324 #if 0
325 	/* Example */
326 	if (class->name_version == 1 &&
327 			!strcmp(class->name, "lockname"))
328 		return 1;
329 	if (class->name_version == 1 &&
330 			!strcmp(class->name, "&struct->lockfield"))
331 		return 1;
332 #endif
333 	/* Filter everything else. Returning 1 here would allow everything else */
334 	return 0;
335 }
336 #endif
337 
338 static int verbose(struct lock_class *class)
339 {
340 #if VERBOSE
341 	return class_filter(class);
342 #endif
343 	return 0;
344 }
345 
346 /*
347  * Stack-trace: tightly packed array of stack backtrace
348  * addresses. Protected by the graph_lock.
349  */
350 unsigned long nr_stack_trace_entries;
351 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
352 
353 static int save_trace(struct stack_trace *trace)
354 {
355 	trace->nr_entries = 0;
356 	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
357 	trace->entries = stack_trace + nr_stack_trace_entries;
358 
359 	trace->skip = 3;
360 
361 	save_stack_trace(trace);
362 
363 	trace->max_entries = trace->nr_entries;
364 
365 	nr_stack_trace_entries += trace->nr_entries;
366 
367 	if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
368 		if (!debug_locks_off_graph_unlock())
369 			return 0;
370 
371 		printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
372 		printk("turning off the locking correctness validator.\n");
373 		dump_stack();
374 
375 		return 0;
376 	}
377 
378 	return 1;
379 }
380 
381 unsigned int nr_hardirq_chains;
382 unsigned int nr_softirq_chains;
383 unsigned int nr_process_chains;
384 unsigned int max_lockdep_depth;
385 unsigned int max_recursion_depth;
386 
387 static unsigned int lockdep_dependency_gen_id;
388 
389 static bool lockdep_dependency_visit(struct lock_class *source,
390 				     unsigned int depth)
391 {
392 	if (!depth)
393 		lockdep_dependency_gen_id++;
394 	if (source->dep_gen_id == lockdep_dependency_gen_id)
395 		return true;
396 	source->dep_gen_id = lockdep_dependency_gen_id;
397 	return false;
398 }
399 
400 #ifdef CONFIG_DEBUG_LOCKDEP
401 /*
402  * We cannot printk in early bootup code. Not even early_printk()
403  * might work. So we mark any initialization errors and printk
404  * about it later on, in lockdep_info().
405  */
406 static int lockdep_init_error;
407 static unsigned long lockdep_init_trace_data[20];
408 static struct stack_trace lockdep_init_trace = {
409 	.max_entries = ARRAY_SIZE(lockdep_init_trace_data),
410 	.entries = lockdep_init_trace_data,
411 };
412 
413 /*
414  * Various lockdep statistics:
415  */
416 atomic_t chain_lookup_hits;
417 atomic_t chain_lookup_misses;
418 atomic_t hardirqs_on_events;
419 atomic_t hardirqs_off_events;
420 atomic_t redundant_hardirqs_on;
421 atomic_t redundant_hardirqs_off;
422 atomic_t softirqs_on_events;
423 atomic_t softirqs_off_events;
424 atomic_t redundant_softirqs_on;
425 atomic_t redundant_softirqs_off;
426 atomic_t nr_unused_locks;
427 atomic_t nr_cyclic_checks;
428 atomic_t nr_cyclic_check_recursions;
429 atomic_t nr_find_usage_forwards_checks;
430 atomic_t nr_find_usage_forwards_recursions;
431 atomic_t nr_find_usage_backwards_checks;
432 atomic_t nr_find_usage_backwards_recursions;
433 # define debug_atomic_inc(ptr)		atomic_inc(ptr)
434 # define debug_atomic_dec(ptr)		atomic_dec(ptr)
435 # define debug_atomic_read(ptr)		atomic_read(ptr)
436 #else
437 # define debug_atomic_inc(ptr)		do { } while (0)
438 # define debug_atomic_dec(ptr)		do { } while (0)
439 # define debug_atomic_read(ptr)		0
440 #endif
441 
442 /*
443  * Locking printouts:
444  */
445 
446 static const char *usage_str[] =
447 {
448 	[LOCK_USED] =			"initial-use ",
449 	[LOCK_USED_IN_HARDIRQ] =	"in-hardirq-W",
450 	[LOCK_USED_IN_SOFTIRQ] =	"in-softirq-W",
451 	[LOCK_ENABLED_SOFTIRQS] =	"softirq-on-W",
452 	[LOCK_ENABLED_HARDIRQS] =	"hardirq-on-W",
453 	[LOCK_USED_IN_HARDIRQ_READ] =	"in-hardirq-R",
454 	[LOCK_USED_IN_SOFTIRQ_READ] =	"in-softirq-R",
455 	[LOCK_ENABLED_SOFTIRQS_READ] =	"softirq-on-R",
456 	[LOCK_ENABLED_HARDIRQS_READ] =	"hardirq-on-R",
457 };
458 
459 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
460 {
461 	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
462 }
463 
464 void
465 get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
466 {
467 	*c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
468 
469 	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
470 		*c1 = '+';
471 	else
472 		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
473 			*c1 = '-';
474 
475 	if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
476 		*c2 = '+';
477 	else
478 		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
479 			*c2 = '-';
480 
481 	if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
482 		*c3 = '-';
483 	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
484 		*c3 = '+';
485 		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
486 			*c3 = '?';
487 	}
488 
489 	if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
490 		*c4 = '-';
491 	if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
492 		*c4 = '+';
493 		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
494 			*c4 = '?';
495 	}
496 }
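/*
 * Reading the four characters produced above, in order: hardirq,
 * softirq, hardirq-read and softirq-read state of the class.  '+'
 * means the lock was used in that irq context, '-' means it was taken
 * with that irq type enabled, '?' (read slots only) means both, and
 * '.' means neither.  So a printout such as " (&dev->lock){+...}"
 * (a made-up name) says the lock was acquired in hardirq context at
 * least once.
 */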
497 
498 static void print_lock_name(struct lock_class *class)
499 {
500 	char str[KSYM_NAME_LEN], c1, c2, c3, c4;
501 	const char *name;
502 
503 	get_usage_chars(class, &c1, &c2, &c3, &c4);
504 
505 	name = class->name;
506 	if (!name) {
507 		name = __get_key_name(class->key, str);
508 		printk(" (%s", name);
509 	} else {
510 		printk(" (%s", name);
511 		if (class->name_version > 1)
512 			printk("#%d", class->name_version);
513 		if (class->subclass)
514 			printk("/%d", class->subclass);
515 	}
516 	printk("){%c%c%c%c}", c1, c2, c3, c4);
517 }
518 
519 static void print_lockdep_cache(struct lockdep_map *lock)
520 {
521 	const char *name;
522 	char str[KSYM_NAME_LEN];
523 
524 	name = lock->name;
525 	if (!name)
526 		name = __get_key_name(lock->key->subkeys, str);
527 
528 	printk("%s", name);
529 }
530 
531 static void print_lock(struct held_lock *hlock)
532 {
533 	print_lock_name(hlock_class(hlock));
534 	printk(", at: ");
535 	print_ip_sym(hlock->acquire_ip);
536 }
537 
538 static void lockdep_print_held_locks(struct task_struct *curr)
539 {
540 	int i, depth = curr->lockdep_depth;
541 
542 	if (!depth) {
543 		printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
544 		return;
545 	}
546 	printk("%d lock%s held by %s/%d:\n",
547 		depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
548 
549 	for (i = 0; i < depth; i++) {
550 		printk(" #%d: ", i);
551 		print_lock(curr->held_locks + i);
552 	}
553 }
554 
555 static void print_lock_class_header(struct lock_class *class, int depth)
556 {
557 	int bit;
558 
559 	printk("%*s->", depth, "");
560 	print_lock_name(class);
561 	printk(" ops: %lu", class->ops);
562 	printk(" {\n");
563 
564 	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
565 		if (class->usage_mask & (1 << bit)) {
566 			int len = depth;
567 
568 			len += printk("%*s   %s", depth, "", usage_str[bit]);
569 			len += printk(" at:\n");
570 			print_stack_trace(class->usage_traces + bit, len);
571 		}
572 	}
573 	printk("%*s }\n", depth, "");
574 
575 	printk("%*s ... key      at: ",depth,"");
576 	print_ip_sym((unsigned long)class->key);
577 }
578 
579 /*
580  * printk all lock dependencies starting at <entry>:
581  */
582 static void __used
583 print_lock_dependencies(struct lock_class *class, int depth)
584 {
585 	struct lock_list *entry;
586 
587 	if (lockdep_dependency_visit(class, depth))
588 		return;
589 
590 	if (DEBUG_LOCKS_WARN_ON(depth >= 20))
591 		return;
592 
593 	print_lock_class_header(class, depth);
594 
595 	list_for_each_entry(entry, &class->locks_after, entry) {
596 		if (DEBUG_LOCKS_WARN_ON(!entry->class))
597 			return;
598 
599 		print_lock_dependencies(entry->class, depth + 1);
600 
601 		printk("%*s ... acquired at:\n",depth,"");
602 		print_stack_trace(&entry->trace, 2);
603 		printk("\n");
604 	}
605 }
606 
607 static void print_kernel_version(void)
608 {
609 	printk("%s %.*s\n", init_utsname()->release,
610 		(int)strcspn(init_utsname()->version, " "),
611 		init_utsname()->version);
612 }
613 
614 static int very_verbose(struct lock_class *class)
615 {
616 #if VERY_VERBOSE
617 	return class_filter(class);
618 #endif
619 	return 0;
620 }
621 
622 /*
623  * Is this the address of a static object:
624  */
625 static int static_obj(void *obj)
626 {
627 	unsigned long start = (unsigned long) &_stext,
628 		      end   = (unsigned long) &_end,
629 		      addr  = (unsigned long) obj;
630 #ifdef CONFIG_SMP
631 	int i;
632 #endif
633 
634 	/*
635 	 * static variable?
636 	 */
637 	if ((addr >= start) && (addr < end))
638 		return 1;
639 
640 #ifdef CONFIG_SMP
641 	/*
642 	 * percpu var?
643 	 */
644 	for_each_possible_cpu(i) {
645 		start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
646 		end   = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
647 					+ per_cpu_offset(i);
648 
649 		if ((addr >= start) && (addr < end))
650 			return 1;
651 	}
652 #endif
653 
654 	/*
655 	 * module var?
656 	 */
657 	return is_module_address(addr);
658 }
659 
660 /*
661  * To make lock name printouts unique, we calculate a unique
662  * class->name_version generation counter:
663  */
664 static int count_matching_names(struct lock_class *new_class)
665 {
666 	struct lock_class *class;
667 	int count = 0;
668 
669 	if (!new_class->name)
670 		return 0;
671 
672 	list_for_each_entry(class, &all_lock_classes, lock_entry) {
673 		if (new_class->key - new_class->subclass == class->key)
674 			return class->name_version;
675 		if (class->name && !strcmp(class->name, new_class->name))
676 			count = max(count, class->name_version);
677 	}
678 
679 	return count + 1;
680 }
681 
682 /*
683  * Register a lock's class in the hash-table, if the class is not present
684  * yet. Otherwise we look it up. We cache the result in the lock object
685  * itself, so actual lookup of the hash should be once per lock object.
686  */
687 static inline struct lock_class *
688 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
689 {
690 	struct lockdep_subclass_key *key;
691 	struct list_head *hash_head;
692 	struct lock_class *class;
693 
694 #ifdef CONFIG_DEBUG_LOCKDEP
695 	/*
696 	 * If the architecture calls into lockdep before initializing
697 	 * the hashes then we'll warn about it later. (we cannot printk
698 	 * right now)
699 	 */
700 	if (unlikely(!lockdep_initialized)) {
701 		lockdep_init();
702 		lockdep_init_error = 1;
703 		save_stack_trace(&lockdep_init_trace);
704 	}
705 #endif
706 
707 	/*
708 	 * Static locks do not have their class-keys yet - for them the key
709 	 * is the lock object itself:
710 	 */
711 	if (unlikely(!lock->key))
712 		lock->key = (void *)lock;
713 
714 	/*
715 	 * NOTE: the class-key must be unique. For dynamic locks, a static
716 	 * lock_class_key variable is passed in through the mutex_init()
717 	 * (or spin_lock_init()) call - which acts as the key. For static
718 	 * locks we use the lock object itself as the key.
719 	 */
720 	BUILD_BUG_ON(sizeof(struct lock_class_key) >
721 			sizeof(struct lockdep_map));
722 
723 	key = lock->key->subkeys + subclass;
724 
725 	hash_head = classhashentry(key);
726 
727 	/*
728 	 * We can walk the hash lockfree, because the hash only
729 	 * grows, and we are careful when adding entries to the end:
730 	 */
731 	list_for_each_entry(class, hash_head, hash_entry) {
732 		if (class->key == key) {
733 			WARN_ON_ONCE(class->name != lock->name);
734 			return class;
735 		}
736 	}
737 
738 	return NULL;
739 }
740 
741 /*
742  * Register a lock's class in the hash-table, if the class is not present
743  * yet. Otherwise we look it up. We cache the result in the lock object
744  * itself, so actual lookup of the hash should be once per lock object.
745  */
746 static inline struct lock_class *
747 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
748 {
749 	struct lockdep_subclass_key *key;
750 	struct list_head *hash_head;
751 	struct lock_class *class;
752 	unsigned long flags;
753 
754 	class = look_up_lock_class(lock, subclass);
755 	if (likely(class))
756 		return class;
757 
758 	/*
759 	 * Debug-check: all keys must be persistent!
760  	 */
761 	if (!static_obj(lock->key)) {
762 		debug_locks_off();
763 		printk("INFO: trying to register non-static key.\n");
764 		printk("the code is fine but needs lockdep annotation.\n");
765 		printk("turning off the locking correctness validator.\n");
766 		dump_stack();
767 
768 		return NULL;
769 	}
770 
771 	key = lock->key->subkeys + subclass;
772 	hash_head = classhashentry(key);
773 
774 	raw_local_irq_save(flags);
775 	if (!graph_lock()) {
776 		raw_local_irq_restore(flags);
777 		return NULL;
778 	}
779 	/*
780 	 * We have to do the hash-walk again, to avoid races
781 	 * with another CPU:
782 	 */
783 	list_for_each_entry(class, hash_head, hash_entry)
784 		if (class->key == key)
785 			goto out_unlock_set;
786 	/*
787 	 * Allocate a new key from the static array, and add it to
788 	 * the hash:
789 	 */
790 	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
791 		if (!debug_locks_off_graph_unlock()) {
792 			raw_local_irq_restore(flags);
793 			return NULL;
794 		}
795 		raw_local_irq_restore(flags);
796 
797 		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
798 		printk("turning off the locking correctness validator.\n");
799 		return NULL;
800 	}
801 	class = lock_classes + nr_lock_classes++;
802 	debug_atomic_inc(&nr_unused_locks);
803 	class->key = key;
804 	class->name = lock->name;
805 	class->subclass = subclass;
806 	INIT_LIST_HEAD(&class->lock_entry);
807 	INIT_LIST_HEAD(&class->locks_before);
808 	INIT_LIST_HEAD(&class->locks_after);
809 	class->name_version = count_matching_names(class);
810 	/*
811 	 * We use RCU's safe list-add method to make
812 	 * parallel walking of the hash-list safe:
813 	 */
814 	list_add_tail_rcu(&class->hash_entry, hash_head);
815 	/*
816 	 * Add it to the global list of classes:
817 	 */
818 	list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
819 
820 	if (verbose(class)) {
821 		graph_unlock();
822 		raw_local_irq_restore(flags);
823 
824 		printk("\nnew class %p: %s", class->key, class->name);
825 		if (class->name_version > 1)
826 			printk("#%d", class->name_version);
827 		printk("\n");
828 		dump_stack();
829 
830 		raw_local_irq_save(flags);
831 		if (!graph_lock()) {
832 			raw_local_irq_restore(flags);
833 			return NULL;
834 		}
835 	}
836 out_unlock_set:
837 	graph_unlock();
838 	raw_local_irq_restore(flags);
839 
840 	if (!subclass || force)
841 		lock->class_cache = class;
842 
843 	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
844 		return NULL;
845 
846 	return class;
847 }
848 
849 #ifdef CONFIG_PROVE_LOCKING
850 /*
851  * Allocate a lockdep entry. (assumes the graph_lock held, returns
852  * with NULL on failure)
853  */
854 static struct lock_list *alloc_list_entry(void)
855 {
856 	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
857 		if (!debug_locks_off_graph_unlock())
858 			return NULL;
859 
860 		printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
861 		printk("turning off the locking correctness validator.\n");
862 		return NULL;
863 	}
864 	return list_entries + nr_list_entries++;
865 }
866 
867 /*
868  * Add a new dependency to the head of the list:
869  */
870 static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
871 			    struct list_head *head, unsigned long ip, int distance)
872 {
873 	struct lock_list *entry;
874 	/*
875 	 * Lock not present yet - get a new dependency struct and
876 	 * add it to the list:
877 	 */
878 	entry = alloc_list_entry();
879 	if (!entry)
880 		return 0;
881 
882 	if (!save_trace(&entry->trace))
883 		return 0;
884 
885 	entry->class = this;
886 	entry->distance = distance;
887 	/*
888 	 * Since we never remove from the dependency list, the list can
889 	 * be walked lockless by other CPUs, it's only allocation
890 	 * that must be protected by the spinlock. But this also means
891 	 * we must make new entries visible only once writes to the
892 	 * entry become visible - hence the RCU op:
893 	 */
894 	list_add_tail_rcu(&entry->entry, head);
895 
896 	return 1;
897 }
898 
899 /*
900  * Recursive, forwards-direction lock-dependency checking, used for
901  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
902  * checking.
903  *
904  * (to keep the stackframe of the recursive functions small we
905  *  use these global variables, and we also mark various helper
906  *  functions as noinline.)
907  */
908 static struct held_lock *check_source, *check_target;
909 
910 /*
911  * Print a dependency chain entry (this is only done when a deadlock
912  * has been detected):
913  */
914 static noinline int
915 print_circular_bug_entry(struct lock_list *target, unsigned int depth)
916 {
917 	if (debug_locks_silent)
918 		return 0;
919 	printk("\n-> #%u", depth);
920 	print_lock_name(target->class);
921 	printk(":\n");
922 	print_stack_trace(&target->trace, 6);
923 
924 	return 0;
925 }
926 
927 /*
928  * When a circular dependency is detected, print the
929  * header first:
930  */
931 static noinline int
932 print_circular_bug_header(struct lock_list *entry, unsigned int depth)
933 {
934 	struct task_struct *curr = current;
935 
936 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
937 		return 0;
938 
939 	printk("\n=======================================================\n");
940 	printk(  "[ INFO: possible circular locking dependency detected ]\n");
941 	print_kernel_version();
942 	printk(  "-------------------------------------------------------\n");
943 	printk("%s/%d is trying to acquire lock:\n",
944 		curr->comm, task_pid_nr(curr));
945 	print_lock(check_source);
946 	printk("\nbut task is already holding lock:\n");
947 	print_lock(check_target);
948 	printk("\nwhich lock already depends on the new lock.\n\n");
949 	printk("\nthe existing dependency chain (in reverse order) is:\n");
950 
951 	print_circular_bug_entry(entry, depth);
952 
953 	return 0;
954 }
955 
956 static noinline int print_circular_bug_tail(void)
957 {
958 	struct task_struct *curr = current;
959 	struct lock_list this;
960 
961 	if (debug_locks_silent)
962 		return 0;
963 
964 	this.class = hlock_class(check_source);
965 	if (!save_trace(&this.trace))
966 		return 0;
967 
968 	print_circular_bug_entry(&this, 0);
969 
970 	printk("\nother info that might help us debug this:\n\n");
971 	lockdep_print_held_locks(curr);
972 
973 	printk("\nstack backtrace:\n");
974 	dump_stack();
975 
976 	return 0;
977 }
978 
979 #define RECURSION_LIMIT 40
980 
981 static int noinline print_infinite_recursion_bug(void)
982 {
983 	if (!debug_locks_off_graph_unlock())
984 		return 0;
985 
986 	WARN_ON(1);
987 
988 	return 0;
989 }
990 
991 unsigned long __lockdep_count_forward_deps(struct lock_class *class,
992 					   unsigned int depth)
993 {
994 	struct lock_list *entry;
995 	unsigned long ret = 1;
996 
997 	if (lockdep_dependency_visit(class, depth))
998 		return 0;
999 
1000 	/*
1001 	 * Recurse this class's dependency list:
1002 	 */
1003 	list_for_each_entry(entry, &class->locks_after, entry)
1004 		ret += __lockdep_count_forward_deps(entry->class, depth + 1);
1005 
1006 	return ret;
1007 }
1008 
1009 unsigned long lockdep_count_forward_deps(struct lock_class *class)
1010 {
1011 	unsigned long ret, flags;
1012 
1013 	local_irq_save(flags);
1014 	__raw_spin_lock(&lockdep_lock);
1015 	ret = __lockdep_count_forward_deps(class, 0);
1016 	__raw_spin_unlock(&lockdep_lock);
1017 	local_irq_restore(flags);
1018 
1019 	return ret;
1020 }
1021 
1022 unsigned long __lockdep_count_backward_deps(struct lock_class *class,
1023 					    unsigned int depth)
1024 {
1025 	struct lock_list *entry;
1026 	unsigned long ret = 1;
1027 
1028 	if (lockdep_dependency_visit(class, depth))
1029 		return 0;
1030 	/*
1031 	 * Recurse this class's dependency list:
1032 	 */
1033 	list_for_each_entry(entry, &class->locks_before, entry)
1034 		ret += __lockdep_count_backward_deps(entry->class, depth + 1);
1035 
1036 	return ret;
1037 }
1038 
1039 unsigned long lockdep_count_backward_deps(struct lock_class *class)
1040 {
1041 	unsigned long ret, flags;
1042 
1043 	local_irq_save(flags);
1044 	__raw_spin_lock(&lockdep_lock);
1045 	ret = __lockdep_count_backward_deps(class, 0);
1046 	__raw_spin_unlock(&lockdep_lock);
1047 	local_irq_restore(flags);
1048 
1049 	return ret;
1050 }
1051 
1052 /*
1053  * Prove that the dependency graph starting at <entry> can not
1054  * lead to <target>. Print an error and return 0 if it does.
1055  */
1056 static noinline int
1057 check_noncircular(struct lock_class *source, unsigned int depth)
1058 {
1059 	struct lock_list *entry;
1060 
1061 	if (lockdep_dependency_visit(source, depth))
1062 		return 1;
1063 
1064 	debug_atomic_inc(&nr_cyclic_check_recursions);
1065 	if (depth > max_recursion_depth)
1066 		max_recursion_depth = depth;
1067 	if (depth >= RECURSION_LIMIT)
1068 		return print_infinite_recursion_bug();
1069 	/*
1070 	 * Check this lock's dependency list:
1071 	 */
1072 	list_for_each_entry(entry, &source->locks_after, entry) {
1073 		if (entry->class == hlock_class(check_target))
1074 			return print_circular_bug_header(entry, depth+1);
1075 		debug_atomic_inc(&nr_cyclic_checks);
1076 		if (!check_noncircular(entry->class, depth+1))
1077 			return print_circular_bug_entry(entry, depth+1);
1078 	}
1079 	return 1;
1080 }
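/*
 * In short: check_prev_add() further below sets check_source = <next>
 * and check_target = <prev> before calling this, so the walk starts
 * at the about-to-be-acquired lock and follows locks_after edges;
 * reaching <prev> means the new <prev> -> <next> dependency would
 * close a cycle, and the circular-bug report is generated.
 */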
1081 
1082 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1083 /*
1084  * Forwards and backwards subgraph searching, for the purposes of
1085  * proving that two subgraphs can be connected by a new dependency
1086  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1087  */
1088 static enum lock_usage_bit find_usage_bit;
1089 static struct lock_class *forwards_match, *backwards_match;
1090 
1091 /*
1092  * Find a node in the forwards-direction dependency sub-graph starting
1093  * at <source> that matches <find_usage_bit>.
1094  *
1095  * Return 2 if such a node exists in the subgraph, and put that node
1096  * into <forwards_match>.
1097  *
1098  * Return 1 otherwise and keep <forwards_match> unchanged.
1099  * Return 0 on error.
1100  */
1101 static noinline int
1102 find_usage_forwards(struct lock_class *source, unsigned int depth)
1103 {
1104 	struct lock_list *entry;
1105 	int ret;
1106 
1107 	if (lockdep_dependency_visit(source, depth))
1108 		return 1;
1109 
1110 	if (depth > max_recursion_depth)
1111 		max_recursion_depth = depth;
1112 	if (depth >= RECURSION_LIMIT)
1113 		return print_infinite_recursion_bug();
1114 
1115 	debug_atomic_inc(&nr_find_usage_forwards_checks);
1116 	if (source->usage_mask & (1 << find_usage_bit)) {
1117 		forwards_match = source;
1118 		return 2;
1119 	}
1120 
1121 	/*
1122 	 * Check this lock's dependency list:
1123 	 */
1124 	list_for_each_entry(entry, &source->locks_after, entry) {
1125 		debug_atomic_inc(&nr_find_usage_forwards_recursions);
1126 		ret = find_usage_forwards(entry->class, depth+1);
1127 		if (ret == 2 || ret == 0)
1128 			return ret;
1129 	}
1130 	return 1;
1131 }
1132 
1133 /*
1134  * Find a node in the backwards-direction dependency sub-graph starting
1135  * at <source> that matches <find_usage_bit>.
1136  *
1137  * Return 2 if such a node exists in the subgraph, and put that node
1138  * into <backwards_match>.
1139  *
1140  * Return 1 otherwise and keep <backwards_match> unchanged.
1141  * Return 0 on error.
1142  */
1143 static noinline int
1144 find_usage_backwards(struct lock_class *source, unsigned int depth)
1145 {
1146 	struct lock_list *entry;
1147 	int ret;
1148 
1149 	if (lockdep_dependency_visit(source, depth))
1150 		return 1;
1151 
1152 	if (!__raw_spin_is_locked(&lockdep_lock))
1153 		return DEBUG_LOCKS_WARN_ON(1);
1154 
1155 	if (depth > max_recursion_depth)
1156 		max_recursion_depth = depth;
1157 	if (depth >= RECURSION_LIMIT)
1158 		return print_infinite_recursion_bug();
1159 
1160 	debug_atomic_inc(&nr_find_usage_backwards_checks);
1161 	if (source->usage_mask & (1 << find_usage_bit)) {
1162 		backwards_match = source;
1163 		return 2;
1164 	}
1165 
1166 	if (!source && debug_locks_off_graph_unlock()) {
1167 		WARN_ON(1);
1168 		return 0;
1169 	}
1170 
1171 	/*
1172 	 * Check this lock's dependency list:
1173 	 */
1174 	list_for_each_entry(entry, &source->locks_before, entry) {
1175 		debug_atomic_inc(&nr_find_usage_backwards_recursions);
1176 		ret = find_usage_backwards(entry->class, depth+1);
1177 		if (ret == 2 || ret == 0)
1178 			return ret;
1179 	}
1180 	return 1;
1181 }
1182 
1183 static int
1184 print_bad_irq_dependency(struct task_struct *curr,
1185 			 struct held_lock *prev,
1186 			 struct held_lock *next,
1187 			 enum lock_usage_bit bit1,
1188 			 enum lock_usage_bit bit2,
1189 			 const char *irqclass)
1190 {
1191 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1192 		return 0;
1193 
1194 	printk("\n======================================================\n");
1195 	printk(  "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
1196 		irqclass, irqclass);
1197 	print_kernel_version();
1198 	printk(  "------------------------------------------------------\n");
1199 	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1200 		curr->comm, task_pid_nr(curr),
1201 		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1202 		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1203 		curr->hardirqs_enabled,
1204 		curr->softirqs_enabled);
1205 	print_lock(next);
1206 
1207 	printk("\nand this task is already holding:\n");
1208 	print_lock(prev);
1209 	printk("which would create a new lock dependency:\n");
1210 	print_lock_name(hlock_class(prev));
1211 	printk(" ->");
1212 	print_lock_name(hlock_class(next));
1213 	printk("\n");
1214 
1215 	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
1216 		irqclass);
1217 	print_lock_name(backwards_match);
1218 	printk("\n... which became %s-irq-safe at:\n", irqclass);
1219 
1220 	print_stack_trace(backwards_match->usage_traces + bit1, 1);
1221 
1222 	printk("\nto a %s-irq-unsafe lock:\n", irqclass);
1223 	print_lock_name(forwards_match);
1224 	printk("\n... which became %s-irq-unsafe at:\n", irqclass);
1225 	printk("...");
1226 
1227 	print_stack_trace(forwards_match->usage_traces + bit2, 1);
1228 
1229 	printk("\nother info that might help us debug this:\n\n");
1230 	lockdep_print_held_locks(curr);
1231 
1232 	printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
1233 	print_lock_dependencies(backwards_match, 0);
1234 
1235 	printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
1236 	print_lock_dependencies(forwards_match, 0);
1237 
1238 	printk("\nstack backtrace:\n");
1239 	dump_stack();
1240 
1241 	return 0;
1242 }
1243 
1244 static int
1245 check_usage(struct task_struct *curr, struct held_lock *prev,
1246 	    struct held_lock *next, enum lock_usage_bit bit_backwards,
1247 	    enum lock_usage_bit bit_forwards, const char *irqclass)
1248 {
1249 	int ret;
1250 
1251 	find_usage_bit = bit_backwards;
1252 	/* fills in <backwards_match> */
1253 	ret = find_usage_backwards(hlock_class(prev), 0);
1254 	if (!ret || ret == 1)
1255 		return ret;
1256 
1257 	find_usage_bit = bit_forwards;
1258 	ret = find_usage_forwards(hlock_class(next), 0);
1259 	if (!ret || ret == 1)
1260 		return ret;
1261 	/* ret == 2 */
1262 	return print_bad_irq_dependency(curr, prev, next,
1263 			bit_backwards, bit_forwards, irqclass);
1264 }
1265 
1266 static int
1267 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1268 		struct held_lock *next)
1269 {
1270 	/*
1271 	 * Prove that the new dependency does not connect a hardirq-safe
1272 	 * lock with a hardirq-unsafe lock - to achieve this we search
1273 	 * the backwards-subgraph starting at <prev>, and the
1274 	 * forwards-subgraph starting at <next>:
1275 	 */
1276 	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
1277 					LOCK_ENABLED_HARDIRQS, "hard"))
1278 		return 0;
1279 
1280 	/*
1281 	 * Prove that the new dependency does not connect a hardirq-safe-read
1282 	 * lock with a hardirq-unsafe lock - to achieve this we search
1283 	 * the backwards-subgraph starting at <prev>, and the
1284 	 * forwards-subgraph starting at <next>:
1285 	 */
1286 	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
1287 					LOCK_ENABLED_HARDIRQS, "hard-read"))
1288 		return 0;
1289 
1290 	/*
1291 	 * Prove that the new dependency does not connect a softirq-safe
1292 	 * lock with a softirq-unsafe lock - to achieve this we search
1293 	 * the backwards-subgraph starting at <prev>, and the
1294 	 * forwards-subgraph starting at <next>:
1295 	 */
1296 	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
1297 					LOCK_ENABLED_SOFTIRQS, "soft"))
1298 		return 0;
1299 	/*
1300 	 * Prove that the new dependency does not connect a softirq-safe-read
1301 	 * lock with a softirq-unsafe lock - to achieve this we search
1302 	 * the backwards-subgraph starting at <prev>, and the
1303 	 * forwards-subgraph starting at <next>:
1304 	 */
1305 	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
1306 					LOCK_ENABLED_SOFTIRQS, "soft"))
1307 		return 0;
1308 
1309 	return 1;
1310 }
1311 
1312 static void inc_chains(void)
1313 {
1314 	if (current->hardirq_context)
1315 		nr_hardirq_chains++;
1316 	else {
1317 		if (current->softirq_context)
1318 			nr_softirq_chains++;
1319 		else
1320 			nr_process_chains++;
1321 	}
1322 }
1323 
1324 #else
1325 
1326 static inline int
1327 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1328 		struct held_lock *next)
1329 {
1330 	return 1;
1331 }
1332 
1333 static inline void inc_chains(void)
1334 {
1335 	nr_process_chains++;
1336 }
1337 
1338 #endif
1339 
1340 static int
1341 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1342 		   struct held_lock *next)
1343 {
1344 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1345 		return 0;
1346 
1347 	printk("\n=============================================\n");
1348 	printk(  "[ INFO: possible recursive locking detected ]\n");
1349 	print_kernel_version();
1350 	printk(  "---------------------------------------------\n");
1351 	printk("%s/%d is trying to acquire lock:\n",
1352 		curr->comm, task_pid_nr(curr));
1353 	print_lock(next);
1354 	printk("\nbut task is already holding lock:\n");
1355 	print_lock(prev);
1356 
1357 	printk("\nother info that might help us debug this:\n");
1358 	lockdep_print_held_locks(curr);
1359 
1360 	printk("\nstack backtrace:\n");
1361 	dump_stack();
1362 
1363 	return 0;
1364 }
1365 
1366 /*
1367  * Check whether we are holding such a class already.
1368  *
1369  * (Note that this has to be done separately, because the graph cannot
1370  * detect such classes of deadlocks.)
1371  *
1372  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1373  */
1374 static int
1375 check_deadlock(struct task_struct *curr, struct held_lock *next,
1376 	       struct lockdep_map *next_instance, int read)
1377 {
1378 	struct held_lock *prev;
1379 	struct held_lock *nest = NULL;
1380 	int i;
1381 
1382 	for (i = 0; i < curr->lockdep_depth; i++) {
1383 		prev = curr->held_locks + i;
1384 
1385 		if (prev->instance == next->nest_lock)
1386 			nest = prev;
1387 
1388 		if (hlock_class(prev) != hlock_class(next))
1389 			continue;
1390 
1391 		/*
1392 		 * Allow read-after-read recursion of the same
1393 		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
1394 		 */
1395 		if ((read == 2) && prev->read)
1396 			return 2;
1397 
1398 		/*
1399 		 * We're holding the nest_lock, which serializes this lock's
1400 		 * nesting behaviour.
1401 		 */
1402 		if (nest)
1403 			return 2;
1404 
1405 		return print_deadlock_bug(curr, prev, next);
1406 	}
1407 	return 1;
1408 }
1409 
1410 /*
1411  * There was a chain-cache miss, and we are about to add a new dependency
1412  * to a previous lock. We recursively validate the following rules:
1413  *
1414  *  - would the adding of the <prev> -> <next> dependency create a
1415  *    circular dependency in the graph? [== circular deadlock]
1416  *
1417  *  - does the new prev->next dependency connect any hardirq-safe lock
1418  *    (in the full backwards-subgraph starting at <prev>) with any
1419  *    hardirq-unsafe lock (in the full forwards-subgraph starting at
1420  *    <next>)? [== illegal lock inversion with hardirq contexts]
1421  *
1422  *  - does the new prev->next dependency connect any softirq-safe lock
1423  *    (in the full backwards-subgraph starting at <prev>) with any
1424  *    softirq-unsafe lock (in the full forwards-subgraph starting at
1425  *    <next>)? [== illegal lock inversion with softirq contexts]
1426  *
1427  * any of these scenarios could lead to a deadlock.
1428  *
1429  * Then if all the validations pass, we add the forwards and backwards
1430  * dependency.
1431  */
1432 static int
1433 check_prev_add(struct task_struct *curr, struct held_lock *prev,
1434 	       struct held_lock *next, int distance)
1435 {
1436 	struct lock_list *entry;
1437 	int ret;
1438 
1439 	/*
1440 	 * Prove that the new <prev> -> <next> dependency would not
1441 	 * create a circular dependency in the graph. (We do this by
1442 	 * forward-recursing into the graph starting at <next>, and
1443 	 * checking whether we can reach <prev>.)
1444 	 *
1445 	 * We are using global variables to control the recursion, to
1446 	 * keep the stackframe size of the recursive functions low:
1447 	 */
1448 	check_source = next;
1449 	check_target = prev;
1450 	if (!(check_noncircular(hlock_class(next), 0)))
1451 		return print_circular_bug_tail();
1452 
1453 	if (!check_prev_add_irq(curr, prev, next))
1454 		return 0;
1455 
1456 	/*
1457 	 * For recursive read-locks we do all the dependency checks,
1458 	 * but we don't store read-triggered dependencies (only
1459 	 * write-triggered dependencies). This ensures that only the
1460 	 * write-side dependencies matter, and that if for example a
1461 	 * write-lock never takes any other locks, then the reads are
1462 	 * equivalent to a NOP.
1463 	 */
1464 	if (next->read == 2 || prev->read == 2)
1465 		return 1;
1466 	/*
1467 	 * Is the <prev> -> <next> dependency already present?
1468 	 *
1469 	 * (this may occur even though this is a new chain: consider
1470 	 *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1471 	 *  chains - the second one will be new, but L1 already has
1472 	 *  L2 added to its dependency list, due to the first chain.)
1473 	 */
1474 	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1475 		if (entry->class == hlock_class(next)) {
1476 			if (distance == 1)
1477 				entry->distance = 1;
1478 			return 2;
1479 		}
1480 	}
1481 
1482 	/*
1483 	 * Ok, all validations passed, add the new lock
1484 	 * to the previous lock's dependency list:
1485 	 */
1486 	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
1487 			       &hlock_class(prev)->locks_after,
1488 			       next->acquire_ip, distance);
1489 
1490 	if (!ret)
1491 		return 0;
1492 
1493 	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
1494 			       &hlock_class(next)->locks_before,
1495 			       next->acquire_ip, distance);
1496 	if (!ret)
1497 		return 0;
1498 
1499 	/*
1500 	 * Debugging printouts:
1501 	 */
1502 	if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
1503 		graph_unlock();
1504 		printk("\n new dependency: ");
1505 		print_lock_name(hlock_class(prev));
1506 		printk(" => ");
1507 		print_lock_name(hlock_class(next));
1508 		printk("\n");
1509 		dump_stack();
1510 		return graph_lock();
1511 	}
1512 	return 1;
1513 }
1514 
1515 /*
1516  * Add the dependency to all directly-previous locks that are 'relevant'.
1517  * The ones that are relevant are (in increasing distance from curr):
1518  * all consecutive trylock entries and the final non-trylock entry - or
1519  * the end of this context's lock-chain - whichever comes first.
1520  */
1521 static int
1522 check_prevs_add(struct task_struct *curr, struct held_lock *next)
1523 {
1524 	int depth = curr->lockdep_depth;
1525 	struct held_lock *hlock;
1526 
1527 	/*
1528 	 * Debugging checks.
1529 	 *
1530 	 * Depth must not be zero for a non-head lock:
1531 	 */
1532 	if (!depth)
1533 		goto out_bug;
1534 	/*
1535 	 * At least two relevant locks must exist for this
1536 	 * to be a head:
1537 	 */
1538 	if (curr->held_locks[depth].irq_context !=
1539 			curr->held_locks[depth-1].irq_context)
1540 		goto out_bug;
1541 
1542 	for (;;) {
1543 		int distance = curr->lockdep_depth - depth + 1;
1544 		hlock = curr->held_locks + depth-1;
1545 		/*
1546 		 * Only non-recursive-read entries get new dependencies
1547 		 * added:
1548 		 */
1549 		if (hlock->read != 2) {
1550 			if (!check_prev_add(curr, hlock, next, distance))
1551 				return 0;
1552 			/*
1553 			 * Stop after the first non-trylock entry,
1554 			 * as non-trylock entries have added their
1555 			 * own direct dependencies already, so this
1556 			 * lock is connected to them indirectly:
1557 			 */
1558 			if (!hlock->trylock)
1559 				break;
1560 		}
1561 		depth--;
1562 		/*
1563 		 * End of lock-stack?
1564 		 */
1565 		if (!depth)
1566 			break;
1567 		/*
1568 		 * Stop the search if we cross into another context:
1569 		 */
1570 		if (curr->held_locks[depth].irq_context !=
1571 				curr->held_locks[depth-1].irq_context)
1572 			break;
1573 	}
1574 	return 1;
1575 out_bug:
1576 	if (!debug_locks_off_graph_unlock())
1577 		return 0;
1578 
1579 	WARN_ON(1);
1580 
1581 	return 0;
1582 }
1583 
1584 unsigned long nr_lock_chains;
1585 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
1586 int nr_chain_hlocks;
1587 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1588 
1589 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
1590 {
1591 	return lock_classes + chain_hlocks[chain->base + i];
1592 }
1593 
1594 /*
1595  * Look up a dependency chain. If the key is not present yet then
1596  * add it and return 1 - in this case the new dependency chain is
1597  * validated. If the key is already hashed, return 0.
1598  * (On return with 1 graph_lock is held.)
1599  */
1600 static inline int lookup_chain_cache(struct task_struct *curr,
1601 				     struct held_lock *hlock,
1602 				     u64 chain_key)
1603 {
1604 	struct lock_class *class = hlock_class(hlock);
1605 	struct list_head *hash_head = chainhashentry(chain_key);
1606 	struct lock_chain *chain;
1607 	struct held_lock *hlock_curr, *hlock_next;
1608 	int i, j, n, cn;
1609 
1610 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1611 		return 0;
1612 	/*
1613 	 * We can walk it lock-free, because entries only get added
1614 	 * to the hash:
1615 	 */
1616 	list_for_each_entry(chain, hash_head, entry) {
1617 		if (chain->chain_key == chain_key) {
1618 cache_hit:
1619 			debug_atomic_inc(&chain_lookup_hits);
1620 			if (very_verbose(class))
1621 				printk("\nhash chain already cached, key: "
1622 					"%016Lx tail class: [%p] %s\n",
1623 					(unsigned long long)chain_key,
1624 					class->key, class->name);
1625 			return 0;
1626 		}
1627 	}
1628 	if (very_verbose(class))
1629 		printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
1630 			(unsigned long long)chain_key, class->key, class->name);
1631 	/*
1632 	 * Allocate a new chain entry from the static array, and add
1633 	 * it to the hash:
1634 	 */
1635 	if (!graph_lock())
1636 		return 0;
1637 	/*
1638 	 * We have to walk the chain again locked - to avoid duplicates:
1639 	 */
1640 	list_for_each_entry(chain, hash_head, entry) {
1641 		if (chain->chain_key == chain_key) {
1642 			graph_unlock();
1643 			goto cache_hit;
1644 		}
1645 	}
1646 	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
1647 		if (!debug_locks_off_graph_unlock())
1648 			return 0;
1649 
1650 		printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
1651 		printk("turning off the locking correctness validator.\n");
1652 		return 0;
1653 	}
1654 	chain = lock_chains + nr_lock_chains++;
1655 	chain->chain_key = chain_key;
1656 	chain->irq_context = hlock->irq_context;
1657 	/* Find the first held_lock of current chain */
1658 	hlock_next = hlock;
1659 	for (i = curr->lockdep_depth - 1; i >= 0; i--) {
1660 		hlock_curr = curr->held_locks + i;
1661 		if (hlock_curr->irq_context != hlock_next->irq_context)
1662 			break;
1663 		hlock_next = hlock;
1664 	}
1665 	i++;
1666 	chain->depth = curr->lockdep_depth + 1 - i;
1667 	cn = nr_chain_hlocks;
1668 	while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
1669 		n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
1670 		if (n == cn)
1671 			break;
1672 		cn = n;
1673 	}
1674 	if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
1675 		chain->base = cn;
1676 		for (j = 0; j < chain->depth - 1; j++, i++) {
1677 			int lock_id = curr->held_locks[i].class_idx - 1;
1678 			chain_hlocks[chain->base + j] = lock_id;
1679 		}
1680 		chain_hlocks[chain->base + j] = class - lock_classes;
1681 	}
1682 	list_add_tail_rcu(&chain->entry, hash_head);
1683 	debug_atomic_inc(&chain_lookup_misses);
1684 	inc_chains();
1685 
1686 	return 1;
1687 }
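/*
 * Note on the cmpxchg() loop above: it reserves chain->depth slots in
 * chain_hlocks[] for this chain.  nr_chain_hlocks only ever grows, so
 * a failed cmpxchg just means another CPU grabbed some slots first
 * and the reservation is retried from the updated value.
 */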
1688 
1689 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
1690 		struct held_lock *hlock, int chain_head, u64 chain_key)
1691 {
1692 	/*
1693 	 * Trylock needs to maintain the stack of held locks, but it
1694 	 * does not add new dependencies, because trylock can be done
1695 	 * in any order.
1696 	 *
1697 	 * We look up the chain_key and do the O(N^2) check and update of
1698 	 * the dependencies only if this is a new dependency chain.
1699 	 * (If lookup_chain_cache() returns with 1 it acquires
1700 	 * graph_lock for us)
1701 	 */
1702 	if (!hlock->trylock && (hlock->check == 2) &&
1703 	    lookup_chain_cache(curr, hlock, chain_key)) {
1704 		/*
1705 		 * Check whether last held lock:
1706 		 *
1707 		 * - is irq-safe, if this lock is irq-unsafe
1708 		 * - is softirq-safe, if this lock is hardirq-unsafe
1709 		 *
1710 		 * And check whether the new lock's dependency graph
1711 		 * could lead back to the previous lock.
1712 		 *
1713 		 * Any of these scenarios could lead to a deadlock. If all
1714 		 * validations pass, the new dependencies are added below.
1715 		 */
1716 		int ret = check_deadlock(curr, hlock, lock, hlock->read);
1717 
1718 		if (!ret)
1719 			return 0;
1720 		/*
1721 		 * Mark recursive read, as we jump over it when
1722 		 * building dependencies (just like we jump over
1723 		 * trylock entries):
1724 		 */
1725 		if (ret == 2)
1726 			hlock->read = 2;
1727 		/*
1728 		 * Add dependency only if this lock is not the head
1729 		 * of the chain, and if it's not a secondary read-lock:
1730 		 */
1731 		if (!chain_head && ret != 2)
1732 			if (!check_prevs_add(curr, hlock))
1733 				return 0;
1734 		graph_unlock();
1735 	} else
1736 		/* after lookup_chain_cache(): */
1737 		if (unlikely(!debug_locks))
1738 			return 0;
1739 
1740 	return 1;
1741 }
1742 #else
1743 static inline int validate_chain(struct task_struct *curr,
1744 	       	struct lockdep_map *lock, struct held_lock *hlock,
1745 		int chain_head, u64 chain_key)
1746 {
1747 	return 1;
1748 }
1749 #endif
1750 
1751 /*
1752  * We are building curr_chain_key incrementally, so double-check
1753  * it from scratch, to make sure that it's done correctly:
1754  */
1755 static void check_chain_key(struct task_struct *curr)
1756 {
1757 #ifdef CONFIG_DEBUG_LOCKDEP
1758 	struct held_lock *hlock, *prev_hlock = NULL;
1759 	unsigned int i, id;
1760 	u64 chain_key = 0;
1761 
1762 	for (i = 0; i < curr->lockdep_depth; i++) {
1763 		hlock = curr->held_locks + i;
1764 		if (chain_key != hlock->prev_chain_key) {
1765 			debug_locks_off();
1766 			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
1767 				curr->lockdep_depth, i,
1768 				(unsigned long long)chain_key,
1769 				(unsigned long long)hlock->prev_chain_key);
1770 			return;
1771 		}
1772 		id = hlock->class_idx - 1;
1773 		if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
1774 			return;
1775 
1776 		if (prev_hlock && (prev_hlock->irq_context !=
1777 							hlock->irq_context))
1778 			chain_key = 0;
1779 		chain_key = iterate_chain_key(chain_key, id);
1780 		prev_hlock = hlock;
1781 	}
1782 	if (chain_key != curr->curr_chain_key) {
1783 		debug_locks_off();
1784 		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
1785 			curr->lockdep_depth, i,
1786 			(unsigned long long)chain_key,
1787 			(unsigned long long)curr->curr_chain_key);
1788 	}
1789 #endif
1790 }
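
/*
 * Editor's note: check_chain_key() above recomputes the incrementally built
 * chain key from scratch. A toy sketch of why such a key identifies a
 * particular *ordering* of lock classes: folding class id 1 then 2 gives a
 * different 64-bit value than 2 then 1. The mix function is a hypothetical
 * stand-in for iterate_chain_key(), which is defined earlier in this file.
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>
#include <assert.h>

static uint64_t sketch_iterate_key(uint64_t key, unsigned int class_id)
{
	return (key * 0x100000001b3ULL) ^ class_id;	/* FNV-style fold */
}

static void sketch_order_matters(void)
{
	uint64_t ab = sketch_iterate_key(sketch_iterate_key(0, 1), 2);
	uint64_t ba = sketch_iterate_key(sketch_iterate_key(0, 2), 1);

	assert(ab != ba);	/* different lock orderings, different chains */
}
#endif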
1791 
1792 static int
1793 print_usage_bug(struct task_struct *curr, struct held_lock *this,
1794 		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
1795 {
1796 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1797 		return 0;
1798 
1799 	printk("\n=================================\n");
1800 	printk(  "[ INFO: inconsistent lock state ]\n");
1801 	print_kernel_version();
1802 	printk(  "---------------------------------\n");
1803 
1804 	printk("inconsistent {%s} -> {%s} usage.\n",
1805 		usage_str[prev_bit], usage_str[new_bit]);
1806 
1807 	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
1808 		curr->comm, task_pid_nr(curr),
1809 		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
1810 		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
1811 		trace_hardirqs_enabled(curr),
1812 		trace_softirqs_enabled(curr));
1813 	print_lock(this);
1814 
1815 	printk("{%s} state was registered at:\n", usage_str[prev_bit]);
1816 	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
1817 
1818 	print_irqtrace_events(curr);
1819 	printk("\nother info that might help us debug this:\n");
1820 	lockdep_print_held_locks(curr);
1821 
1822 	printk("\nstack backtrace:\n");
1823 	dump_stack();
1824 
1825 	return 0;
1826 }
1827 
1828 /*
1829  * Print out an error if an invalid bit is set:
1830  */
1831 static inline int
1832 valid_state(struct task_struct *curr, struct held_lock *this,
1833 	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
1834 {
1835 	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
1836 		return print_usage_bug(curr, this, bad_bit, new_bit);
1837 	return 1;
1838 }
1839 
1840 static int mark_lock(struct task_struct *curr, struct held_lock *this,
1841 		     enum lock_usage_bit new_bit);
1842 
1843 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1844 
1845 /*
1846  * print irq inversion bug:
1847  */
1848 static int
1849 print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
1850 			struct held_lock *this, int forwards,
1851 			const char *irqclass)
1852 {
1853 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1854 		return 0;
1855 
1856 	printk("\n=========================================================\n");
1857 	printk(  "[ INFO: possible irq lock inversion dependency detected ]\n");
1858 	print_kernel_version();
1859 	printk(  "---------------------------------------------------------\n");
1860 	printk("%s/%d just changed the state of lock:\n",
1861 		curr->comm, task_pid_nr(curr));
1862 	print_lock(this);
1863 	if (forwards)
1864 		printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
1865 	else
1866 		printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
1867 	print_lock_name(other);
1868 	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
1869 
1870 	printk("\nother info that might help us debug this:\n");
1871 	lockdep_print_held_locks(curr);
1872 
1873 	printk("\nthe first lock's dependencies:\n");
1874 	print_lock_dependencies(hlock_class(this), 0);
1875 
1876 	printk("\nthe second lock's dependencies:\n");
1877 	print_lock_dependencies(other, 0);
1878 
1879 	printk("\nstack backtrace:\n");
1880 	dump_stack();
1881 
1882 	return 0;
1883 }
1884 
1885 /*
1886  * Prove that in the forwards-direction subgraph starting at <this>
1887  * there is no lock matching <mask>:
1888  */
1889 static int
1890 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
1891 		     enum lock_usage_bit bit, const char *irqclass)
1892 {
1893 	int ret;
1894 
1895 	find_usage_bit = bit;
1896 	/* fills in <forwards_match> */
1897 	ret = find_usage_forwards(hlock_class(this), 0);
1898 	if (!ret || ret == 1)
1899 		return ret;
1900 
1901 	return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
1902 }
1903 
1904 /*
1905  * Prove that in the backwards-direction subgraph starting at <this>
1906  * there is no lock matching <mask>:
1907  */
1908 static int
1909 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
1910 		      enum lock_usage_bit bit, const char *irqclass)
1911 {
1912 	int ret;
1913 
1914 	find_usage_bit = bit;
1915 	/* fills in <backwards_match> */
1916 	ret = find_usage_backwards(hlock_class(this), 0);
1917 	if (!ret || ret == 1)
1918 		return ret;
1919 
1920 	return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
1921 }
1922 
1923 void print_irqtrace_events(struct task_struct *curr)
1924 {
1925 	printk("irq event stamp: %u\n", curr->irq_events);
1926 	printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event);
1927 	print_ip_sym(curr->hardirq_enable_ip);
1928 	printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
1929 	print_ip_sym(curr->hardirq_disable_ip);
1930 	printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event);
1931 	print_ip_sym(curr->softirq_enable_ip);
1932 	printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
1933 	print_ip_sym(curr->softirq_disable_ip);
1934 }
1935 
1936 static int hardirq_verbose(struct lock_class *class)
1937 {
1938 #if HARDIRQ_VERBOSE
1939 	return class_filter(class);
1940 #endif
1941 	return 0;
1942 }
1943 
1944 static int softirq_verbose(struct lock_class *class)
1945 {
1946 #if SOFTIRQ_VERBOSE
1947 	return class_filter(class);
1948 #endif
1949 	return 0;
1950 }
1951 
1952 #define STRICT_READ_CHECKS	1
1953 
1954 static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
1955 		enum lock_usage_bit new_bit)
1956 {
1957 	int ret = 1;
1958 
1959 	switch(new_bit) {
1960 	case LOCK_USED_IN_HARDIRQ:
1961 		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
1962 			return 0;
1963 		if (!valid_state(curr, this, new_bit,
1964 				 LOCK_ENABLED_HARDIRQS_READ))
1965 			return 0;
1966 		/*
1967 		 * just marked it hardirq-safe, check that this lock
1968 		 * took no hardirq-unsafe lock in the past:
1969 		 */
1970 		if (!check_usage_forwards(curr, this,
1971 					  LOCK_ENABLED_HARDIRQS, "hard"))
1972 			return 0;
1973 #if STRICT_READ_CHECKS
1974 		/*
1975 		 * just marked it hardirq-safe, check that this lock
1976 		 * took no hardirq-unsafe-read lock in the past:
1977 		 */
1978 		if (!check_usage_forwards(curr, this,
1979 				LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
1980 			return 0;
1981 #endif
1982 		if (hardirq_verbose(hlock_class(this)))
1983 			ret = 2;
1984 		break;
1985 	case LOCK_USED_IN_SOFTIRQ:
1986 		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
1987 			return 0;
1988 		if (!valid_state(curr, this, new_bit,
1989 				 LOCK_ENABLED_SOFTIRQS_READ))
1990 			return 0;
1991 		/*
1992 		 * just marked it softirq-safe, check that this lock
1993 		 * took no softirq-unsafe lock in the past:
1994 		 */
1995 		if (!check_usage_forwards(curr, this,
1996 					  LOCK_ENABLED_SOFTIRQS, "soft"))
1997 			return 0;
1998 #if STRICT_READ_CHECKS
1999 		/*
2000 		 * just marked it softirq-safe, check that this lock
2001 		 * took no softirq-unsafe-read lock in the past:
2002 		 */
2003 		if (!check_usage_forwards(curr, this,
2004 				LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
2005 			return 0;
2006 #endif
2007 		if (softirq_verbose(hlock_class(this)))
2008 			ret = 2;
2009 		break;
2010 	case LOCK_USED_IN_HARDIRQ_READ:
2011 		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
2012 			return 0;
2013 		/*
2014 		 * just marked it hardirq-read-safe, check that this lock
2015 		 * took no hardirq-unsafe lock in the past:
2016 		 */
2017 		if (!check_usage_forwards(curr, this,
2018 					  LOCK_ENABLED_HARDIRQS, "hard"))
2019 			return 0;
2020 		if (hardirq_verbose(hlock_class(this)))
2021 			ret = 2;
2022 		break;
2023 	case LOCK_USED_IN_SOFTIRQ_READ:
2024 		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
2025 			return 0;
2026 		/*
2027 		 * just marked it softirq-read-safe, check that this lock
2028 		 * took no softirq-unsafe lock in the past:
2029 		 */
2030 		if (!check_usage_forwards(curr, this,
2031 					  LOCK_ENABLED_SOFTIRQS, "soft"))
2032 			return 0;
2033 		if (softirq_verbose(hlock_class(this)))
2034 			ret = 2;
2035 		break;
2036 	case LOCK_ENABLED_HARDIRQS:
2037 		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
2038 			return 0;
2039 		if (!valid_state(curr, this, new_bit,
2040 				 LOCK_USED_IN_HARDIRQ_READ))
2041 			return 0;
2042 		/*
2043 		 * just marked it hardirq-unsafe, check that no hardirq-safe
2044 		 * lock in the system ever took it in the past:
2045 		 */
2046 		if (!check_usage_backwards(curr, this,
2047 					   LOCK_USED_IN_HARDIRQ, "hard"))
2048 			return 0;
2049 #if STRICT_READ_CHECKS
2050 		/*
2051 		 * just marked it hardirq-unsafe, check that no
2052 		 * hardirq-safe-read lock in the system ever took
2053 		 * it in the past:
2054 		 */
2055 		if (!check_usage_backwards(curr, this,
2056 				   LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
2057 			return 0;
2058 #endif
2059 		if (hardirq_verbose(hlock_class(this)))
2060 			ret = 2;
2061 		break;
2062 	case LOCK_ENABLED_SOFTIRQS:
2063 		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
2064 			return 0;
2065 		if (!valid_state(curr, this, new_bit,
2066 				 LOCK_USED_IN_SOFTIRQ_READ))
2067 			return 0;
2068 		/*
2069 		 * just marked it softirq-unsafe, check that no softirq-safe
2070 		 * lock in the system ever took it in the past:
2071 		 */
2072 		if (!check_usage_backwards(curr, this,
2073 					   LOCK_USED_IN_SOFTIRQ, "soft"))
2074 			return 0;
2075 #if STRICT_READ_CHECKS
2076 		/*
2077 		 * just marked it softirq-unsafe, check that no
2078 		 * softirq-safe-read lock in the system ever took
2079 		 * it in the past:
2080 		 */
2081 		if (!check_usage_backwards(curr, this,
2082 				   LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
2083 			return 0;
2084 #endif
2085 		if (softirq_verbose(hlock_class(this)))
2086 			ret = 2;
2087 		break;
2088 	case LOCK_ENABLED_HARDIRQS_READ:
2089 		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
2090 			return 0;
2091 #if STRICT_READ_CHECKS
2092 		/*
2093 		 * just marked it hardirq-read-unsafe, check that no
2094 		 * hardirq-safe lock in the system ever took it in the past:
2095 		 */
2096 		if (!check_usage_backwards(curr, this,
2097 					   LOCK_USED_IN_HARDIRQ, "hard"))
2098 			return 0;
2099 #endif
2100 		if (hardirq_verbose(hlock_class(this)))
2101 			ret = 2;
2102 		break;
2103 	case LOCK_ENABLED_SOFTIRQS_READ:
2104 		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
2105 			return 0;
2106 #if STRICT_READ_CHECKS
2107 		/*
2108 		 * just marked it softirq-read-unsafe, check that no
2109 		 * softirq-safe lock in the system ever took it in the past:
2110 		 */
2111 		if (!check_usage_backwards(curr, this,
2112 					   LOCK_USED_IN_SOFTIRQ, "soft"))
2113 			return 0;
2114 #endif
2115 		if (softirq_verbose(hlock_class(this)))
2116 			ret = 2;
2117 		break;
2118 	default:
2119 		WARN_ON(1);
2120 		break;
2121 	}
2122 
2123 	return ret;
2124 }
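
/*
 * Editor's note: the switch in mark_lock_irq() above encodes a fixed set of
 * mutually exclusive usage bits (a lock ever taken in hardirq context must
 * never also be held with hardirqs enabled, and so on). The same valid_state()
 * rules, written as a data-driven table with hypothetical bit names that
 * mirror the LOCK_* enum:
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>

enum sketch_bit {
	S_USED_IN_HARDIRQ, S_USED_IN_SOFTIRQ,
	S_USED_IN_HARDIRQ_READ, S_USED_IN_SOFTIRQ_READ,
	S_ENABLED_HARDIRQS, S_ENABLED_SOFTIRQS,
	S_ENABLED_HARDIRQS_READ, S_ENABLED_SOFTIRQS_READ,
	S_NR_BITS
};

/* For each newly set bit: the bits that must not already be set. */
static const uint32_t sketch_excludes[S_NR_BITS] = {
	[S_USED_IN_HARDIRQ]	  = (1 << S_ENABLED_HARDIRQS) |
				    (1 << S_ENABLED_HARDIRQS_READ),
	[S_USED_IN_SOFTIRQ]	  = (1 << S_ENABLED_SOFTIRQS) |
				    (1 << S_ENABLED_SOFTIRQS_READ),
	[S_USED_IN_HARDIRQ_READ]  = (1 << S_ENABLED_HARDIRQS),
	[S_USED_IN_SOFTIRQ_READ]  = (1 << S_ENABLED_SOFTIRQS),
	[S_ENABLED_HARDIRQS]	  = (1 << S_USED_IN_HARDIRQ) |
				    (1 << S_USED_IN_HARDIRQ_READ),
	[S_ENABLED_SOFTIRQS]	  = (1 << S_USED_IN_SOFTIRQ) |
				    (1 << S_USED_IN_SOFTIRQ_READ),
	[S_ENABLED_HARDIRQS_READ] = (1 << S_USED_IN_HARDIRQ),
	[S_ENABLED_SOFTIRQS_READ] = (1 << S_USED_IN_SOFTIRQ),
};

/* 1 if setting new_bit on a class with this usage_mask is consistent. */
static int sketch_valid_state(uint32_t usage_mask, enum sketch_bit new_bit)
{
	return !(usage_mask & sketch_excludes[new_bit]);
}
#endif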
2125 
2126 /*
2127  * Mark all held locks with a usage bit:
2128  */
2129 static int
2130 mark_held_locks(struct task_struct *curr, int hardirq)
2131 {
2132 	enum lock_usage_bit usage_bit;
2133 	struct held_lock *hlock;
2134 	int i;
2135 
2136 	for (i = 0; i < curr->lockdep_depth; i++) {
2137 		hlock = curr->held_locks + i;
2138 
2139 		if (hardirq) {
2140 			if (hlock->read)
2141 				usage_bit = LOCK_ENABLED_HARDIRQS_READ;
2142 			else
2143 				usage_bit = LOCK_ENABLED_HARDIRQS;
2144 		} else {
2145 			if (hlock->read)
2146 				usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
2147 			else
2148 				usage_bit = LOCK_ENABLED_SOFTIRQS;
2149 		}
2150 		if (!mark_lock(curr, hlock, usage_bit))
2151 			return 0;
2152 	}
2153 
2154 	return 1;
2155 }
2156 
2157 /*
2158  * Debugging helper: via this flag we know that we are in
2159  * 'early bootup code', and will warn about any invalid irqs-on event:
2160  */
2161 static int early_boot_irqs_enabled;
2162 
2163 void early_boot_irqs_off(void)
2164 {
2165 	early_boot_irqs_enabled = 0;
2166 }
2167 
2168 void early_boot_irqs_on(void)
2169 {
2170 	early_boot_irqs_enabled = 1;
2171 }
2172 
2173 /*
2174  * Hardirqs will be enabled:
2175  */
2176 void trace_hardirqs_on_caller(unsigned long ip)
2177 {
2178 	struct task_struct *curr = current;
2179 
2180 	time_hardirqs_on(CALLER_ADDR0, ip);
2181 
2182 	if (unlikely(!debug_locks || current->lockdep_recursion))
2183 		return;
2184 
2185 	if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
2186 		return;
2187 
2188 	if (unlikely(curr->hardirqs_enabled)) {
2189 		debug_atomic_inc(&redundant_hardirqs_on);
2190 		return;
2191 	}
2192 	/* we'll do an OFF -> ON transition: */
2193 	curr->hardirqs_enabled = 1;
2194 
2195 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2196 		return;
2197 	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2198 		return;
2199 	/*
2200 	 * We are going to turn hardirqs on, so set the
2201 	 * usage bit for all held locks:
2202 	 */
2203 	if (!mark_held_locks(curr, 1))
2204 		return;
2205 	/*
2206 	 * If we have softirqs enabled, then set the usage
2207 	 * bit for all held locks. (disabled hardirqs prevented
2208 	 * this bit from being set before)
2209 	 */
2210 	if (curr->softirqs_enabled)
2211 		if (!mark_held_locks(curr, 0))
2212 			return;
2213 
2214 	curr->hardirq_enable_ip = ip;
2215 	curr->hardirq_enable_event = ++curr->irq_events;
2216 	debug_atomic_inc(&hardirqs_on_events);
2217 }
2218 EXPORT_SYMBOL(trace_hardirqs_on_caller);
2219 
2220 void trace_hardirqs_on(void)
2221 {
2222 	trace_hardirqs_on_caller(CALLER_ADDR0);
2223 }
2224 EXPORT_SYMBOL(trace_hardirqs_on);
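
/*
 * Editor's note: nothing calls these annotation hooks directly from driver
 * code; the irq-flags wrappers invoke them around the raw operations, so the
 * hooks themselves always run with interrupts still disabled (matching the
 * DEBUG_LOCKS_WARN_ON(!irqs_disabled()) checks above). Roughly -- a
 * simplified, from-memory paraphrase of include/linux/irqflags.h:
 */
#if 0	/* illustrative sketch only */
#define local_irq_enable() \
	do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)

#define local_irq_disable() \
	do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
#endif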
2225 
2226 /*
2227  * Hardirqs were disabled:
2228  */
2229 void trace_hardirqs_off_caller(unsigned long ip)
2230 {
2231 	struct task_struct *curr = current;
2232 
2233 	time_hardirqs_off(CALLER_ADDR0, ip);
2234 
2235 	if (unlikely(!debug_locks || current->lockdep_recursion))
2236 		return;
2237 
2238 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2239 		return;
2240 
2241 	if (curr->hardirqs_enabled) {
2242 		/*
2243 		 * We have done an ON -> OFF transition:
2244 		 */
2245 		curr->hardirqs_enabled = 0;
2246 		curr->hardirq_disable_ip = ip;
2247 		curr->hardirq_disable_event = ++curr->irq_events;
2248 		debug_atomic_inc(&hardirqs_off_events);
2249 	} else
2250 		debug_atomic_inc(&redundant_hardirqs_off);
2251 }
2252 EXPORT_SYMBOL(trace_hardirqs_off_caller);
2253 
2254 void trace_hardirqs_off(void)
2255 {
2256 	trace_hardirqs_off_caller(CALLER_ADDR0);
2257 }
2258 EXPORT_SYMBOL(trace_hardirqs_off);
2259 
2260 /*
2261  * Softirqs will be enabled:
2262  */
2263 void trace_softirqs_on(unsigned long ip)
2264 {
2265 	struct task_struct *curr = current;
2266 
2267 	if (unlikely(!debug_locks))
2268 		return;
2269 
2270 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2271 		return;
2272 
2273 	if (curr->softirqs_enabled) {
2274 		debug_atomic_inc(&redundant_softirqs_on);
2275 		return;
2276 	}
2277 
2278 	/*
2279 	 * We'll do an OFF -> ON transition:
2280 	 */
2281 	curr->softirqs_enabled = 1;
2282 	curr->softirq_enable_ip = ip;
2283 	curr->softirq_enable_event = ++curr->irq_events;
2284 	debug_atomic_inc(&softirqs_on_events);
2285 	/*
2286 	 * We are going to turn softirqs on, so set the
2287 	 * usage bit for all held locks, if hardirqs are
2288 	 * enabled too:
2289 	 */
2290 	if (curr->hardirqs_enabled)
2291 		mark_held_locks(curr, 0);
2292 }
2293 
2294 /*
2295  * Softirqs were disabled:
2296  */
2297 void trace_softirqs_off(unsigned long ip)
2298 {
2299 	struct task_struct *curr = current;
2300 
2301 	if (unlikely(!debug_locks))
2302 		return;
2303 
2304 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2305 		return;
2306 
2307 	if (curr->softirqs_enabled) {
2308 		/*
2309 		 * We have done an ON -> OFF transition:
2310 		 */
2311 		curr->softirqs_enabled = 0;
2312 		curr->softirq_disable_ip = ip;
2313 		curr->softirq_disable_event = ++curr->irq_events;
2314 		debug_atomic_inc(&softirqs_off_events);
2315 		DEBUG_LOCKS_WARN_ON(!softirq_count());
2316 	} else
2317 		debug_atomic_inc(&redundant_softirqs_off);
2318 }
2319 
2320 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2321 {
2322 	/*
2323 	 * If non-trylock use in a hardirq or softirq context, then
2324 	 * mark the lock as used in these contexts:
2325 	 */
2326 	if (!hlock->trylock) {
2327 		if (hlock->read) {
2328 			if (curr->hardirq_context)
2329 				if (!mark_lock(curr, hlock,
2330 						LOCK_USED_IN_HARDIRQ_READ))
2331 					return 0;
2332 			if (curr->softirq_context)
2333 				if (!mark_lock(curr, hlock,
2334 						LOCK_USED_IN_SOFTIRQ_READ))
2335 					return 0;
2336 		} else {
2337 			if (curr->hardirq_context)
2338 				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2339 					return 0;
2340 			if (curr->softirq_context)
2341 				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2342 					return 0;
2343 		}
2344 	}
2345 	if (!hlock->hardirqs_off) {
2346 		if (hlock->read) {
2347 			if (!mark_lock(curr, hlock,
2348 					LOCK_ENABLED_HARDIRQS_READ))
2349 				return 0;
2350 			if (curr->softirqs_enabled)
2351 				if (!mark_lock(curr, hlock,
2352 						LOCK_ENABLED_SOFTIRQS_READ))
2353 					return 0;
2354 		} else {
2355 			if (!mark_lock(curr, hlock,
2356 					LOCK_ENABLED_HARDIRQS))
2357 				return 0;
2358 			if (curr->softirqs_enabled)
2359 				if (!mark_lock(curr, hlock,
2360 						LOCK_ENABLED_SOFTIRQS))
2361 					return 0;
2362 		}
2363 	}
2364 
2365 	return 1;
2366 }
2367 
2368 static int separate_irq_context(struct task_struct *curr,
2369 		struct held_lock *hlock)
2370 {
2371 	unsigned int depth = curr->lockdep_depth;
2372 
2373 	/*
2374 	 * Keep track of points where we cross into an interrupt context:
2375 	 */
2376 	hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2377 				curr->softirq_context;
2378 	if (depth) {
2379 		struct held_lock *prev_hlock;
2380 
2381 		prev_hlock = curr->held_locks + depth-1;
2382 		/*
2383 		 * If we cross into another context, reset the
2384 		 * hash key (this also prevents the checking and the
2385 		 * adding of the dependency to 'prev'):
2386 		 */
2387 		if (prev_hlock->irq_context != hlock->irq_context)
2388 			return 1;
2389 	}
2390 	return 0;
2391 }
2392 
2393 #else
2394 
2395 static inline
2396 int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2397 		enum lock_usage_bit new_bit)
2398 {
2399 	WARN_ON(1);
2400 	return 1;
2401 }
2402 
2403 static inline int mark_irqflags(struct task_struct *curr,
2404 		struct held_lock *hlock)
2405 {
2406 	return 1;
2407 }
2408 
2409 static inline int separate_irq_context(struct task_struct *curr,
2410 		struct held_lock *hlock)
2411 {
2412 	return 0;
2413 }
2414 
2415 #endif
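
/*
 * Editor's note: separate_irq_context() above packs the current interrupt
 * nesting into two bits, 2 * hardirq_context + softirq_context, so a held
 * lock remembers which context it was taken in. A sketch of the encoding
 * (function and parameter names hypothetical):
 *
 *   0 - process context
 *   1 - softirq context
 *   2 - hardirq context
 *   3 - hardirq that interrupted softirq processing
 */
#if 0	/* illustrative sketch only */
static unsigned int sketch_irq_context(int in_hardirq, int in_softirq)
{
	return 2 * (in_hardirq ? 1 : 0) + (in_softirq ? 1 : 0);
}
#endif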
2416 
2417 /*
2418  * Mark a lock with a usage bit, and validate the state transition:
2419  */
2420 static int mark_lock(struct task_struct *curr, struct held_lock *this,
2421 			     enum lock_usage_bit new_bit)
2422 {
2423 	unsigned int new_mask = 1 << new_bit, ret = 1;
2424 
2425 	/*
2426 	 * If already set then do not dirty the cacheline,
2427 	 * nor do any checks:
2428 	 */
2429 	if (likely(hlock_class(this)->usage_mask & new_mask))
2430 		return 1;
2431 
2432 	if (!graph_lock())
2433 		return 0;
2434 	/*
2435 	 * Make sure we didn't race:
2436 	 */
2437 	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
2438 		graph_unlock();
2439 		return 1;
2440 	}
2441 
2442 	hlock_class(this)->usage_mask |= new_mask;
2443 
2444 	if (!save_trace(hlock_class(this)->usage_traces + new_bit))
2445 		return 0;
2446 
2447 	switch (new_bit) {
2448 	case LOCK_USED_IN_HARDIRQ:
2449 	case LOCK_USED_IN_SOFTIRQ:
2450 	case LOCK_USED_IN_HARDIRQ_READ:
2451 	case LOCK_USED_IN_SOFTIRQ_READ:
2452 	case LOCK_ENABLED_HARDIRQS:
2453 	case LOCK_ENABLED_SOFTIRQS:
2454 	case LOCK_ENABLED_HARDIRQS_READ:
2455 	case LOCK_ENABLED_SOFTIRQS_READ:
2456 		ret = mark_lock_irq(curr, this, new_bit);
2457 		if (!ret)
2458 			return 0;
2459 		break;
2460 	case LOCK_USED:
2461 		debug_atomic_dec(&nr_unused_locks);
2462 		break;
2463 	default:
2464 		if (!debug_locks_off_graph_unlock())
2465 			return 0;
2466 		WARN_ON(1);
2467 		return 0;
2468 	}
2469 
2470 	graph_unlock();
2471 
2472 	/*
2473 	 * We must printk outside of the graph_lock:
2474 	 */
2475 	if (ret == 2) {
2476 		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
2477 		print_lock(this);
2478 		print_irqtrace_events(curr);
2479 		dump_stack();
2480 	}
2481 
2482 	return ret;
2483 }
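
/*
 * Editor's note: mark_lock() above uses a classic double-checked pattern:
 * test the usage bit without the graph lock, and only on a miss take the
 * lock, re-test, and set it, so the common already-marked case stays
 * lock-free. A generic user-space sketch of the same shape (all names
 * hypothetical):
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>
#include <pthread.h>

static pthread_mutex_t sketch_graph_lock = PTHREAD_MUTEX_INITIALIZER;

static int sketch_mark_bit(uint32_t *mask, unsigned int bit)
{
	uint32_t new_mask = 1u << bit;

	if (*mask & new_mask)			/* fast path: already set */
		return 1;

	pthread_mutex_lock(&sketch_graph_lock);
	if (*mask & new_mask) {			/* lost the race, fine */
		pthread_mutex_unlock(&sketch_graph_lock);
		return 1;
	}
	*mask |= new_mask;			/* slow path: set exactly once */
	pthread_mutex_unlock(&sketch_graph_lock);
	return 1;
}
#endif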
2484 
2485 /*
2486  * Initialize a lock instance's lock-class mapping info:
2487  */
2488 void lockdep_init_map(struct lockdep_map *lock, const char *name,
2489 		      struct lock_class_key *key, int subclass)
2490 {
2491 	if (unlikely(!debug_locks))
2492 		return;
2493 
2494 	if (DEBUG_LOCKS_WARN_ON(!key))
2495 		return;
2496 	if (DEBUG_LOCKS_WARN_ON(!name))
2497 		return;
2498 	/*
2499 	 * Sanity check, the lock-class key must be persistent:
2500 	 */
2501 	if (!static_obj(key)) {
2502 		printk("BUG: key %p not in .data!\n", key);
2503 		DEBUG_LOCKS_WARN_ON(1);
2504 		return;
2505 	}
2506 	lock->name = name;
2507 	lock->key = key;
2508 	lock->class_cache = NULL;
2509 #ifdef CONFIG_LOCK_STAT
2510 	lock->cpu = raw_smp_processor_id();
2511 #endif
2512 	if (subclass)
2513 		register_lock_class(lock, subclass, 1);
2514 }
2515 EXPORT_SYMBOL_GPL(lockdep_init_map);
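
/*
 * Editor's note: a typical caller passes a static lock_class_key so the
 * static_obj() check above succeeds; a stack- or heap-allocated key would
 * trip the "key %p not in .data!" warning. A minimal usage sketch -- the
 * embedded dep_map member and all names below are hypothetical:
 */
#if 0	/* illustrative sketch only */
struct my_object {
	spinlock_t		lock;
	struct lockdep_map	dep_map;	/* hypothetical separate map */
};

static struct lock_class_key my_object_key;	/* must be static/global */

static void my_object_init(struct my_object *obj)
{
	spin_lock_init(&obj->lock);
	lockdep_init_map(&obj->dep_map, "my_object_lock", &my_object_key, 0);
}
#endif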
2516 
2517 /*
2518  * This gets called for every mutex_lock*()/spin_lock*() operation.
2519  * We maintain the dependency maps and validate the locking attempt:
2520  */
2521 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2522 			  int trylock, int read, int check, int hardirqs_off,
2523 			  struct lockdep_map *nest_lock, unsigned long ip)
2524 {
2525 	struct task_struct *curr = current;
2526 	struct lock_class *class = NULL;
2527 	struct held_lock *hlock;
2528 	unsigned int depth, id;
2529 	int chain_head = 0;
2530 	u64 chain_key;
2531 
2532 	if (!prove_locking)
2533 		check = 1;
2534 
2535 	if (unlikely(!debug_locks))
2536 		return 0;
2537 
2538 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2539 		return 0;
2540 
2541 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
2542 		debug_locks_off();
2543 		printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
2544 		printk("turning off the locking correctness validator.\n");
2545 		return 0;
2546 	}
2547 
2548 	if (!subclass)
2549 		class = lock->class_cache;
2550 	/*
2551 	 * Not cached yet or subclass?
2552 	 */
2553 	if (unlikely(!class)) {
2554 		class = register_lock_class(lock, subclass, 0);
2555 		if (!class)
2556 			return 0;
2557 	}
2558 	debug_atomic_inc((atomic_t *)&class->ops);
2559 	if (very_verbose(class)) {
2560 		printk("\nacquire class [%p] %s", class->key, class->name);
2561 		if (class->name_version > 1)
2562 			printk("#%d", class->name_version);
2563 		printk("\n");
2564 		dump_stack();
2565 	}
2566 
2567 	/*
2568 	 * Add the lock to the list of currently held locks.
2569 	 * (we don't increase the depth just yet, up until the
2570 	 * dependency checks are done)
2571 	 */
2572 	depth = curr->lockdep_depth;
2573 	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
2574 		return 0;
2575 
2576 	hlock = curr->held_locks + depth;
2577 	if (DEBUG_LOCKS_WARN_ON(!class))
2578 		return 0;
2579 	hlock->class_idx = class - lock_classes + 1;
2580 	hlock->acquire_ip = ip;
2581 	hlock->instance = lock;
2582 	hlock->nest_lock = nest_lock;
2583 	hlock->trylock = trylock;
2584 	hlock->read = read;
2585 	hlock->check = check;
2586 	hlock->hardirqs_off = !!hardirqs_off;
2587 #ifdef CONFIG_LOCK_STAT
2588 	hlock->waittime_stamp = 0;
2589 	hlock->holdtime_stamp = sched_clock();
2590 #endif
2591 
2592 	if (check == 2 && !mark_irqflags(curr, hlock))
2593 		return 0;
2594 
2595 	/* mark it as used: */
2596 	if (!mark_lock(curr, hlock, LOCK_USED))
2597 		return 0;
2598 
2599 	/*
2600 	 * Calculate the chain hash: it's the combined hash of all the
2601 	 * lock keys along the dependency chain. We save the hash value
2602 	 * at every step so that we can get the current hash easily
2603 	 * after unlock. The chain hash is then used to cache dependency
2604 	 * results.
2605 	 *
2606 	 * The 'key ID' is the most compact key value we can use to
2607 	 * drive the hash, rather than class->key.
2608 	 */
2609 	id = class - lock_classes;
2610 	if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
2611 		return 0;
2612 
2613 	chain_key = curr->curr_chain_key;
2614 	if (!depth) {
2615 		if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
2616 			return 0;
2617 		chain_head = 1;
2618 	}
2619 
2620 	hlock->prev_chain_key = chain_key;
2621 	if (separate_irq_context(curr, hlock)) {
2622 		chain_key = 0;
2623 		chain_head = 1;
2624 	}
2625 	chain_key = iterate_chain_key(chain_key, id);
2626 
2627 	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
2628 		return 0;
2629 
2630 	curr->curr_chain_key = chain_key;
2631 	curr->lockdep_depth++;
2632 	check_chain_key(curr);
2633 #ifdef CONFIG_DEBUG_LOCKDEP
2634 	if (unlikely(!debug_locks))
2635 		return 0;
2636 #endif
2637 	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
2638 		debug_locks_off();
2639 		printk("BUG: MAX_LOCK_DEPTH too low!\n");
2640 		printk("turning off the locking correctness validator.\n");
2641 		return 0;
2642 	}
2643 
2644 	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
2645 		max_lockdep_depth = curr->lockdep_depth;
2646 
2647 	return 1;
2648 }
2649 
2650 static int
2651 print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
2652 			   unsigned long ip)
2653 {
2654 	if (!debug_locks_off())
2655 		return 0;
2656 	if (debug_locks_silent)
2657 		return 0;
2658 
2659 	printk("\n=====================================\n");
2660 	printk(  "[ BUG: bad unlock balance detected! ]\n");
2661 	printk(  "-------------------------------------\n");
2662 	printk("%s/%d is trying to release lock (",
2663 		curr->comm, task_pid_nr(curr));
2664 	print_lockdep_cache(lock);
2665 	printk(") at:\n");
2666 	print_ip_sym(ip);
2667 	printk("but there are no more locks to release!\n");
2668 	printk("\nother info that might help us debug this:\n");
2669 	lockdep_print_held_locks(curr);
2670 
2671 	printk("\nstack backtrace:\n");
2672 	dump_stack();
2673 
2674 	return 0;
2675 }
2676 
2677 /*
2678  * Common debugging checks for both nested and non-nested unlock:
2679  */
2680 static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
2681 			unsigned long ip)
2682 {
2683 	if (unlikely(!debug_locks))
2684 		return 0;
2685 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2686 		return 0;
2687 
2688 	if (curr->lockdep_depth <= 0)
2689 		return print_unlock_inbalance_bug(curr, lock, ip);
2690 
2691 	return 1;
2692 }
2693 
2694 static int
2695 __lock_set_class(struct lockdep_map *lock, const char *name,
2696 		 struct lock_class_key *key, unsigned int subclass,
2697 		 unsigned long ip)
2698 {
2699 	struct task_struct *curr = current;
2700 	struct held_lock *hlock, *prev_hlock;
2701 	struct lock_class *class;
2702 	unsigned int depth;
2703 	int i;
2704 
2705 	depth = curr->lockdep_depth;
2706 	if (DEBUG_LOCKS_WARN_ON(!depth))
2707 		return 0;
2708 
2709 	prev_hlock = NULL;
2710 	for (i = depth-1; i >= 0; i--) {
2711 		hlock = curr->held_locks + i;
2712 		/*
2713 		 * We must not cross into another context:
2714 		 */
2715 		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2716 			break;
2717 		if (hlock->instance == lock)
2718 			goto found_it;
2719 		prev_hlock = hlock;
2720 	}
2721 	return print_unlock_inbalance_bug(curr, lock, ip);
2722 
2723 found_it:
2724 	lockdep_init_map(lock, name, key, 0);
2725 	class = register_lock_class(lock, subclass, 0);
2726 	hlock->class_idx = class - lock_classes + 1;
2727 
2728 	curr->lockdep_depth = i;
2729 	curr->curr_chain_key = hlock->prev_chain_key;
2730 
2731 	for (; i < depth; i++) {
2732 		hlock = curr->held_locks + i;
2733 		if (!__lock_acquire(hlock->instance,
2734 			hlock_class(hlock)->subclass, hlock->trylock,
2735 				hlock->read, hlock->check, hlock->hardirqs_off,
2736 				hlock->nest_lock, hlock->acquire_ip))
2737 			return 0;
2738 	}
2739 
2740 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
2741 		return 0;
2742 	return 1;
2743 }
2744 
2745 /*
2746  * Remove the lock from the list of currently held locks in a
2747  * potentially non-nested (out of order) manner. This is a
2748  * relatively rare operation, as all the unlock APIs default
2749  * to nested mode (which uses lock_release()):
2750  */
2751 static int
2752 lock_release_non_nested(struct task_struct *curr,
2753 			struct lockdep_map *lock, unsigned long ip)
2754 {
2755 	struct held_lock *hlock, *prev_hlock;
2756 	unsigned int depth;
2757 	int i;
2758 
2759 	/*
2760 	 * Check whether the lock exists in the current stack
2761 	 * of held locks:
2762 	 */
2763 	depth = curr->lockdep_depth;
2764 	if (DEBUG_LOCKS_WARN_ON(!depth))
2765 		return 0;
2766 
2767 	prev_hlock = NULL;
2768 	for (i = depth-1; i >= 0; i--) {
2769 		hlock = curr->held_locks + i;
2770 		/*
2771 		 * We must not cross into another context:
2772 		 */
2773 		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2774 			break;
2775 		if (hlock->instance == lock)
2776 			goto found_it;
2777 		prev_hlock = hlock;
2778 	}
2779 	return print_unlock_inbalance_bug(curr, lock, ip);
2780 
2781 found_it:
2782 	lock_release_holdtime(hlock);
2783 
2784 	/*
2785 	 * We have the right lock to unlock, 'hlock' points to it.
2786 	 * Now we remove it from the stack, and add back the other
2787 	 * entries (if any), recalculating the hash along the way:
2788 	 */
2789 	curr->lockdep_depth = i;
2790 	curr->curr_chain_key = hlock->prev_chain_key;
2791 
2792 	for (i++; i < depth; i++) {
2793 		hlock = curr->held_locks + i;
2794 		if (!__lock_acquire(hlock->instance,
2795 			hlock_class(hlock)->subclass, hlock->trylock,
2796 				hlock->read, hlock->check, hlock->hardirqs_off,
2797 				hlock->nest_lock, hlock->acquire_ip))
2798 			return 0;
2799 	}
2800 
2801 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
2802 		return 0;
2803 	return 1;
2804 }
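
/*
 * Editor's note: releasing a lock from the middle of the held-lock stack
 * cannot simply shift the array down, because every entry caches the chain
 * key of everything below it; lock_release_non_nested() therefore truncates
 * the stack at the released entry and re-acquires the entries above it so
 * their keys are recomputed. A toy array version of the same reshuffle
 * (types and the mix function are hypothetical):
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>

struct sketch_hlock {
	unsigned int	class_id;
	uint64_t	prev_key;	/* chain key *before* this entry */
};

static uint64_t sketch_mix(uint64_t key, unsigned int id)
{
	return (key * 0x100000001b3ULL) ^ id;
}

/* Drop stack[victim], rebuild the keys above it, return the new depth. */
static unsigned int sketch_release_middle(struct sketch_hlock *stack,
					  unsigned int depth,
					  unsigned int victim,
					  uint64_t *cur_key)
{
	unsigned int i, new_depth = victim;
	uint64_t key = stack[victim].prev_key;	/* rewind below the victim */

	for (i = victim + 1; i < depth; i++) {
		stack[new_depth] = stack[i];
		stack[new_depth].prev_key = key;	/* "re-acquire" */
		key = sketch_mix(key, stack[new_depth].class_id);
		new_depth++;
	}
	*cur_key = key;
	return new_depth;			/* == depth - 1 */
}
#endif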
2805 
2806 /*
2807  * Remove the lock from the list of currently held locks - this gets
2808  * called on mutex_unlock()/spin_unlock*() (or on a failed
2809  * mutex_lock_interruptible()). This is done for unlocks that nest
2810  * perfectly. (i.e. the current top of the lock-stack is unlocked)
2811  */
2812 static int lock_release_nested(struct task_struct *curr,
2813 			       struct lockdep_map *lock, unsigned long ip)
2814 {
2815 	struct held_lock *hlock;
2816 	unsigned int depth;
2817 
2818 	/*
2819 	 * Pop off the top of the lock stack:
2820 	 */
2821 	depth = curr->lockdep_depth - 1;
2822 	hlock = curr->held_locks + depth;
2823 
2824 	/*
2825 	 * Is the unlock non-nested:
2826 	 */
2827 	if (hlock->instance != lock)
2828 		return lock_release_non_nested(curr, lock, ip);
2829 	curr->lockdep_depth--;
2830 
2831 	if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
2832 		return 0;
2833 
2834 	curr->curr_chain_key = hlock->prev_chain_key;
2835 
2836 	lock_release_holdtime(hlock);
2837 
2838 #ifdef CONFIG_DEBUG_LOCKDEP
2839 	hlock->prev_chain_key = 0;
2840 	hlock->class_idx = 0;
2841 	hlock->acquire_ip = 0;
2842 	hlock->irq_context = 0;
2843 #endif
2844 	return 1;
2845 }
2846 
2847 /*
2848  * Remove the lock from the list of currently held locks - this gets
2849  * called on mutex_unlock()/spin_unlock*() (or on a failed
2850  * mutex_lock_interruptible()). Nested unlocks (the current top of the
2851  * lock-stack is released) take the fast path; others take the slow path.
2852  */
2853 static void
2854 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2855 {
2856 	struct task_struct *curr = current;
2857 
2858 	if (!check_unlock(curr, lock, ip))
2859 		return;
2860 
2861 	if (nested) {
2862 		if (!lock_release_nested(curr, lock, ip))
2863 			return;
2864 	} else {
2865 		if (!lock_release_non_nested(curr, lock, ip))
2866 			return;
2867 	}
2868 
2869 	check_chain_key(curr);
2870 }
2871 
2872 /*
2873  * Check whether we follow the irq-flags state precisely:
2874  */
2875 static void check_flags(unsigned long flags)
2876 {
2877 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
2878     defined(CONFIG_TRACE_IRQFLAGS)
2879 	if (!debug_locks)
2880 		return;
2881 
2882 	if (irqs_disabled_flags(flags)) {
2883 		if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
2884 			printk("possible reason: unannotated irqs-off.\n");
2885 		}
2886 	} else {
2887 		if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
2888 			printk("possible reason: unannotated irqs-on.\n");
2889 		}
2890 	}
2891 
2892 	/*
2893 	 * We don't accurately track softirq state in e.g.
2894 	 * hardirq contexts (such as on 4KSTACKS), so only
2895 	 * check if not in hardirq contexts:
2896 	 */
2897 	if (!hardirq_count()) {
2898 		if (softirq_count())
2899 			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
2900 		else
2901 			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
2902 	}
2903 
2904 	if (!debug_locks)
2905 		print_irqtrace_events(current);
2906 #endif
2907 }
2908 
2909 void lock_set_class(struct lockdep_map *lock, const char *name,
2910 		    struct lock_class_key *key, unsigned int subclass,
2911 		    unsigned long ip)
2912 {
2913 	unsigned long flags;
2914 
2915 	if (unlikely(current->lockdep_recursion))
2916 		return;
2917 
2918 	raw_local_irq_save(flags);
2919 	current->lockdep_recursion = 1;
2920 	check_flags(flags);
2921 	if (__lock_set_class(lock, name, key, subclass, ip))
2922 		check_chain_key(current);
2923 	current->lockdep_recursion = 0;
2924 	raw_local_irq_restore(flags);
2925 }
2926 EXPORT_SYMBOL_GPL(lock_set_class);
2927 
2928 /*
2929  * We are not always called with irqs disabled - do that here,
2930  * and also avoid lockdep recursion:
2931  */
2932 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2933 			  int trylock, int read, int check,
2934 			  struct lockdep_map *nest_lock, unsigned long ip)
2935 {
2936 	unsigned long flags;
2937 
2938 	if (unlikely(current->lockdep_recursion))
2939 		return;
2940 
2941 	raw_local_irq_save(flags);
2942 	check_flags(flags);
2943 
2944 	current->lockdep_recursion = 1;
2945 	__lock_acquire(lock, subclass, trylock, read, check,
2946 		       irqs_disabled_flags(flags), nest_lock, ip);
2947 	current->lockdep_recursion = 0;
2948 	raw_local_irq_restore(flags);
2949 }
2950 EXPORT_SYMBOL_GPL(lock_acquire);
2951 
2952 void lock_release(struct lockdep_map *lock, int nested,
2953 			  unsigned long ip)
2954 {
2955 	unsigned long flags;
2956 
2957 	if (unlikely(current->lockdep_recursion))
2958 		return;
2959 
2960 	raw_local_irq_save(flags);
2961 	check_flags(flags);
2962 	current->lockdep_recursion = 1;
2963 	__lock_release(lock, nested, ip);
2964 	current->lockdep_recursion = 0;
2965 	raw_local_irq_restore(flags);
2966 }
2967 EXPORT_SYMBOL_GPL(lock_release);
2968 
2969 #ifdef CONFIG_LOCK_STAT
2970 static int
2971 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
2972 			   unsigned long ip)
2973 {
2974 	if (!debug_locks_off())
2975 		return 0;
2976 	if (debug_locks_silent)
2977 		return 0;
2978 
2979 	printk("\n=================================\n");
2980 	printk(  "[ BUG: bad contention detected! ]\n");
2981 	printk(  "---------------------------------\n");
2982 	printk("%s/%d is trying to contend lock (",
2983 		curr->comm, task_pid_nr(curr));
2984 	print_lockdep_cache(lock);
2985 	printk(") at:\n");
2986 	print_ip_sym(ip);
2987 	printk("but there are no locks held!\n");
2988 	printk("\nother info that might help us debug this:\n");
2989 	lockdep_print_held_locks(curr);
2990 
2991 	printk("\nstack backtrace:\n");
2992 	dump_stack();
2993 
2994 	return 0;
2995 }
2996 
2997 static void
2998 __lock_contended(struct lockdep_map *lock, unsigned long ip)
2999 {
3000 	struct task_struct *curr = current;
3001 	struct held_lock *hlock, *prev_hlock;
3002 	struct lock_class_stats *stats;
3003 	unsigned int depth;
3004 	int i, contention_point, contending_point;
3005 
3006 	depth = curr->lockdep_depth;
3007 	if (DEBUG_LOCKS_WARN_ON(!depth))
3008 		return;
3009 
3010 	prev_hlock = NULL;
3011 	for (i = depth-1; i >= 0; i--) {
3012 		hlock = curr->held_locks + i;
3013 		/*
3014 		 * We must not cross into another context:
3015 		 */
3016 		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3017 			break;
3018 		if (hlock->instance == lock)
3019 			goto found_it;
3020 		prev_hlock = hlock;
3021 	}
3022 	print_lock_contention_bug(curr, lock, ip);
3023 	return;
3024 
3025 found_it:
3026 	hlock->waittime_stamp = sched_clock();
3027 
3028 	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
3029 	contending_point = lock_point(hlock_class(hlock)->contending_point,
3030 				      lock->ip);
3031 
3032 	stats = get_lock_stats(hlock_class(hlock));
3033 	if (contention_point < LOCKSTAT_POINTS)
3034 		stats->contention_point[contention_point]++;
3035 	if (contending_point < LOCKSTAT_POINTS)
3036 		stats->contending_point[contending_point]++;
3037 	if (lock->cpu != smp_processor_id())
3038 		stats->bounces[bounce_contended + !!hlock->read]++;
3039 	put_lock_stats(stats);
3040 }
3041 
3042 static void
3043 __lock_acquired(struct lockdep_map *lock, unsigned long ip)
3044 {
3045 	struct task_struct *curr = current;
3046 	struct held_lock *hlock, *prev_hlock;
3047 	struct lock_class_stats *stats;
3048 	unsigned int depth;
3049 	u64 now;
3050 	s64 waittime = 0;
3051 	int i, cpu;
3052 
3053 	depth = curr->lockdep_depth;
3054 	if (DEBUG_LOCKS_WARN_ON(!depth))
3055 		return;
3056 
3057 	prev_hlock = NULL;
3058 	for (i = depth-1; i >= 0; i--) {
3059 		hlock = curr->held_locks + i;
3060 		/*
3061 		 * We must not cross into another context:
3062 		 */
3063 		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3064 			break;
3065 		if (hlock->instance == lock)
3066 			goto found_it;
3067 		prev_hlock = hlock;
3068 	}
3069 	print_lock_contention_bug(curr, lock, _RET_IP_);
3070 	return;
3071 
3072 found_it:
3073 	cpu = smp_processor_id();
3074 	if (hlock->waittime_stamp) {
3075 		now = sched_clock();
3076 		waittime = now - hlock->waittime_stamp;
3077 		hlock->holdtime_stamp = now;
3078 	}
3079 
3080 	stats = get_lock_stats(hlock_class(hlock));
3081 	if (waittime) {
3082 		if (hlock->read)
3083 			lock_time_inc(&stats->read_waittime, waittime);
3084 		else
3085 			lock_time_inc(&stats->write_waittime, waittime);
3086 	}
3087 	if (lock->cpu != cpu)
3088 		stats->bounces[bounce_acquired + !!hlock->read]++;
3089 	put_lock_stats(stats);
3090 
3091 	lock->cpu = cpu;
3092 	lock->ip = ip;
3093 }
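
/*
 * Editor's note: __lock_contended()/__lock_acquired() above implement a
 * simple two-timestamp scheme: stamp the moment we start waiting, then on
 * acquisition charge "now - waittime_stamp" as wait time and restart the
 * hold timer. A user-space sketch with a hypothetical clock source:
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>

extern uint64_t sketch_clock(void);	/* hypothetical sched_clock() stand-in */

struct sketch_lock_stats {
	uint64_t	total_waittime;
	uint64_t	waittime_stamp;	/* 0 while not waiting */
	uint64_t	holdtime_stamp;
};

static void sketch_contended(struct sketch_lock_stats *s)
{
	s->waittime_stamp = sketch_clock();	/* started waiting here */
}

static void sketch_acquired(struct sketch_lock_stats *s)
{
	uint64_t now = sketch_clock();

	if (s->waittime_stamp) {
		s->total_waittime += now - s->waittime_stamp;
		s->waittime_stamp = 0;
	}
	s->holdtime_stamp = now;		/* hold time starts now */
}
#endif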
3094 
3095 void lock_contended(struct lockdep_map *lock, unsigned long ip)
3096 {
3097 	unsigned long flags;
3098 
3099 	if (unlikely(!lock_stat))
3100 		return;
3101 
3102 	if (unlikely(current->lockdep_recursion))
3103 		return;
3104 
3105 	raw_local_irq_save(flags);
3106 	check_flags(flags);
3107 	current->lockdep_recursion = 1;
3108 	__lock_contended(lock, ip);
3109 	current->lockdep_recursion = 0;
3110 	raw_local_irq_restore(flags);
3111 }
3112 EXPORT_SYMBOL_GPL(lock_contended);
3113 
3114 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
3115 {
3116 	unsigned long flags;
3117 
3118 	if (unlikely(!lock_stat))
3119 		return;
3120 
3121 	if (unlikely(current->lockdep_recursion))
3122 		return;
3123 
3124 	raw_local_irq_save(flags);
3125 	check_flags(flags);
3126 	current->lockdep_recursion = 1;
3127 	__lock_acquired(lock, ip);
3128 	current->lockdep_recursion = 0;
3129 	raw_local_irq_restore(flags);
3130 }
3131 EXPORT_SYMBOL_GPL(lock_acquired);
3132 #endif
3133 
3134 /*
3135  * Used by the testsuite, sanitize the validator state
3136  * after a simulated failure:
3137  */
3138 
3139 void lockdep_reset(void)
3140 {
3141 	unsigned long flags;
3142 	int i;
3143 
3144 	raw_local_irq_save(flags);
3145 	current->curr_chain_key = 0;
3146 	current->lockdep_depth = 0;
3147 	current->lockdep_recursion = 0;
3148 	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
3149 	nr_hardirq_chains = 0;
3150 	nr_softirq_chains = 0;
3151 	nr_process_chains = 0;
3152 	debug_locks = 1;
3153 	for (i = 0; i < CHAINHASH_SIZE; i++)
3154 		INIT_LIST_HEAD(chainhash_table + i);
3155 	raw_local_irq_restore(flags);
3156 }
3157 
3158 static void zap_class(struct lock_class *class)
3159 {
3160 	int i;
3161 
3162 	/*
3163 	 * Remove all dependencies this lock is
3164 	 * involved in:
3165 	 */
3166 	for (i = 0; i < nr_list_entries; i++) {
3167 		if (list_entries[i].class == class)
3168 			list_del_rcu(&list_entries[i].entry);
3169 	}
3170 	/*
3171 	 * Unhash the class and remove it from the all_lock_classes list:
3172 	 */
3173 	list_del_rcu(&class->hash_entry);
3174 	list_del_rcu(&class->lock_entry);
3175 
3176 	class->key = NULL;
3177 }
3178 
3179 static inline int within(const void *addr, void *start, unsigned long size)
3180 {
3181 	return addr >= start && addr < start + size;
3182 }
3183 
3184 void lockdep_free_key_range(void *start, unsigned long size)
3185 {
3186 	struct lock_class *class, *next;
3187 	struct list_head *head;
3188 	unsigned long flags;
3189 	int i;
3190 	int locked;
3191 
3192 	raw_local_irq_save(flags);
3193 	locked = graph_lock();
3194 
3195 	/*
3196 	 * Unhash all classes that were created by this module:
3197 	 */
3198 	for (i = 0; i < CLASSHASH_SIZE; i++) {
3199 		head = classhash_table + i;
3200 		if (list_empty(head))
3201 			continue;
3202 		list_for_each_entry_safe(class, next, head, hash_entry) {
3203 			if (within(class->key, start, size))
3204 				zap_class(class);
3205 			else if (within(class->name, start, size))
3206 				zap_class(class);
3207 		}
3208 	}
3209 
3210 	if (locked)
3211 		graph_unlock();
3212 	raw_local_irq_restore(flags);
3213 }
3214 
3215 void lockdep_reset_lock(struct lockdep_map *lock)
3216 {
3217 	struct lock_class *class, *next;
3218 	struct list_head *head;
3219 	unsigned long flags;
3220 	int i, j;
3221 	int locked;
3222 
3223 	raw_local_irq_save(flags);
3224 
3225 	/*
3226 	 * Remove all classes this lock might have:
3227 	 */
3228 	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
3229 		/*
3230 		 * If the class exists we look it up and zap it:
3231 		 */
3232 		class = look_up_lock_class(lock, j);
3233 		if (class)
3234 			zap_class(class);
3235 	}
3236 	/*
3237 	 * Debug check: in the end all mapped classes should
3238 	 * be gone.
3239 	 */
3240 	locked = graph_lock();
3241 	for (i = 0; i < CLASSHASH_SIZE; i++) {
3242 		head = classhash_table + i;
3243 		if (list_empty(head))
3244 			continue;
3245 		list_for_each_entry_safe(class, next, head, hash_entry) {
3246 			if (unlikely(class == lock->class_cache)) {
3247 				if (debug_locks_off_graph_unlock())
3248 					WARN_ON(1);
3249 				goto out_restore;
3250 			}
3251 		}
3252 	}
3253 	if (locked)
3254 		graph_unlock();
3255 
3256 out_restore:
3257 	raw_local_irq_restore(flags);
3258 }
3259 
3260 void lockdep_init(void)
3261 {
3262 	int i;
3263 
3264 	/*
3265 	 * Some architectures have their own start_kernel()
3266 	 * code which calls lockdep_init(), while we also
3267 	 * call lockdep_init() from the start_kernel() itself,
3268 	 * and we want to initialize the hashes only once:
3269 	 */
3270 	if (lockdep_initialized)
3271 		return;
3272 
3273 	for (i = 0; i < CLASSHASH_SIZE; i++)
3274 		INIT_LIST_HEAD(classhash_table + i);
3275 
3276 	for (i = 0; i < CHAINHASH_SIZE; i++)
3277 		INIT_LIST_HEAD(chainhash_table + i);
3278 
3279 	lockdep_initialized = 1;
3280 }
3281 
3282 void __init lockdep_info(void)
3283 {
3284 	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
3285 
3286 	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
3287 	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
3288 	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
3289 	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
3290 	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
3291 	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
3292 	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
3293 
3294 	printk(" memory used by lock dependency info: %lu kB\n",
3295 		(sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
3296 		sizeof(struct list_head) * CLASSHASH_SIZE +
3297 		sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
3298 		sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
3299 		sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
3300 
3301 	printk(" per task-struct memory footprint: %lu bytes\n",
3302 		sizeof(struct held_lock) * MAX_LOCK_DEPTH);
3303 
3304 #ifdef CONFIG_DEBUG_LOCKDEP
3305 	if (lockdep_init_error) {
3306 		printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
3307 		printk("Call stack leading to lockdep invocation was:\n");
3308 		print_stack_trace(&lockdep_init_trace, 0);
3309 	}
3310 #endif
3311 }
3312 
3313 static void
3314 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
3315 		     const void *mem_to, struct held_lock *hlock)
3316 {
3317 	if (!debug_locks_off())
3318 		return;
3319 	if (debug_locks_silent)
3320 		return;
3321 
3322 	printk("\n=========================\n");
3323 	printk(  "[ BUG: held lock freed! ]\n");
3324 	printk(  "-------------------------\n");
3325 	printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
3326 		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
3327 	print_lock(hlock);
3328 	lockdep_print_held_locks(curr);
3329 
3330 	printk("\nstack backtrace:\n");
3331 	dump_stack();
3332 }
3333 
3334 static inline int not_in_range(const void* mem_from, unsigned long mem_len,
3335 				const void* lock_from, unsigned long lock_len)
3336 {
3337 	return lock_from + lock_len <= mem_from ||
3338 		mem_from + mem_len <= lock_from;
3339 }
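
/*
 * Editor's note: two half-open ranges [a, a+alen) and [b, b+blen) are
 * disjoint exactly when one ends at or before the other begins, which is
 * what not_in_range() tests. Two worked checks with hypothetical addresses:
 */
#if 0	/* illustrative sketch only */
#include <assert.h>

static void sketch_range_checks(void)
{
	/* freed [100, 200), lock object [200, 240): touching but disjoint */
	assert(not_in_range((void *)100, 100, (void *)200, 40));

	/* freed [100, 200), lock object [160, 200): overlaps -> held-lock bug */
	assert(!not_in_range((void *)100, 100, (void *)160, 40));
}
#endif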
3340 
3341 /*
3342  * Called when kernel memory is freed (or unmapped), or if a lock
3343  * is destroyed or reinitialized - this code checks whether there is
3344  * any held lock in the memory range of <from> to <to>:
3345  */
3346 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
3347 {
3348 	struct task_struct *curr = current;
3349 	struct held_lock *hlock;
3350 	unsigned long flags;
3351 	int i;
3352 
3353 	if (unlikely(!debug_locks))
3354 		return;
3355 
3356 	local_irq_save(flags);
3357 	for (i = 0; i < curr->lockdep_depth; i++) {
3358 		hlock = curr->held_locks + i;
3359 
3360 		if (not_in_range(mem_from, mem_len, hlock->instance,
3361 					sizeof(*hlock->instance)))
3362 			continue;
3363 
3364 		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
3365 		break;
3366 	}
3367 	local_irq_restore(flags);
3368 }
3369 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
3370 
3371 static void print_held_locks_bug(struct task_struct *curr)
3372 {
3373 	if (!debug_locks_off())
3374 		return;
3375 	if (debug_locks_silent)
3376 		return;
3377 
3378 	printk("\n=====================================\n");
3379 	printk(  "[ BUG: lock held at task exit time! ]\n");
3380 	printk(  "-------------------------------------\n");
3381 	printk("%s/%d is exiting with locks still held!\n",
3382 		curr->comm, task_pid_nr(curr));
3383 	lockdep_print_held_locks(curr);
3384 
3385 	printk("\nstack backtrace:\n");
3386 	dump_stack();
3387 }
3388 
3389 void debug_check_no_locks_held(struct task_struct *task)
3390 {
3391 	if (unlikely(task->lockdep_depth > 0))
3392 		print_held_locks_bug(task);
3393 }
3394 
3395 void debug_show_all_locks(void)
3396 {
3397 	struct task_struct *g, *p;
3398 	int count = 10;
3399 	int unlock = 1;
3400 
3401 	if (unlikely(!debug_locks)) {
3402 		printk("INFO: lockdep is turned off.\n");
3403 		return;
3404 	}
3405 	printk("\nShowing all locks held in the system:\n");
3406 
3407 	/*
3408 	 * Here we try to get the tasklist_lock as hard as possible,
3409 	 * if not successful after 2 seconds we ignore it (but keep
3410 	 * trying). This is to enable a debug printout even if a
3411 	 * tasklist_lock-holding task deadlocks or crashes.
3412 	 */
3413 retry:
3414 	if (!read_trylock(&tasklist_lock)) {
3415 		if (count == 10)
3416 			printk("hm, tasklist_lock locked, retrying... ");
3417 		if (count) {
3418 			count--;
3419 			printk(" #%d", 10-count);
3420 			mdelay(200);
3421 			goto retry;
3422 		}
3423 		printk(" ignoring it.\n");
3424 		unlock = 0;
3425 	} else {
3426 		if (count != 10)
3427 			printk(KERN_CONT " locked it.\n");
3428 	}
3429 
3430 	do_each_thread(g, p) {
3431 		/*
3432 		 * It's not reliable to print a task's held locks
3433 		 * if it's not sleeping (or if it's not the current
3434 		 * task):
3435 		 */
3436 		if (p->state == TASK_RUNNING && p != current)
3437 			continue;
3438 		if (p->lockdep_depth)
3439 			lockdep_print_held_locks(p);
3440 		if (!unlock)
3441 			if (read_trylock(&tasklist_lock))
3442 				unlock = 1;
3443 	} while_each_thread(g, p);
3444 
3445 	printk("\n");
3446 	printk("=============================================\n\n");
3447 
3448 	if (unlock)
3449 		read_unlock(&tasklist_lock);
3450 }
3451 EXPORT_SYMBOL_GPL(debug_show_all_locks);
3452 
3453 /*
3454  * Careful: only use this function if you are sure that
3455  * the task cannot run in parallel!
3456  */
3457 void __debug_show_held_locks(struct task_struct *task)
3458 {
3459 	if (unlikely(!debug_locks)) {
3460 		printk("INFO: lockdep is turned off.\n");
3461 		return;
3462 	}
3463 	lockdep_print_held_locks(task);
3464 }
3465 EXPORT_SYMBOL_GPL(__debug_show_held_locks);
3466 
3467 void debug_show_held_locks(struct task_struct *task)
3468 {
3469 	__debug_show_held_locks(task);
3470 }
3471 EXPORT_SYMBOL_GPL(debug_show_held_locks);
3472 
3473 void lockdep_sys_exit(void)
3474 {
3475 	struct task_struct *curr = current;
3476 
3477 	if (unlikely(curr->lockdep_depth)) {
3478 		if (!debug_locks_off())
3479 			return;
3480 		printk("\n================================================\n");
3481 		printk(  "[ BUG: lock held when returning to user space! ]\n");
3482 		printk(  "------------------------------------------------\n");
3483 		printk("%s/%d is leaving the kernel with locks still held!\n",
3484 				curr->comm, curr->pid);
3485 		lockdep_print_held_locks(curr);
3486 	}
3487 }
3488