/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hash.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	512
#define ODEBUG_POOL_MIN_LEVEL	256

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

struct debug_bucket {
	struct hlist_head	list;
	spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

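/*
 * Object pool: obj_static_pool carries the tracker objects which are
 * available before the slab allocator is up; once obj_cache exists,
 * fill_pool() tops the list up with dynamically allocated entries
 * whenever it drops below ODEBUG_POOL_MIN_LEVEL.
 */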
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE];

static DEFINE_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static struct kmem_cache	*obj_cache;

static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static struct debug_obj_descr	*descr_test  __read_mostly;

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}
early_param("debug_objects", enable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static int fill_pool(void)
{
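	/*
	 * Tracked objects can be initialized from atomic contexts, so
	 * refill with GFP_ATOMIC and neither retry hard nor warn when
	 * an allocation fails.
	 */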
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return obj_pool_free;

	if (unlikely(!obj_cache))
		return obj_pool_free;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return obj_pool_free;

		spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		spin_unlock_irqrestore(&pool_lock, flags);
	}
	return obj_pool_free;
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct hlist_node *node;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, node, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj	    = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	spin_unlock(&pool_lock);

	return obj;
}

/*
 * Put the object back into the pool or give it back to kmem_cache:
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long idx = (unsigned long)(obj - obj_static_pool);
	unsigned long flags;

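	/*
	 * Objects from the static boot-time pool must never reach
	 * kmem_cache_free(); keep them (and any object while the pool
	 * is below its nominal size) on the free list instead.
	 */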
	if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
		spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&obj->node, &obj_pool);
		obj_pool_free++;
		obj_pool_used--;
		spin_unlock_irqrestore(&pool_lock, flags);
	} else {
		spin_lock_irqsave(&pool_lock, flags);
		obj_pool_used--;
		spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
	}
}

/*
 * We ran out of memory. That most likely means there are huge numbers
 * of objects allocated; release all tracked objects, as the caller has
 * already disabled object debugging.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We hash on the page-sized chunk the address falls into
 * (addr >> ODEBUG_CHUNK_SHIFT). That way a freed memory range can be
 * checked for stale objects by scanning only the affected buckets.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	static int limit;

	if (limit < 5 && obj->descr != descr_test) {
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
		       obj_states[obj->state], obj->descr->name);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
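/*
 * The fixup callback returns nonzero when it actually repaired the
 * situation; successful repairs are counted in debug_objects_fixups,
 * which is exported through the "fixups" debugfs statistic.
 */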
static void
debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
		   void * addr, enum debug_obj_state state)
{
	if (fixup)
		debug_objects_fixups += fixup(addr, state);
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		printk(KERN_WARNING
		       "ODEBUG: object is on stack, but not annotated\n");
	else
		printk(KERN_WARNING
		       "ODEBUG: object is not on stack, but annotated\n");
	WARN_ON(1);
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "init");
		state = obj->state;
		spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
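
/*
 * Illustrative sketch only (not part of this file): a hypothetical
 * "struct foo" user would describe its type once and then annotate
 * each instance, using the on-stack variant for automatic variables
 * so the stack check above matches reality:
 *
 *	static struct debug_obj_descr foo_debug_descr = {
 *		.name = "foo",
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *		...
 *	}
 *
 *	void foo_init_on_stack(struct foo *f)
 *	{
 *		debug_object_init_on_stack(f, &foo_debug_descr);
 *		...
 *	}
 */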

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			debug_print_object(obj, "activate");
			state = obj->state;
			spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * No tracking entry exists. This typically happens when a
	 * statically initialized object is activated. Let the type
	 * specific fixup code decide whether that is legitimate.
	 */
	debug_object_fixup(descr->fixup_activate, addr,
			   ODEBUG_STATE_NOTAVAILABLE);
}

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			obj->state = ODEBUG_STATE_INACTIVE;
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
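		/*
		 * The object is not tracked; fake up a debug_obj so
		 * debug_print_object() can emit a useful warning.
		 */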
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	spin_unlock_irqrestore(&db->lock, flags);
}
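
/*
 * Illustrative sketch only (hypothetical "struct foo" user, continuing
 * the example above): a complete lifetime pairs the calls with the
 * real state changes of the object, e.g.
 *
 *	foo_start(f)   -> debug_object_activate(f, &foo_debug_descr);
 *	foo_stop(f)    -> debug_object_deactivate(f, &foo_debug_descr);
 *	foo_release(f) -> debug_object_free(f, &foo_debug_descr);
 *
 * Activating a destroyed or already active object, or freeing an
 * active one, triggers the warnings and fixups implemented above.
 */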

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
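	/*
	 * Round the range down to the start of its first chunk and work
	 * out how many page-sized chunks (i.e. hash buckets) it spans.
	 */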
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

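/*
 * With debugfs mounted (typically on /sys/kernel/debug) the statistics
 * above appear as <debugfs>/debug_objects/stats.
 */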
static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};
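/*
 * The static_init field above lets fixup_activate() distinguish a
 * statically initialized object (no prior debug_object_init() call)
 * from an uninitialized one.
 */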

static __initdata struct debug_obj_descr descr_type_test;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			/*
			 * Real code should return 0 here! This is
			 * not a fixup of some bad behaviour. We
			 * merely call the debug_init function to keep
			 * track of the object.
			 */
			return 1;
		} else {
			/* Real code needs to emit a warning here */
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static int __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

static int
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * keeps the debug code from being called on kmem_cache_free() for the
 * debug tracker objects themselves, which would otherwise recurse.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	if (!obj_cache)
		debug_objects_enabled = 0;
	else
		debug_objects_selftest();
}