/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
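/*
 * Tracked objects are hashed by the page sized chunk which contains
 * their address; see get_bucket() below.
 */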

/*
 * The freeing of debug objects via the workqueue is throttled to a
 * maximum frequency of 10Hz and about ODEBUG_FREE_WORK_MAX (1024)
 * objects per work invocation, i.e. at most about 10k debug objects
 * are freed per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling interrupts
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;

static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static const struct debug_obj_descr *descr_test __read_mostly;
static struct kmem_cache	*obj_cache __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

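/*
 * Refill the global object pool up to debug_objects_pool_min_level,
 * first by recycling objects queued on obj_to_free, then, once the slab
 * cache is available, with non-sleeping (__GFP_HIGH | __GFP_NOWARN)
 * batch allocations.
 */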
static void fill_pool(void)
{
	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 *
	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
	 * sections.
	 */
	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full or not; if not,
	 * refill the pool list from the global free list. As it is likely
	 * that a workload may be gearing up to use more and more objects,
	 * don't free any of them until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}

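/*
 * Put the object back into the percpu free pool, the global pool, or
 * queue it on obj_to_free for deferred kmem_cache_free(), depending on
 * the pool fill levels.
 */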
static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
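/* CPU hotplug teardown callback: drain the percpu pool of the dead CPU. */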
static int object_cpu_offline(unsigned int cpu)
{
	struct debug_percpu_free *percpu_pool;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;

	/* Remote access is safe as the CPU is dead already */
	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
	hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}

	raw_spin_lock_irqsave(&pool_lock, flags);
	obj_pool_used -= percpu_pool->obj_free;
	debug_objects_freed += percpu_pool->obj_free;
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	percpu_pool->obj_free = 0;

	return 0;
}
#endif

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}
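/*
 * A sketch of a fixup callback (hypothetical; modelled on the selftest
 * callbacks below). Returning true means the damage was repaired and is
 * counted in debug_objects_fixups:
 *
 *	static bool foo_fixup_activate(void *addr, enum debug_obj_state state)
 *	{
 *		switch (state) {
 *		case ODEBUG_STATE_NOTAVAILABLE:
 *			debug_object_init(addr, &foo_debug_descr);
 *			debug_object_activate(addr, &foo_debug_descr);
 *			return true;
 *		default:
 *			return false;
 *		}
 *	}
 */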

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			 task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			 task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = 0;
	return NULL;
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context:
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
		fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	default:
		break;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "init");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_init, addr, o.state);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
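/*
 * A minimal usage sketch (hypothetical "foo" subsystem, not part of this
 * file): a user supplies a struct debug_obj_descr and brackets the
 * object's lifetime with the debug_object_*() calls:
 *
 *	static const struct debug_obj_descr foo_debug_descr = {
 *		.name = "foo",
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *	}
 *
 *	void foo_start(struct foo *f)
 *	{
 *		debug_object_activate(f, &foo_debug_descr);
 *	}
 *
 * On-stack objects use debug_object_init_on_stack() instead, see below.
 */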

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL if the object check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return 0;
	} else if (likely(!IS_ERR(obj))) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
		case ODEBUG_STATE_DESTROYED:
			o = *obj;
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "activate");

	switch (o.state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_NOTAVAILABLE:
		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
			return 0;
		fallthrough;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_DESTROYED:
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate)
				break;
			obj->state = ODEBUG_STATE_INACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_DESTROYED:
		break;
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		fallthrough;
	default:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "destroy");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		break;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "free");

	debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate != expect)
				break;
			obj->astate = next;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		default:
			break;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);

#ifdef CONFIG_DEBUG_OBJECTS_FREE
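/*
 * Walk the hash buckets of all page sized chunks overlapping the freed
 * region [address, address + size) and untrack every object inside it.
 * Objects which are still active are reported and handed to the
 * fixup_free() callback first.
 */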
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	int cnt, objs_checked = 0;
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	struct hlist_node *tmp;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				o = *obj;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(&o, "free");
				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

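/*
 * Dump the pool and tracking counters; exposed as
 * <debugfs>/debug_objects/stats, see debug_objects_init_debugfs() below.
 */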
static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	debug_objects_allocated += i;

	/*
	 * debug_objects_mem_init() is called early, when only one CPU is up
	 * and interrupts are disabled, so it is safe to replace the active
	 * object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	int cpu, extras;

	if (!debug_objects_enabled)
		return;

	/*
	 * Initialize the percpu object pools.
	 *
	 * Initialization is not strictly necessary, but is done for
	 * completeness.
	 */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
		return;
	} else
		debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;
}