// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * The workqueue that frees debug objects runs at most 10 times per
 * second (every ODEBUG_FREE_WORK_DELAY ticks) and frees at most
 * ODEBUG_FREE_WORK_MAX (1024) objects per invocation, i.e. at most
 * about 10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;

static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static const struct debug_obj_descr *descr_test __read_mostly;
static struct kmem_cache	*obj_cache __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
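
/*
 * Rough lifecycle of a tracked object, summarized from the state checks
 * below (an orientation aid added here, not upstream documentation):
 *
 *	none -> initialized -> active <-> inactive -> destroyed
 *
 * "not available" describes an address for which no tracker object
 * exists yet, e.g. an untracked or statically initialized object.
 */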

static void fill_pool(void)
{
	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 *
	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
	 * sections.
	 */
	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full. If not, fill the
	 * pool list from the global free list. As it is likely that a
	 * workload may be gearing up to use more and more objects, don't
	 * free any of them until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}

static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	struct debug_percpu_free *percpu_pool;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;

	/* Remote access is safe as the CPU is dead already */
	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
	hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}

	raw_spin_lock_irqsave(&pool_lock, flags);
	obj_pool_used -= percpu_pool->obj_free;
	debug_objects_freed += percpu_pool->obj_free;
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	percpu_pool->obj_free = 0;

	return 0;
}
#endif

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
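
/*
 * Illustrative sketch (hypothetical addresses, assuming 4k pages): all
 * addresses within one chunk map to the same bucket, e.g.
 *
 *	get_bucket(0xffff888000001000UL) == get_bucket(0xffff888000001fffUL)
 *
 * because both reduce to the same chunk number before hash_long(). This
 * is what makes the per-chunk scan in __debug_check_no_obj_freed() work.
 */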

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = 0;
	return NULL;
}

static void debug_objects_fill_pool(void)
{
	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context:
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
		fill_pool();
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	default:
		break;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "init");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_init, addr, o.state);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
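
/*
 * Usage sketch (illustrative only, not part of this file): a subsystem
 * declares a descriptor and calls the tracking hooks from its object
 * lifecycle functions. "struct foo", foo_debug_descr and the foo_*()
 * helpers below are hypothetical names:
 *
 *	static const struct debug_obj_descr foo_debug_descr = {
 *		.name = "foo",
 *	};
 *
 *	void foo_setup(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *	}
 *
 *	void foo_start(struct foo *f)
 *	{
 *		debug_object_activate(f, &foo_debug_descr);
 *	}
 *
 *	void foo_stop(struct foo *f)
 *	{
 *		debug_object_deactivate(f, &foo_debug_descr);
 *	}
 *
 *	void foo_teardown(struct foo *f)
 *	{
 *		debug_object_free(f, &foo_debug_descr);
 *	}
 */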

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
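
/*
 * Sketch for on-stack objects (hypothetical "struct foo" as above): the
 * on-stack variant records that the object lives on the stack, keeping
 * the annotation check in debug_object_is_on_stack() silent. The tracker
 * must be removed before the stack frame goes away:
 *
 *	void foo_do_stack_work(void)
 *	{
 *		struct foo f;
 *
 *		debug_object_init_on_stack(&f, &foo_debug_descr);
 *		...
 *		debug_object_free(&f, &foo_debug_descr);
 *	}
 */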

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 on success, -EINVAL if a check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return 0;
	} else if (likely(!IS_ERR(obj))) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
		case ODEBUG_STATE_DESTROYED:
			o = *obj;
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "activate");

	switch (o.state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_NOTAVAILABLE:
		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
			return 0;
		fallthrough;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(debug_object_activate);
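
/*
 * Sketch of return value handling (hypothetical wrapper, names as in the
 * example above): callers that can refuse the operation may propagate
 * the failure instead of ignoring it:
 *
 *	static int foo_start_checked(struct foo *f)
 *	{
 *		if (debug_object_activate(f, &foo_debug_descr))
 *			return -EINVAL;
 *		return 0;
 *	}
 */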

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_DESTROYED:
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate)
				break;
			obj->state = ODEBUG_STATE_INACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_DESTROYED:
		break;
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		fallthrough;
	default:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "destroy");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		break;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "free");

	debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate != expect)
				break;
			obj->astate = next;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		default:
			break;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
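
/*
 * Sketch for debug_object_active_state() (illustrative, with made-up
 * astate values): a user can subdivide ODEBUG_STATE_ACTIVE into its own
 * sub-states and have the transitions verified:
 *
 *	#define FOO_READY	0
 *	#define FOO_QUEUED	1
 *
 *	void foo_queue(struct foo *f)
 *	{
 *		debug_object_active_state(f, &foo_debug_descr,
 *					  FOO_READY, FOO_QUEUED);
 *	}
 *
 *	void foo_dequeue(struct foo *f)
 *	{
 *		debug_object_active_state(f, &foo_debug_descr,
 *					  FOO_QUEUED, FOO_READY);
 *	}
 */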

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	int cnt, objs_checked = 0;
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	struct hlist_node *tmp;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				o = *obj;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(&o, "free");
				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif
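
/*
 * Sketch of the intended call site (hedged; the real hooks live in the
 * memory allocators, not in this file): free paths pass the region being
 * released so that freeing memory which still contains an active tracked
 * object is caught:
 *
 *	static void example_free_hook(void *addr, unsigned long size)
 *	{
 *		debug_check_no_obj_freed(addr, size);
 *		... hand the memory back to the allocator ...
 *	}
 */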

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		     obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		     fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		     warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	debug_objects_allocated += i;

	/*
	 * debug_objects_mem_init() is now called early, when only one CPU
	 * is up and interrupts are disabled, so it is safe to replace the
	 * active object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, avoiding recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	int cpu, extras;

	if (!debug_objects_enabled)
		return;

	/*
	 * Initialize the percpu object pools
	 *
	 * Initialization is not strictly necessary, but was done for
	 * completeness.
	 */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
		return;
	} else
		debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;
}