// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
 *   Accesses to the metadata (e.g. count) are protected by this lock. Note
 *   that some members of this structure may be protected by other means
 *   (atomic or kmemleak_lock). This lock is also held when scanning the
 *   corresponding memory block to avoid the kernel freeing it via the
 *   kmemleak_free() callback. This is less heavyweight than holding a global
 *   lock like kmemleak_lock during scanning.
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
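 *
 * For example, during a scan, scan_object() takes object->lock while
 * scan_block() nests kmemleak_lock and then another object's lock
 * (other_object->lock) inside it; this is only safe because the whole
 * sequence runs under scan_mutex.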
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)
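
/*
 * For example, gfp_kmemleak_mask(GFP_KERNEL | __GFP_ZERO) evaluates to
 * GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN: modifiers
 * such as __GFP_ZERO that only make sense for the original allocation are
 * dropped, and the metadata allocation will not retry, touch emergency
 * reserves or warn on failure.
 */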

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	raw_spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before the object is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN	(1 << 3)

#define HEX_PREFIX		"    "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* protecting the access to object_list and object_tree_root */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* set if there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)	do {	\
	if (seq)					\
		seq_printf(seq, fmt, ##__VA_ARGS__);	\
	else						\
		pr_warn(fmt, ##__VA_ARGS__);		\
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
				 int rowsize, int groupsize, const void *buf,
				 size_t len, bool ascii)
{
	if (seq)
		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
			     buf, len, ascii);
	else
		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
			       rowsize, groupsize, buf, len, ascii);
}

/*
 * Printing of the object's hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
			     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *	     sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *	     (min_count == -1). No function defined for this color.
 * Newly created objects start out white (object->count == 0) and turn gray
 * once a memory scan finds enough references to them.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
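
/*
 * Concretely: a slab object registered with min_count == 1 starts out white
 * (count == 0) and turns gray once a scan finds at least one pointer to it.
 * An object registered with min_count == 0 is gray from the start, and an
 * object painted with KMEMLEAK_BLACK (min_count == -1) is neither white nor
 * gray, so it is never reported and, since __paint_it() also sets
 * OBJECT_NO_SCAN, never scanned.
 */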

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
			   object->pointer, object->size);
	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
			   object->comm, object->pid, object->jiffies,
			   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	warn_or_seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		warn_or_seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases during kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%x\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	stack_trace_print(object->trace, object->trace_len, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}
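
/*
 * The canonical caller pattern (see find_and_get_object()) is:
 *
 *	rcu_read_lock();
 *	object = lookup_object(ptr, alias);	// under kmemleak_lock
 *	if (object && !get_object(object))
 *		object = NULL;			// already queued for freeing
 *	rcu_read_unlock();
 */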

/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;

	/* try the slab allocator first */
	if (object_cache) {
		object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
		if (object)
			return object;
	}

	/* slab allocation failed, try the memory pool */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = list_first_entry_or_null(&mem_pool_free_list,
					  typeof(*object), object_list);
	if (object)
		list_del(&object->object_list);
	else if (mem_pool_free_count)
		object = &mem_pool[--mem_pool_free_count];
	else
		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
		kmem_cache_free(object_cache, object);
		return;
	}

	/* add the object to the memory pool free list */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	list_add(&object->object_list, &mem_pool_free_list);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	mem_pool_free(object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	/*
	 * It may be too early for the RCU callbacks, however, there is no
	 * concurrent object_list traversal when !object_cache and all objects
	 * came from the memory pool. Free the object directly.
	 */
	if (object_cache)
		call_rcu(&object->rcu, free_object_rcu);
	else
		free_object_rcu(&object->rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Remove an object from the object_tree_root and object_list. Must be called
 * with the kmemleak_lock held _if_ kmemleak is still enabled.
 */
static void __remove_object(struct kmemleak_object *object)
{
	rb_erase(&object->rb_node, &object_tree_root);
	list_del_rcu(&object->object_list);
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object)
		__remove_object(object);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	return stack_trace_save(trace, MAX_TRACE, 2);
}
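
/*
 * The skipnr argument of 2 omits the innermost kmemleak frames
 * (__save_stack_trace() and its direct caller) so that the saved backtrace
 * starts near the original allocation site.
 */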

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;
	unsigned long untagged_ptr;

	object = mem_pool_alloc(gfp);
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	raw_spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->excess_ref = 0;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_serving_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	raw_spin_lock_irqsave(&kmemleak_lock, flags);

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	min_addr = min(min_addr, untagged_ptr);
	max_addr = max(max_addr, untagged_ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area = NULL;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

	if (scan_area_cache)
		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));

	raw_spin_lock_irqsave(&object->lock, flags);
	if (!area) {
		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
		/* mark the object for full scan to avoid false positives */
		object->flags |= OBJECT_FULL_SCAN;
		goto out_unlock;
	}
	if (size == SIZE_MAX) {
		size = untagged_objp + object->size - untagged_ptr;
	} else if (untagged_ptr + size > untagged_objp + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
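
/*
 * A hypothetical out-of-tree allocator carving blocks out of a private
 * region could register them like this (illustrative sketch only;
 * my_pool_carve()/my_pool_release() are made-up helpers):
 *
 *	ptr = my_pool_carve(pool, size);
 *	kmemleak_alloc(ptr, size, 1, GFP_KERNEL);
 *	...
 *	kmemleak_free(ptr);
 *	my_pool_release(pool, ptr);
 */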

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and to always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);
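
/*
 * Illustrative (hypothetical) use: if only the 'next' field of a large
 * buffer may hold pointers to other allocations, scanning can be limited
 * to that field:
 *
 *	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
 *	kmemleak_scan_area(&buf->next, sizeof(buf->next), GFP_KERNEL);
 */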

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object.
 *		See kmemleak_alloc()
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
			       gfp_t gfp)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 * @phys:	physical address of the beginning of or a location inside an
 *		object. This also represents the start of the range to be freed
 * @size:	size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *			    address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	kasan_disable_current();
	kcsan_disable_current();
	object->checksum = crc32(0, (void *)object->pointer, object->size);
	kasan_enable_current();
	kcsan_enable_current();

	return object->checksum != old_csum;
}
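
/*
 * kmemleak_scan() uses the checksum to catch transient values: a white
 * object whose contents changed since the previous scan is temporarily
 * re-colored gray and scanned once more, so it is not reported during the
 * scan in which it was still being modified.
 */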

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;
	unsigned long untagged_ptr;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;
		unsigned long excess_ref;

		if (scan_should_stop())
			break;

		kasan_disable_current();
		pointer = *ptr;
		kasan_enable_current();

		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		/* only pass surplus references (object already gray) */
		if (color_gray(object)) {
			excess_ref = object->excess_ref;
			/* no need for update_refs() if object already gray */
		} else {
			excess_ref = 0;
			update_refs(object);
		}
		raw_spin_unlock(&object->lock);

		if (excess_ref) {
			object = lookup_object(excess_ref, 0);
			if (!object)
				continue;
			if (object == scanned)
				/* circular reference, ignore */
				continue;
			raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
			update_refs(object);
			raw_spin_unlock(&object->lock);
		}
	}
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
#endif

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list) ||
	    object->flags & OBJECT_FULL_SCAN) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct zone *zone;
	int __maybe_unused i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_populated_zone(zone) {
		unsigned long start_pfn = zone->zone_start_pfn;
		unsigned long end_pfn = zone_end_pfn(zone);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page)
				continue;

			/* only scan pages belonging to this zone */
			if (page_zone(page) != zone)
				continue;
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
			if (!(pfn & 63))
				cond_resched();
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		rcu_read_lock();
		for_each_process_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		}
		rcu_read_unlock();
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped, do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;

			if (kmemleak_verbose)
				print_unreferenced(NULL, object);

			new_leaks++;
		}
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. The function triggers
 * a memory scan when the pos argument points to the first position.
 */
kmemleak_seq_start(struct seq_file * seq,loff_t * pos)1622 static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1623 {
1624 struct kmemleak_object *object;
1625 loff_t n = *pos;
1626 int err;
1627
1628 err = mutex_lock_interruptible(&scan_mutex);
1629 if (err < 0)
1630 return ERR_PTR(err);
1631
1632 rcu_read_lock();
1633 list_for_each_entry_rcu(object, &object_list, object_list) {
1634 if (n-- > 0)
1635 continue;
1636 if (get_object(object))
1637 goto out;
1638 }
1639 object = NULL;
1640 out:
1641 return object;
1642 }
1643
1644 /*
1645 * Return the next object in the object_list. The function decrements the
1646 * use_count of the previous object and increases that of the next one.
1647 */
kmemleak_seq_next(struct seq_file * seq,void * v,loff_t * pos)1648 static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1649 {
1650 struct kmemleak_object *prev_obj = v;
1651 struct kmemleak_object *next_obj = NULL;
1652 struct kmemleak_object *obj = prev_obj;
1653
1654 ++(*pos);
1655
1656 list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1657 if (get_object(obj)) {
1658 next_obj = obj;
1659 break;
1660 }
1661 }
1662
	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file. Only
 * objects already reported by a previous scan (OBJECT_REPORTED) and still
 * unreferenced are printed.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	raw_spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * Objects are painted grey rather than black so that future scans still
 * examine them. Black objects are never scanned again, so any references
 * they hold to later allocations would be missed and those allocations
 * would be reported as false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off       - disable kmemleak (irreversible)
 *   stack=on  - enable the task stacks scanning
 *   stack=off - disable the task stacks scanning
 *   scan=on   - start the automatic memory scanning thread
 *   scan=off  - stop the automatic memory scanning thread
 *   scan=...  - set the automatic memory scanning period in seconds (0 to
 *               disable it)
 *   scan      - trigger a memory scan
 *   clear     - mark all currently reported unreferenced kmemleak objects
 *               as grey to stop printing them, or free all kmemleak objects
 *               if kmemleak has been disabled
 *   dump=...  - dump information about the object found at the given address
 */
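/*
 * Example usage, assuming debugfs is mounted at its usual location,
 * /sys/kernel/debug:
 *
 *   echo scan > /sys/kernel/debug/kmemleak       - trigger an immediate scan
 *   echo scan=600 > /sys/kernel/debug/kmemleak   - rescan every 600 seconds
 *   echo clear > /sys/kernel/debug/kmemleak      - grey out the current report
 *   echo dump=0xffff888000000000 > /sys/kernel/debug/kmemleak
 *
 * The address passed to "dump=" above is illustrative only; use a pointer
 * reported by a previous scan.
 */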
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

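	/* copy at most sizeof(buf) - 1 bytes and always NUL-terminate */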
	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EPERM;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = kstrtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner   = THIS_MODULE,
	.open    = kmemleak_open,
	.read    = seq_read,
	.write   = kmemleak_write,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * Kmemleak has already been disabled, no need for RCU list traversal
	 * or kmemleak_lock held.
	 */
	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
		__remove_object(object);
		__delete_object(object);
	}
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no memory leaks have been found (otherwise, kmemleak may still hold useful
 * information on the reported leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once it is made sure that kmemleak_scan has stopped, it is safe to
	 * no longer track object freeing. Ordering of the scan thread
	 * stopping and the memory accesses below is guaranteed by the
	 * kthread_stop() function.
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

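/*
 * Deferring the clean-up to a workqueue item allows kmemleak_disable() to be
 * called from contexts that cannot block on kthread_stop() or scan_mutex.
 */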
static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default): "kmemleak=off" on
 * the kernel command line disables it, while "kmemleak=on" overrides
 * CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF.
 */
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_disable();
		return;
	}
#endif

	if (kmemleak_error)
		return;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	/*
	 * Register the data/bss sections as grey objects: they are scanned
	 * for references to other objects but never reported as leaks.
	 */
	create_object((unsigned long)_sdata, _edata - _sdata,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	/* only register .data..ro_after_init if not within .data */
	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
		create_object((unsigned long)__start_ro_after_init,
			      __end_ro_after_init - __start_ro_after_init,
			      KMEMLEAK_GREY, GFP_ATOMIC);
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	kmemleak_initialized = 1;

	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
		mutex_lock(&scan_mutex);
		start_scan_thread();
		mutex_unlock(&scan_mutex);
	}

	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
		mem_pool_free_count);

	return 0;
}
late_initcall(kmemleak_late_init);