1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * mm/kmemleak.c
4  *
5  * Copyright (C) 2008 ARM Limited
6  * Written by Catalin Marinas <catalin.marinas@arm.com>
7  *
8  * For more information on the algorithm and kmemleak usage, please see
9  * Documentation/dev-tools/kmemleak.rst.
10  *
11  * Notes on locking
12  * ----------------
13  *
14  * The following locks and mutexes are used by kmemleak:
15  *
16  * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and
17  *   accesses to the object_tree_root. The object_list is the main list
18  *   holding the metadata (struct kmemleak_object) for the allocated memory
19  *   blocks. The object_tree_root is a red black tree used to look-up
20  *   metadata based on a pointer to the corresponding memory block.  The
21  *   kmemleak_object structures are added to the object_list and
22  *   object_tree_root in the create_object() function called from the
23  *   kmemleak_alloc() callback and removed in delete_object() called from the
24  *   kmemleak_free() callback.
25  * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
26  *   Accesses to the metadata (e.g. count) are protected by this lock. Note
27  *   that some members of this structure may be protected by other means
28  *   (atomic or kmemleak_lock). This lock is also held when scanning the
29  *   corresponding memory block to avoid the kernel freeing it via the
30  *   kmemleak_free() callback. This is less heavyweight than holding a global
31  *   lock like kmemleak_lock during scanning.
32  * - scan_mutex (mutex): ensures that only one thread may scan the memory for
33  *   unreferenced objects at a time. The gray_list contains the objects which
34  *   are already referenced or marked as false positives and need to be
35  *   scanned. This list is only modified during a scanning episode when the
36  *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
37  *   Note that the kmemleak_object.use_count is incremented when an object is
38  *   added to the gray_list and therefore cannot be freed. This mutex also
39  *   serializes access to the "kmemleak" debugfs file and protects
40  *   modifications to the memory scanning parameters, including the scan_thread
41  *   pointer.
42  *
43  * Locks and mutexes are acquired/nested in the following order:
44  *
45  *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
46  *
47  * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
48  * regions.
49  *
50  * The kmemleak_object structures have a use_count incremented or decremented
51  * using the get_object()/put_object() functions. When the use_count becomes
52  * 0, this count can no longer be incremented and put_object() schedules the
53  * kmemleak_object freeing via an RCU callback. All calls to the get_object()
54  * function must be protected by rcu_read_lock() to avoid accessing a freed
55  * structure.
56  */
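
/*
 * For example, the lookup pattern implied by the rules above looks roughly
 * as follows (a sketch only; find_and_get_object() below is the real
 * implementation):
 *
 *	rcu_read_lock();
 *	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 *	object = lookup_object(ptr, alias);
 *	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 *	if (object && !get_object(object))	(use_count was already 0)
 *		object = NULL;
 *	rcu_read_unlock();
 */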
57 
58 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
59 
60 #include <linux/init.h>
61 #include <linux/kernel.h>
62 #include <linux/list.h>
63 #include <linux/sched/signal.h>
64 #include <linux/sched/task.h>
65 #include <linux/sched/task_stack.h>
66 #include <linux/jiffies.h>
67 #include <linux/delay.h>
68 #include <linux/export.h>
69 #include <linux/kthread.h>
70 #include <linux/rbtree.h>
71 #include <linux/fs.h>
72 #include <linux/debugfs.h>
73 #include <linux/seq_file.h>
74 #include <linux/cpumask.h>
75 #include <linux/spinlock.h>
76 #include <linux/module.h>
77 #include <linux/mutex.h>
78 #include <linux/rcupdate.h>
79 #include <linux/stacktrace.h>
80 #include <linux/cache.h>
81 #include <linux/percpu.h>
82 #include <linux/memblock.h>
83 #include <linux/pfn.h>
84 #include <linux/mmzone.h>
85 #include <linux/slab.h>
86 #include <linux/thread_info.h>
87 #include <linux/err.h>
88 #include <linux/uaccess.h>
89 #include <linux/string.h>
90 #include <linux/nodemask.h>
91 #include <linux/mm.h>
92 #include <linux/workqueue.h>
93 #include <linux/crc32.h>
94 
95 #include <asm/sections.h>
96 #include <asm/processor.h>
97 #include <linux/atomic.h>
98 
99 #include <linux/kasan.h>
100 #include <linux/kfence.h>
101 #include <linux/kmemleak.h>
102 #include <linux/memory_hotplug.h>
103 
104 /*
105  * Kmemleak configuration and common defines.
106  */
107 #define MAX_TRACE		16	/* stack trace length */
108 #define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
109 #define SECS_FIRST_SCAN		60	/* delay before the first scan */
110 #define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
111 #define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */
112 
113 #define BYTES_PER_POINTER	sizeof(void *)
114 
115 /* GFP bitmask for kmemleak internal allocations */
116 #define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
117 				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
118 				 __GFP_NOWARN)
119 
120 /* scanning area inside a memory block */
121 struct kmemleak_scan_area {
122 	struct hlist_node node;
123 	unsigned long start;
124 	size_t size;
125 };
126 
127 #define KMEMLEAK_GREY	0
128 #define KMEMLEAK_BLACK	-1
129 
130 /*
131  * Structure holding the metadata for each allocated memory block.
132  * Modifications to such objects should be made while holding the
133  * object->lock. Insertions or deletions from object_list, gray_list or
134  * rb_node are already protected by the corresponding locks or mutex (see
135  * the notes on locking above). These objects are reference-counted
136  * (use_count) and freed using the RCU mechanism.
137  */
138 struct kmemleak_object {
139 	raw_spinlock_t lock;
140 	unsigned int flags;		/* object status flags */
141 	struct list_head object_list;
142 	struct list_head gray_list;
143 	struct rb_node rb_node;
144 	struct rcu_head rcu;		/* object_list lockless traversal */
145 	/* object usage count; object freed when use_count == 0 */
146 	atomic_t use_count;
147 	unsigned long pointer;
148 	size_t size;
149 	/* pass surplus references to this pointer */
150 	unsigned long excess_ref;
151 	/* minimum number of pointers found before it is considered a leak */
152 	int min_count;
153 	/* the total number of pointers found pointing to this object */
154 	int count;
155 	/* checksum for detecting modified objects */
156 	u32 checksum;
157 	/* memory ranges to be scanned inside an object (empty for all) */
158 	struct hlist_head area_list;
159 	unsigned long trace[MAX_TRACE];
160 	unsigned int trace_len;
161 	unsigned long jiffies;		/* creation timestamp */
162 	pid_t pid;			/* pid of the current task */
163 	char comm[TASK_COMM_LEN];	/* executable name */
164 };
165 
166 /* flag representing the memory block allocation status */
167 #define OBJECT_ALLOCATED	(1 << 0)
168 /* flag set after the first reporting of an unreferenced object */
169 #define OBJECT_REPORTED		(1 << 1)
170 /* flag set to not scan the object */
171 #define OBJECT_NO_SCAN		(1 << 2)
172 /* flag set to fully scan the object when scan_area allocation failed */
173 #define OBJECT_FULL_SCAN	(1 << 3)
174 
175 #define HEX_PREFIX		"    "
176 /* number of bytes to print per line; must be 16 or 32 */
177 #define HEX_ROW_SIZE		16
178 /* number of bytes to print at a time (1, 2, 4, 8) */
179 #define HEX_GROUP_SIZE		1
180 /* include ASCII after the hex output */
181 #define HEX_ASCII		1
182 /* max number of lines to be printed */
183 #define HEX_MAX_LINES		2
184 
185 /* the list of all allocated objects */
186 static LIST_HEAD(object_list);
187 /* the list of gray-colored objects (see color_gray comment below) */
188 static LIST_HEAD(gray_list);
189 /* memory pool allocation */
190 static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
191 static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
192 static LIST_HEAD(mem_pool_free_list);
193 /* search tree for object boundaries */
194 static struct rb_root object_tree_root = RB_ROOT;
195 /* protecting the access to object_list and object_tree_root */
196 static DEFINE_RAW_SPINLOCK(kmemleak_lock);
197 
198 /* allocation caches for kmemleak internal data */
199 static struct kmem_cache *object_cache;
200 static struct kmem_cache *scan_area_cache;
201 
202 /* set if tracing memory operations is enabled */
203 static int kmemleak_enabled = 1;
204 /* same as above but only for the kmemleak_free() callback */
205 static int kmemleak_free_enabled = 1;
206 /* set in the late_initcall if there were no errors */
207 static int kmemleak_initialized;
208 /* set if a kmemleak warning was issued */
209 static int kmemleak_warning;
210 /* set if a fatal kmemleak error has occurred */
211 static int kmemleak_error;
212 
213 /* minimum and maximum address that may be valid pointers */
214 static unsigned long min_addr = ULONG_MAX;
215 static unsigned long max_addr;
216 
217 static struct task_struct *scan_thread;
218 /* used to avoid reporting of recently allocated objects */
219 static unsigned long jiffies_min_age;
220 static unsigned long jiffies_last_scan;
221 /* delay between automatic memory scannings */
222 static signed long jiffies_scan_wait;
223 /* enables or disables the task stacks scanning */
224 static int kmemleak_stack_scan = 1;
225 /* protects the memory scanning, parameters and debug/kmemleak file access */
226 static DEFINE_MUTEX(scan_mutex);
227 /* setting kmemleak=on will set this var, skipping the disable */
228 static int kmemleak_skip_disable;
229 /* If there are leaks that can be reported */
230 static bool kmemleak_found_leaks;
231 
232 static bool kmemleak_verbose;
233 module_param_named(verbose, kmemleak_verbose, bool, 0600);
234 
235 static void kmemleak_disable(void);
236 
237 /*
238  * Print a warning and dump the stack trace.
239  */
240 #define kmemleak_warn(x...)	do {		\
241 	pr_warn(x);				\
242 	dump_stack();				\
243 	kmemleak_warning = 1;			\
244 } while (0)
245 
246 /*
247  * Macro invoked when a serious kmemleak condition has occurred and cannot be
248  * recovered from. Kmemleak will be disabled and further allocation/freeing
249  * tracing will no longer be available.
250  */
251 #define kmemleak_stop(x...)	do {	\
252 	kmemleak_warn(x);		\
253 	kmemleak_disable();		\
254 } while (0)
255 
256 #define warn_or_seq_printf(seq, fmt, ...)	do {	\
257 	if (seq)					\
258 		seq_printf(seq, fmt, ##__VA_ARGS__);	\
259 	else						\
260 		pr_warn(fmt, ##__VA_ARGS__);		\
261 } while (0)
262 
263 static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
264 				 int rowsize, int groupsize, const void *buf,
265 				 size_t len, bool ascii)
266 {
267 	if (seq)
268 		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
269 			     buf, len, ascii);
270 	else
271 		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
272 			       rowsize, groupsize, buf, len, ascii);
273 }
274 
275 /*
276  * Print the object's hex dump to the seq file. The number of lines to be
277  * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
278  * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
279  * with the object->lock held.
280  */
281 static void hex_dump_object(struct seq_file *seq,
282 			    struct kmemleak_object *object)
283 {
284 	const u8 *ptr = (const u8 *)object->pointer;
285 	size_t len;
286 
287 	/* limit the number of lines to HEX_MAX_LINES */
288 	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
289 
290 	warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
291 	kasan_disable_current();
292 	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
293 			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
294 	kasan_enable_current();
295 }
296 
297 /*
298  * Object colors, encoded with count and min_count:
299  * - white - orphan object, not enough references to it (count < min_count)
300  * - gray  - not orphan, not marked as false positive (min_count == 0) or
301  *		sufficient references to it (count >= min_count)
302  * - black - ignore, it doesn't contain references (e.g. text section)
303  *		(min_count == -1). No function defined for this color.
304  * Newly created objects don't have any color assigned (object->count == -1)
305  * before the next memory scan when they become white.
306  */
307 static bool color_white(const struct kmemleak_object *object)
308 {
309 	return object->count != KMEMLEAK_BLACK &&
310 		object->count < object->min_count;
311 }
312 
313 static bool color_gray(const struct kmemleak_object *object)
314 {
315 	return object->min_count != KMEMLEAK_BLACK &&
316 		object->count >= object->min_count;
317 }
318 
319 /*
320  * Objects are considered unreferenced only if their color is white, they have
321  * not been deleted and have a minimum age to avoid false positives caused by
322  * pointers temporarily stored in CPU registers.
323  */
324 static bool unreferenced_object(struct kmemleak_object *object)
325 {
326 	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
327 		time_before_eq(object->jiffies + jiffies_min_age,
328 			       jiffies_last_scan);
329 }
330 
331 /*
332  * Print the unreferenced object's information to the seq file. The
333  * print_unreferenced function must be called with the object->lock held.
334  */
335 static void print_unreferenced(struct seq_file *seq,
336 			       struct kmemleak_object *object)
337 {
338 	int i;
339 	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
340 
341 	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
342 		   object->pointer, object->size);
343 	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
344 		   object->comm, object->pid, object->jiffies,
345 		   msecs_age / 1000, msecs_age % 1000);
346 	hex_dump_object(seq, object);
347 	warn_or_seq_printf(seq, "  backtrace:\n");
348 
349 	for (i = 0; i < object->trace_len; i++) {
350 		void *ptr = (void *)object->trace[i];
351 		warn_or_seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
352 	}
353 }
354 
355 /*
356  * Print the kmemleak_object information. This function is used mainly for
357  * debugging special cases of kmemleak operations. It must be called with
358  * the object->lock held.
359  */
360 static void dump_object_info(struct kmemleak_object *object)
361 {
362 	pr_notice("Object 0x%08lx (size %zu):\n",
363 		  object->pointer, object->size);
364 	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
365 		  object->comm, object->pid, object->jiffies);
366 	pr_notice("  min_count = %d\n", object->min_count);
367 	pr_notice("  count = %d\n", object->count);
368 	pr_notice("  flags = 0x%x\n", object->flags);
369 	pr_notice("  checksum = %u\n", object->checksum);
370 	pr_notice("  backtrace:\n");
371 	stack_trace_print(object->trace, object->trace_len, 4);
372 }
373 
374 /*
375  * Look up a memory block's metadata (kmemleak_object) in the object search
376  * tree based on a pointer value. If alias is 0, only values pointing to the
377  * beginning of the memory block are allowed. The kmemleak_lock must be held
378  * when calling this function.
379  */
380 static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
381 {
382 	struct rb_node *rb = object_tree_root.rb_node;
383 
384 	while (rb) {
385 		struct kmemleak_object *object =
386 			rb_entry(rb, struct kmemleak_object, rb_node);
387 		if (ptr < object->pointer)
388 			rb = object->rb_node.rb_left;
389 		else if (object->pointer + object->size <= ptr)
390 			rb = object->rb_node.rb_right;
391 		else if (object->pointer == ptr || alias)
392 			return object;
393 		else {
394 			kmemleak_warn("Found object by alias at 0x%08lx\n",
395 				      ptr);
396 			dump_object_info(object);
397 			break;
398 		}
399 	}
400 	return NULL;
401 }
402 
403 /*
404  * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
405  * that once an object's use_count has reached 0, the RCU freeing has already
406  * been registered and the object should no longer be used. This function must be
407  * called under the protection of rcu_read_lock().
408  */
409 static int get_object(struct kmemleak_object *object)
410 {
411 	return atomic_inc_not_zero(&object->use_count);
412 }
413 
414 /*
415  * Memory pool allocation and freeing. kmemleak_lock must not be held.
416  */
417 static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
418 {
419 	unsigned long flags;
420 	struct kmemleak_object *object;
421 
422 	/* try the slab allocator first */
423 	if (object_cache) {
424 		object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
425 		if (object)
426 			return object;
427 	}
428 
429 	/* slab allocation failed, try the memory pool */
430 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
431 	object = list_first_entry_or_null(&mem_pool_free_list,
432 					  typeof(*object), object_list);
433 	if (object)
434 		list_del(&object->object_list);
435 	else if (mem_pool_free_count)
436 		object = &mem_pool[--mem_pool_free_count];
437 	else
438 		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
439 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
440 
441 	return object;
442 }
443 
444 /*
445  * Return the object to either the slab allocator or the memory pool.
446  */
447 static void mem_pool_free(struct kmemleak_object *object)
448 {
449 	unsigned long flags;
450 
451 	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
452 		kmem_cache_free(object_cache, object);
453 		return;
454 	}
455 
456 	/* add the object to the memory pool free list */
457 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
458 	list_add(&object->object_list, &mem_pool_free_list);
459 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
460 }
461 
462 /*
463  * RCU callback to free a kmemleak_object.
464  */
465 static void free_object_rcu(struct rcu_head *rcu)
466 {
467 	struct hlist_node *tmp;
468 	struct kmemleak_scan_area *area;
469 	struct kmemleak_object *object =
470 		container_of(rcu, struct kmemleak_object, rcu);
471 
472 	/*
473 	 * Once use_count is 0 (guaranteed by put_object), there is no other
474 	 * code accessing this object, hence no need for locking.
475 	 */
476 	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
477 		hlist_del(&area->node);
478 		kmem_cache_free(scan_area_cache, area);
479 	}
480 	mem_pool_free(object);
481 }
482 
483 /*
484  * Decrement the object use_count. Once the count is 0, free the object using
485  * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
486  * delete_object() path, the delayed RCU freeing ensures that there is no
487  * recursive call to the kernel allocator. Lock-less RCU object_list traversal
488  * is also possible.
489  */
490 static void put_object(struct kmemleak_object *object)
491 {
492 	if (!atomic_dec_and_test(&object->use_count))
493 		return;
494 
495 	/* should only get here after delete_object was called */
496 	WARN_ON(object->flags & OBJECT_ALLOCATED);
497 
498 	/*
499 	 * It may be too early for the RCU callbacks, however, there is no
500 	 * concurrent object_list traversal when !object_cache and all objects
501 	 * came from the memory pool. Free the object directly.
502 	 */
503 	if (object_cache)
504 		call_rcu(&object->rcu, free_object_rcu);
505 	else
506 		free_object_rcu(&object->rcu);
507 }
508 
509 /*
510  * Look up an object in the object search tree and increase its use_count.
511  */
512 static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
513 {
514 	unsigned long flags;
515 	struct kmemleak_object *object;
516 
517 	rcu_read_lock();
518 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
519 	object = lookup_object(ptr, alias);
520 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
521 
522 	/* check whether the object is still available */
523 	if (object && !get_object(object))
524 		object = NULL;
525 	rcu_read_unlock();
526 
527 	return object;
528 }
529 
530 /*
531  * Remove an object from the object_tree_root and object_list. Must be called
532  * with the kmemleak_lock held _if_ kmemleak is still enabled.
533  */
534 static void __remove_object(struct kmemleak_object *object)
535 {
536 	rb_erase(&object->rb_node, &object_tree_root);
537 	list_del_rcu(&object->object_list);
538 }
539 
540 /*
541  * Look up an object in the object search tree and remove it from both
542  * object_tree_root and object_list. The returned object's use_count should be
543  * at least 1, as initially set by create_object().
544  */
545 static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
546 {
547 	unsigned long flags;
548 	struct kmemleak_object *object;
549 
550 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
551 	object = lookup_object(ptr, alias);
552 	if (object)
553 		__remove_object(object);
554 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
555 
556 	return object;
557 }
558 
559 /*
560  * Save stack trace to the given array of MAX_TRACE size.
561  */
562 static int __save_stack_trace(unsigned long *trace)
563 {
564 	return stack_trace_save(trace, MAX_TRACE, 2);
565 }
566 
567 /*
568  * Create the metadata (struct kmemleak_object) corresponding to an allocated
569  * memory block and add it to the object_list and object_tree_root.
570  */
571 static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
572 					     int min_count, gfp_t gfp)
573 {
574 	unsigned long flags;
575 	struct kmemleak_object *object, *parent;
576 	struct rb_node **link, *rb_parent;
577 	unsigned long untagged_ptr;
578 
579 	object = mem_pool_alloc(gfp);
580 	if (!object) {
581 		pr_warn("Cannot allocate a kmemleak_object structure\n");
582 		kmemleak_disable();
583 		return NULL;
584 	}
585 
586 	INIT_LIST_HEAD(&object->object_list);
587 	INIT_LIST_HEAD(&object->gray_list);
588 	INIT_HLIST_HEAD(&object->area_list);
589 	raw_spin_lock_init(&object->lock);
590 	atomic_set(&object->use_count, 1);
591 	object->flags = OBJECT_ALLOCATED;
592 	object->pointer = ptr;
593 	object->size = kfence_ksize((void *)ptr) ?: size;
594 	object->excess_ref = 0;
595 	object->min_count = min_count;
596 	object->count = 0;			/* white color initially */
597 	object->jiffies = jiffies;
598 	object->checksum = 0;
599 
600 	/* task information */
601 	if (in_irq()) {
602 		object->pid = 0;
603 		strncpy(object->comm, "hardirq", sizeof(object->comm));
604 	} else if (in_serving_softirq()) {
605 		object->pid = 0;
606 		strncpy(object->comm, "softirq", sizeof(object->comm));
607 	} else {
608 		object->pid = current->pid;
609 		/*
610 		 * There is a small chance of a race with set_task_comm(),
611 		 * however using get_task_comm() here may cause locking
612 		 * dependency issues with current->alloc_lock. In the worst
613 		 * case, the command line is not correct.
614 		 */
615 		strncpy(object->comm, current->comm, sizeof(object->comm));
616 	}
617 
618 	/* kernel backtrace */
619 	object->trace_len = __save_stack_trace(object->trace);
620 
621 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
622 
623 	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
624 	min_addr = min(min_addr, untagged_ptr);
625 	max_addr = max(max_addr, untagged_ptr + size);
626 	link = &object_tree_root.rb_node;
627 	rb_parent = NULL;
628 	while (*link) {
629 		rb_parent = *link;
630 		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
631 		if (ptr + size <= parent->pointer)
632 			link = &parent->rb_node.rb_left;
633 		else if (parent->pointer + parent->size <= ptr)
634 			link = &parent->rb_node.rb_right;
635 		else {
636 			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
637 				      ptr);
638 			/*
639 			 * No need for parent->lock here since "parent" cannot
640 			 * be freed while the kmemleak_lock is held.
641 			 */
642 			dump_object_info(parent);
643 			kmem_cache_free(object_cache, object);
644 			object = NULL;
645 			goto out;
646 		}
647 	}
648 	rb_link_node(&object->rb_node, rb_parent, link);
649 	rb_insert_color(&object->rb_node, &object_tree_root);
650 
651 	list_add_tail_rcu(&object->object_list, &object_list);
652 out:
653 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
654 	return object;
655 }
656 
657 /*
658  * Mark the object as not allocated and schedule RCU freeing via put_object().
659  */
660 static void __delete_object(struct kmemleak_object *object)
661 {
662 	unsigned long flags;
663 
664 	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
665 	WARN_ON(atomic_read(&object->use_count) < 1);
666 
667 	/*
668 	 * Locking here also ensures that the corresponding memory block
669 	 * cannot be freed when it is being scanned.
670 	 */
671 	raw_spin_lock_irqsave(&object->lock, flags);
672 	object->flags &= ~OBJECT_ALLOCATED;
673 	raw_spin_unlock_irqrestore(&object->lock, flags);
674 	put_object(object);
675 }
676 
677 /*
678  * Look up the metadata (struct kmemleak_object) corresponding to ptr and
679  * delete it.
680  */
681 static void delete_object_full(unsigned long ptr)
682 {
683 	struct kmemleak_object *object;
684 
685 	object = find_and_remove_object(ptr, 0);
686 	if (!object) {
687 #ifdef DEBUG
688 		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
689 			      ptr);
690 #endif
691 		return;
692 	}
693 	__delete_object(object);
694 }
695 
696 /*
697  * Look up the metadata (struct kmemleak_object) corresponding to ptr and
698  * delete it. If the memory block is partially freed, the function may create
699  * additional metadata for the remaining parts of the block.
700  */
701 static void delete_object_part(unsigned long ptr, size_t size)
702 {
703 	struct kmemleak_object *object;
704 	unsigned long start, end;
705 
706 	object = find_and_remove_object(ptr, 1);
707 	if (!object) {
708 #ifdef DEBUG
709 		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
710 			      ptr, size);
711 #endif
712 		return;
713 	}
714 
715 	/*
716 	 * Create one or two objects that may result from the memory block
717 	 * split. Note that partial freeing is only done by free_bootmem() and
718 	 * this happens before kmemleak_init() is called.
719 	 */
720 	start = object->pointer;
721 	end = object->pointer + object->size;
722 	if (ptr > start)
723 		create_object(start, ptr - start, object->min_count,
724 			      GFP_KERNEL);
725 	if (ptr + size < end)
726 		create_object(ptr + size, end - ptr - size, object->min_count,
727 			      GFP_KERNEL);
728 
729 	__delete_object(object);
730 }
731 
732 static void __paint_it(struct kmemleak_object *object, int color)
733 {
734 	object->min_count = color;
735 	if (color == KMEMLEAK_BLACK)
736 		object->flags |= OBJECT_NO_SCAN;
737 }
738 
739 static void paint_it(struct kmemleak_object *object, int color)
740 {
741 	unsigned long flags;
742 
743 	raw_spin_lock_irqsave(&object->lock, flags);
744 	__paint_it(object, color);
745 	raw_spin_unlock_irqrestore(&object->lock, flags);
746 }
747 
748 static void paint_ptr(unsigned long ptr, int color)
749 {
750 	struct kmemleak_object *object;
751 
752 	object = find_and_get_object(ptr, 0);
753 	if (!object) {
754 		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
755 			      ptr,
756 			      (color == KMEMLEAK_GREY) ? "Grey" :
757 			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
758 		return;
759 	}
760 	paint_it(object, color);
761 	put_object(object);
762 }
763 
764 /*
765  * Mark an object permanently as gray-colored so that it can no longer be
766  * reported as a leak. This is used in general to mark a false positive.
767  */
768 static void make_gray_object(unsigned long ptr)
769 {
770 	paint_ptr(ptr, KMEMLEAK_GREY);
771 }
772 
773 /*
774  * Mark the object as black-colored so that it is ignored from scans and
775  * reporting.
776  */
777 static void make_black_object(unsigned long ptr)
778 {
779 	paint_ptr(ptr, KMEMLEAK_BLACK);
780 }
781 
782 /*
783  * Add a scanning area to the object. If at least one such area is added,
784  * kmemleak will only scan these ranges rather than the whole memory block.
785  */
786 static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
787 {
788 	unsigned long flags;
789 	struct kmemleak_object *object;
790 	struct kmemleak_scan_area *area = NULL;
791 	unsigned long untagged_ptr;
792 	unsigned long untagged_objp;
793 
794 	object = find_and_get_object(ptr, 1);
795 	if (!object) {
796 		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
797 			      ptr);
798 		return;
799 	}
800 
801 	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
802 	untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
803 
804 	if (scan_area_cache)
805 		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
806 
807 	raw_spin_lock_irqsave(&object->lock, flags);
808 	if (!area) {
809 		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
810 		/* mark the object for full scan to avoid false positives */
811 		object->flags |= OBJECT_FULL_SCAN;
812 		goto out_unlock;
813 	}
814 	if (size == SIZE_MAX) {
815 		size = untagged_objp + object->size - untagged_ptr;
816 	} else if (untagged_ptr + size > untagged_objp + object->size) {
817 		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
818 		dump_object_info(object);
819 		kmem_cache_free(scan_area_cache, area);
820 		goto out_unlock;
821 	}
822 
823 	INIT_HLIST_NODE(&area->node);
824 	area->start = ptr;
825 	area->size = size;
826 
827 	hlist_add_head(&area->node, &object->area_list);
828 out_unlock:
829 	raw_spin_unlock_irqrestore(&object->lock, flags);
830 	put_object(object);
831 }
832 
833 /*
834  * Any surplus references (object already gray) to 'ptr' are passed to
835  * 'excess_ref'. This is used in the vmalloc() case where a pointer to
836  * vm_struct may be used as an alternative reference to the vmalloc'ed object
837  * (see free_thread_stack()).
838  */
839 static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
840 {
841 	unsigned long flags;
842 	struct kmemleak_object *object;
843 
844 	object = find_and_get_object(ptr, 0);
845 	if (!object) {
846 		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
847 			      ptr);
848 		return;
849 	}
850 
851 	raw_spin_lock_irqsave(&object->lock, flags);
852 	object->excess_ref = excess_ref;
853 	raw_spin_unlock_irqrestore(&object->lock, flags);
854 	put_object(object);
855 }
856 
857 /*
858  * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
859  * pointer. Such an object will not be scanned by kmemleak but references to it
860  * are searched.
861  */
862 static void object_no_scan(unsigned long ptr)
863 {
864 	unsigned long flags;
865 	struct kmemleak_object *object;
866 
867 	object = find_and_get_object(ptr, 0);
868 	if (!object) {
869 		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
870 		return;
871 	}
872 
873 	raw_spin_lock_irqsave(&object->lock, flags);
874 	object->flags |= OBJECT_NO_SCAN;
875 	raw_spin_unlock_irqrestore(&object->lock, flags);
876 	put_object(object);
877 }
878 
879 /**
880  * kmemleak_alloc - register a newly allocated object
881  * @ptr:	pointer to beginning of the object
882  * @size:	size of the object
883  * @min_count:	minimum number of references to this object. If during memory
884  *		scanning a number of references less than @min_count is found,
885  *		the object is reported as a memory leak. If @min_count is 0,
886  *		the object is never reported as a leak. If @min_count is -1,
887  *		the object is ignored (not scanned and not reported as a leak)
888  * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
889  *
890  * This function is called from the kernel allocators when a new object
891  * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
892  */
893 void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
894 			  gfp_t gfp)
895 {
896 	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
897 
898 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
899 		create_object((unsigned long)ptr, size, min_count, gfp);
900 }
901 EXPORT_SYMBOL_GPL(kmemleak_alloc);
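
/*
 * Usage sketch for a custom allocator carving blocks out of a larger region
 * (my_pool_alloc() and carve_from_pool() are hypothetical names, not part of
 * this file):
 *
 *	void *my_pool_alloc(size_t size)
 *	{
 *		void *obj = carve_from_pool(size);
 *
 *		if (obj)
 *			kmemleak_alloc(obj, size, 1, GFP_KERNEL);
 *		return obj;
 *	}
 *
 * The matching free path would call kmemleak_free(obj) before returning the
 * block to the pool.
 */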
902 
903 /**
904  * kmemleak_alloc_percpu - register a newly allocated __percpu object
905  * @ptr:	__percpu pointer to beginning of the object
906  * @size:	size of the object
907  * @gfp:	flags used for kmemleak internal memory allocations
908  *
909  * This function is called from the kernel percpu allocator when a new object
910  * (memory block) is allocated (alloc_percpu).
911  */
912 void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
913 				 gfp_t gfp)
914 {
915 	unsigned int cpu;
916 
917 	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
918 
919 	/*
920 	 * Percpu allocations are only scanned and not reported as leaks
921 	 * (min_count is set to 0).
922 	 */
923 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
924 		for_each_possible_cpu(cpu)
925 			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
926 				      size, 0, gfp);
927 }
928 EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
929 
930 /**
931  * kmemleak_vmalloc - register a newly vmalloc'ed object
932  * @area:	pointer to vm_struct
933  * @size:	size of the object
934  * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
935  *
936  * This function is called from the vmalloc() kernel allocator when a new
937  * object (memory block) is allocated.
938  */
939 void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
940 {
941 	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);
942 
943 	/*
944 	 * A min_count = 2 is needed because vm_struct contains a reference to
945 	 * the virtual address of the vmalloc'ed block.
946 	 */
947 	if (kmemleak_enabled) {
948 		create_object((unsigned long)area->addr, size, 2, gfp);
949 		object_set_excess_ref((unsigned long)area,
950 				      (unsigned long)area->addr);
951 	}
952 }
953 EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
954 
955 /**
956  * kmemleak_free - unregister a previously registered object
957  * @ptr:	pointer to beginning of the object
958  *
959  * This function is called from the kernel allocators when an object (memory
960  * block) is freed (kmem_cache_free, kfree, vfree etc.).
961  */
962 void __ref kmemleak_free(const void *ptr)
963 {
964 	pr_debug("%s(0x%p)\n", __func__, ptr);
965 
966 	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
967 		delete_object_full((unsigned long)ptr);
968 }
969 EXPORT_SYMBOL_GPL(kmemleak_free);
970 
971 /**
972  * kmemleak_free_part - partially unregister a previously registered object
973  * @ptr:	pointer to the beginning or inside the object. This also
974  *		represents the start of the range to be freed
975  * @size:	size to be unregistered
976  *
977  * This function is called when only a part of a memory block is freed
978  * (usually from the bootmem allocator).
979  */
980 void __ref kmemleak_free_part(const void *ptr, size_t size)
981 {
982 	pr_debug("%s(0x%p)\n", __func__, ptr);
983 
984 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
985 		delete_object_part((unsigned long)ptr, size);
986 }
987 EXPORT_SYMBOL_GPL(kmemleak_free_part);
988 
989 /**
990  * kmemleak_free_percpu - unregister a previously registered __percpu object
991  * @ptr:	__percpu pointer to beginning of the object
992  *
993  * This function is called from the kernel percpu allocator when an object
994  * (memory block) is freed (free_percpu).
995  */
996 void __ref kmemleak_free_percpu(const void __percpu *ptr)
997 {
998 	unsigned int cpu;
999 
1000 	pr_debug("%s(0x%p)\n", __func__, ptr);
1001 
1002 	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
1003 		for_each_possible_cpu(cpu)
1004 			delete_object_full((unsigned long)per_cpu_ptr(ptr,
1005 								      cpu));
1006 }
1007 EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
1008 
1009 /**
1010  * kmemleak_update_trace - update object allocation stack trace
1011  * @ptr:	pointer to beginning of the object
1012  *
1013  * Override the object allocation stack trace for cases where the actual
1014  * allocation place is not always useful.
1015  */
1016 void __ref kmemleak_update_trace(const void *ptr)
1017 {
1018 	struct kmemleak_object *object;
1019 	unsigned long flags;
1020 
1021 	pr_debug("%s(0x%p)\n", __func__, ptr);
1022 
1023 	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
1024 		return;
1025 
1026 	object = find_and_get_object((unsigned long)ptr, 1);
1027 	if (!object) {
1028 #ifdef DEBUG
1029 		kmemleak_warn("Updating stack trace for unknown object at %p\n",
1030 			      ptr);
1031 #endif
1032 		return;
1033 	}
1034 
1035 	raw_spin_lock_irqsave(&object->lock, flags);
1036 	object->trace_len = __save_stack_trace(object->trace);
1037 	raw_spin_unlock_irqrestore(&object->lock, flags);
1038 
1039 	put_object(object);
1040 }
1041 EXPORT_SYMBOL(kmemleak_update_trace);
1042 
1043 /**
1044  * kmemleak_not_leak - mark an allocated object as false positive
1045  * @ptr:	pointer to beginning of the object
1046  *
1047  * Calling this function on an object will cause the memory block to no longer
1048  * be reported as a leak and always be scanned.
1049  */
1050 void __ref kmemleak_not_leak(const void *ptr)
1051 {
1052 	pr_debug("%s(0x%p)\n", __func__, ptr);
1053 
1054 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1055 		make_gray_object((unsigned long)ptr);
1056 }
1057 EXPORT_SYMBOL(kmemleak_not_leak);
1058 
1059 /**
1060  * kmemleak_ignore - ignore an allocated object
1061  * @ptr:	pointer to beginning of the object
1062  *
1063  * Calling this function on an object will cause the memory block to be
1064  * ignored (not scanned and not reported as a leak). This is usually done when
1065  * it is known that the corresponding block is not a leak and does not contain
1066  * any references to other allocated memory blocks.
1067  */
1068 void __ref kmemleak_ignore(const void *ptr)
1069 {
1070 	pr_debug("%s(0x%p)\n", __func__, ptr);
1071 
1072 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1073 		make_black_object((unsigned long)ptr);
1074 }
1075 EXPORT_SYMBOL(kmemleak_ignore);
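
/*
 * Annotation sketch: a block whose only reference lives somewhere kmemleak
 * cannot see (e.g. a hardware register) would otherwise be reported as a
 * leak, so the caller can mark it explicitly:
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	kmemleak_not_leak(buf);
 *
 * kmemleak_ignore(buf) goes one step further and also stops the block from
 * being scanned for references to other objects.
 */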
1076 
1077 /**
1078  * kmemleak_scan_area - limit the range to be scanned in an allocated object
1079  * @ptr:	pointer to beginning or inside the object. This also
1080  *		represents the start of the scan area
1081  * @size:	size of the scan area
1082  * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
1083  *
1084  * This function is used when it is known that only certain parts of an object
1085  * contain references to other objects. Kmemleak will only scan these areas,
1086  * reducing the number of false negatives.
1087  */
1088 void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
1089 {
1090 	pr_debug("%s(0x%p)\n", __func__, ptr);
1091 
1092 	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
1093 		add_scan_area((unsigned long)ptr, size, gfp);
1094 }
1095 EXPORT_SYMBOL(kmemleak_scan_area);
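
/*
 * Sketch (struct big is a hypothetical example): if only one field of a
 * large object can hold pointers, scanning may be restricted to it:
 *
 *	struct big { void *next; char payload[8192]; };
 *	struct big *b = kmalloc(sizeof(*b), GFP_KERNEL);
 *
 *	kmemleak_scan_area(&b->next, sizeof(b->next), GFP_KERNEL);
 */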
1096 
1097 /**
1098  * kmemleak_no_scan - do not scan an allocated object
1099  * @ptr:	pointer to beginning of the object
1100  *
1101  * This function notifies kmemleak not to scan the given memory block. Useful
1102  * in situations where it is known that the given object does not contain any
1103  * references to other objects. Kmemleak will not scan such objects, reducing
1104  * the number of false negatives.
1105  */
1106 void __ref kmemleak_no_scan(const void *ptr)
1107 {
1108 	pr_debug("%s(0x%p)\n", __func__, ptr);
1109 
1110 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1111 		object_no_scan((unsigned long)ptr);
1112 }
1113 EXPORT_SYMBOL(kmemleak_no_scan);
1114 
1115 /**
1116  * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
1117  *			 address argument
1118  * @phys:	physical address of the object
1119  * @size:	size of the object
1120  * @min_count:	minimum number of references to this object.
1121  *              See kmemleak_alloc()
1122  * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
1123  */
1124 void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
1125 			       gfp_t gfp)
1126 {
1127 	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1128 		kmemleak_alloc(__va(phys), size, min_count, gfp);
1129 }
1130 EXPORT_SYMBOL(kmemleak_alloc_phys);
1131 
1132 /**
1133  * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
1134  *			     physical address argument
1135  * @phys:	physical address of the beginning or inside an object. This
1136  *		also represents the start of the range to be freed
1137  * @size:	size to be unregistered
1138  */
1139 void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
1140 {
1141 	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1142 		kmemleak_free_part(__va(phys), size);
1143 }
1144 EXPORT_SYMBOL(kmemleak_free_part_phys);
1145 
1146 /**
1147  * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
1148  *			    address argument
1149  * @phys:	physical address of the object
1150  */
1151 void __ref kmemleak_not_leak_phys(phys_addr_t phys)
1152 {
1153 	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1154 		kmemleak_not_leak(__va(phys));
1155 }
1156 EXPORT_SYMBOL(kmemleak_not_leak_phys);
1157 
1158 /**
1159  * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
1160  *			  address argument
1161  * @phys:	physical address of the object
1162  */
1163 void __ref kmemleak_ignore_phys(phys_addr_t phys)
1164 {
1165 	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1166 		kmemleak_ignore(__va(phys));
1167 }
1168 EXPORT_SYMBOL(kmemleak_ignore_phys);
1169 
1170 /*
1171  * Update an object's checksum and return true if it was modified.
1172  */
1173 static bool update_checksum(struct kmemleak_object *object)
1174 {
1175 	u32 old_csum = object->checksum;
1176 
1177 	kasan_disable_current();
1178 	kcsan_disable_current();
1179 	object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
1180 	kasan_enable_current();
1181 	kcsan_enable_current();
1182 
1183 	return object->checksum != old_csum;
1184 }
1185 
1186 /*
1187  * Update an object's references. object->lock must be held by the caller.
1188  */
1189 static void update_refs(struct kmemleak_object *object)
1190 {
1191 	if (!color_white(object)) {
1192 		/* non-orphan, ignored or new */
1193 		return;
1194 	}
1195 
1196 	/*
1197 	 * Increase the object's reference count (number of pointers to the
1198 	 * memory block). If this count reaches the required minimum, the
1199 	 * object's color will become gray and it will be added to the
1200 	 * gray_list.
1201 	 */
1202 	object->count++;
1203 	if (color_gray(object)) {
1204 		/* put_object() called when removing from gray_list */
1205 		WARN_ON(!get_object(object));
1206 		list_add_tail(&object->gray_list, &gray_list);
1207 	}
1208 }
1209 
1210 /*
1211  * Memory scanning is a long process and it needs to be interruptible. This
1212  * function checks whether such an interrupt condition has occurred.
1213  */
1214 static int scan_should_stop(void)
1215 {
1216 	if (!kmemleak_enabled)
1217 		return 1;
1218 
1219 	/*
1220 	 * This function may be called from either process or kthread context,
1221 	 * hence the need to check for both stop conditions.
1222 	 */
1223 	if (current->mm)
1224 		return signal_pending(current);
1225 	else
1226 		return kthread_should_stop();
1229 }
1230 
1231 /*
1232  * Scan a memory block (exclusive range) for valid pointers and add those
1233  * found to the gray list.
1234  */
1235 static void scan_block(void *_start, void *_end,
1236 		       struct kmemleak_object *scanned)
1237 {
1238 	unsigned long *ptr;
1239 	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1240 	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1241 	unsigned long flags;
1242 	unsigned long untagged_ptr;
1243 
1244 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
1245 	for (ptr = start; ptr < end; ptr++) {
1246 		struct kmemleak_object *object;
1247 		unsigned long pointer;
1248 		unsigned long excess_ref;
1249 
1250 		if (scan_should_stop())
1251 			break;
1252 
1253 		kasan_disable_current();
1254 		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
1255 		kasan_enable_current();
1256 
1257 		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
1258 		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
1259 			continue;
1260 
1261 		/*
1262 		 * No need for get_object() here since we hold kmemleak_lock.
1263 		 * object->use_count cannot be dropped to 0 while the object
1264 		 * is still present in object_tree_root and object_list
1265 		 * (with updates protected by kmemleak_lock).
1266 		 */
1267 		object = lookup_object(pointer, 1);
1268 		if (!object)
1269 			continue;
1270 		if (object == scanned)
1271 			/* self referenced, ignore */
1272 			continue;
1273 
1274 		/*
1275 		 * Avoid the lockdep recursive warning on object->lock being
1276 		 * previously acquired in scan_object(). These locks are
1277 		 * enclosed by scan_mutex.
1278 		 */
1279 		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1280 		/* only pass surplus references (object already gray) */
1281 		if (color_gray(object)) {
1282 			excess_ref = object->excess_ref;
1283 			/* no need for update_refs() if object already gray */
1284 		} else {
1285 			excess_ref = 0;
1286 			update_refs(object);
1287 		}
1288 		raw_spin_unlock(&object->lock);
1289 
1290 		if (excess_ref) {
1291 			object = lookup_object(excess_ref, 0);
1292 			if (!object)
1293 				continue;
1294 			if (object == scanned)
1295 				/* circular reference, ignore */
1296 				continue;
1297 			raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1298 			update_refs(object);
1299 			raw_spin_unlock(&object->lock);
1300 		}
1301 	}
1302 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
1303 }
1304 
1305 /*
1306  * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
1307  */
1308 #ifdef CONFIG_SMP
1309 static void scan_large_block(void *start, void *end)
1310 {
1311 	void *next;
1312 
1313 	while (start < end) {
1314 		next = min(start + MAX_SCAN_SIZE, end);
1315 		scan_block(start, next, NULL);
1316 		start = next;
1317 		cond_resched();
1318 	}
1319 }
1320 #endif
1321 
1322 /*
1323  * Scan a memory block corresponding to a kmemleak_object. A condition is
1324  * that object->use_count >= 1.
1325  */
1326 static void scan_object(struct kmemleak_object *object)
1327 {
1328 	struct kmemleak_scan_area *area;
1329 	unsigned long flags;
1330 
1331 	/*
1332 	 * Once the object->lock is acquired, the corresponding memory block
1333 	 * cannot be freed (the same lock is acquired in delete_object).
1334 	 */
1335 	raw_spin_lock_irqsave(&object->lock, flags);
1336 	if (object->flags & OBJECT_NO_SCAN)
1337 		goto out;
1338 	if (!(object->flags & OBJECT_ALLOCATED))
1339 		/* already freed object */
1340 		goto out;
1341 	if (hlist_empty(&object->area_list) ||
1342 	    object->flags & OBJECT_FULL_SCAN) {
1343 		void *start = (void *)object->pointer;
1344 		void *end = (void *)(object->pointer + object->size);
1345 		void *next;
1346 
1347 		do {
1348 			next = min(start + MAX_SCAN_SIZE, end);
1349 			scan_block(start, next, object);
1350 
1351 			start = next;
1352 			if (start >= end)
1353 				break;
1354 
1355 			raw_spin_unlock_irqrestore(&object->lock, flags);
1356 			cond_resched();
1357 			raw_spin_lock_irqsave(&object->lock, flags);
1358 		} while (object->flags & OBJECT_ALLOCATED);
1359 	} else
1360 		hlist_for_each_entry(area, &object->area_list, node)
1361 			scan_block((void *)area->start,
1362 				   (void *)(area->start + area->size),
1363 				   object);
1364 out:
1365 	raw_spin_unlock_irqrestore(&object->lock, flags);
1366 }
1367 
1368 /*
1369  * Scan the objects already referenced (gray objects). More objects will be
1370  * referenced and, if there are no memory leaks, all the objects are scanned.
1371  */
1372 static void scan_gray_list(void)
1373 {
1374 	struct kmemleak_object *object, *tmp;
1375 
1376 	/*
1377 	 * The list traversal is safe for both tail additions and removals
1378 	 * from inside the loop. The kmemleak objects cannot be freed from
1379 	 * outside the loop because their use_count was incremented.
1380 	 */
1381 	object = list_entry(gray_list.next, typeof(*object), gray_list);
1382 	while (&object->gray_list != &gray_list) {
1383 		cond_resched();
1384 
1385 		/* may add new objects to the list */
1386 		if (!scan_should_stop())
1387 			scan_object(object);
1388 
1389 		tmp = list_entry(object->gray_list.next, typeof(*object),
1390 				 gray_list);
1391 
1392 		/* remove the object from the list and release it */
1393 		list_del(&object->gray_list);
1394 		put_object(object);
1395 
1396 		object = tmp;
1397 	}
1398 	WARN_ON(!list_empty(&gray_list));
1399 }
1400 
1401 /*
1402  * Scan data sections and all the referenced memory blocks allocated via the
1403  * kernel's standard allocators. This function must be called with the
1404  * scan_mutex held.
1405  */
1406 static void kmemleak_scan(void)
1407 {
1408 	unsigned long flags;
1409 	struct kmemleak_object *object;
1410 	struct zone *zone;
1411 	int __maybe_unused i;
1412 	int new_leaks = 0;
1413 
1414 	jiffies_last_scan = jiffies;
1415 
1416 	/* prepare the kmemleak_objects */
1417 	rcu_read_lock();
1418 	list_for_each_entry_rcu(object, &object_list, object_list) {
1419 		raw_spin_lock_irqsave(&object->lock, flags);
1420 #ifdef DEBUG
1421 		/*
1422 		 * With a few exceptions there should be a maximum of
1423 		 * 1 reference to any object at this point.
1424 		 */
1425 		if (atomic_read(&object->use_count) > 1) {
1426 			pr_debug("object->use_count = %d\n",
1427 				 atomic_read(&object->use_count));
1428 			dump_object_info(object);
1429 		}
1430 #endif
1431 		/* reset the reference count (whiten the object) */
1432 		object->count = 0;
1433 		if (color_gray(object) && get_object(object))
1434 			list_add_tail(&object->gray_list, &gray_list);
1435 
1436 		raw_spin_unlock_irqrestore(&object->lock, flags);
1437 	}
1438 	rcu_read_unlock();
1439 
1440 #ifdef CONFIG_SMP
1441 	/* per-cpu sections scanning */
1442 	for_each_possible_cpu(i)
1443 		scan_large_block(__per_cpu_start + per_cpu_offset(i),
1444 				 __per_cpu_end + per_cpu_offset(i));
1445 #endif
1446 
1447 	/*
1448 	 * Struct page scanning for each node.
1449 	 */
1450 	get_online_mems();
1451 	for_each_populated_zone(zone) {
1452 		unsigned long start_pfn = zone->zone_start_pfn;
1453 		unsigned long end_pfn = zone_end_pfn(zone);
1454 		unsigned long pfn;
1455 
1456 		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1457 			struct page *page = pfn_to_online_page(pfn);
1458 
1459 			if (!page)
1460 				continue;
1461 
1462 			/* only scan pages belonging to this zone */
1463 			if (page_zone(page) != zone)
1464 				continue;
1465 			/* only scan if page is in use */
1466 			if (page_count(page) == 0)
1467 				continue;
1468 			scan_block(page, page + 1, NULL);
1469 			if (!(pfn & 63))
1470 				cond_resched();
1471 		}
1472 	}
1473 	put_online_mems();
1474 
1475 	/*
1476 	 * Scanning the task stacks (may introduce false negatives).
1477 	 */
1478 	if (kmemleak_stack_scan) {
1479 		struct task_struct *p, *g;
1480 
1481 		rcu_read_lock();
1482 		for_each_process_thread(g, p) {
1483 			void *stack = try_get_task_stack(p);
1484 			if (stack) {
1485 				scan_block(stack, stack + THREAD_SIZE, NULL);
1486 				put_task_stack(p);
1487 			}
1488 		}
1489 		rcu_read_unlock();
1490 	}
1491 
1492 	/*
1493 	 * Scan the objects already referenced from the sections scanned
1494 	 * above.
1495 	 */
1496 	scan_gray_list();
1497 
1498 	/*
1499 	 * Check for new or unreferenced objects modified since the previous
1500 	 * scan and color them gray until the next scan.
1501 	 */
1502 	rcu_read_lock();
1503 	list_for_each_entry_rcu(object, &object_list, object_list) {
1504 		raw_spin_lock_irqsave(&object->lock, flags);
1505 		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1506 		    && update_checksum(object) && get_object(object)) {
1507 			/* color it gray temporarily */
1508 			object->count = object->min_count;
1509 			list_add_tail(&object->gray_list, &gray_list);
1510 		}
1511 		raw_spin_unlock_irqrestore(&object->lock, flags);
1512 	}
1513 	rcu_read_unlock();
1514 
1515 	/*
1516 	 * Re-scan the gray list for modified unreferenced objects.
1517 	 */
1518 	scan_gray_list();
1519 
1520 	/*
1521 	 * If scanning was stopped do not report any new unreferenced objects.
1522 	 */
1523 	if (scan_should_stop())
1524 		return;
1525 
1526 	/*
1527 	 * Scanning result reporting.
1528 	 */
1529 	rcu_read_lock();
1530 	list_for_each_entry_rcu(object, &object_list, object_list) {
1531 		raw_spin_lock_irqsave(&object->lock, flags);
1532 		if (unreferenced_object(object) &&
1533 		    !(object->flags & OBJECT_REPORTED)) {
1534 			object->flags |= OBJECT_REPORTED;
1535 
1536 			if (kmemleak_verbose)
1537 				print_unreferenced(NULL, object);
1538 
1539 			new_leaks++;
1540 		}
1541 		raw_spin_unlock_irqrestore(&object->lock, flags);
1542 	}
1543 	rcu_read_unlock();
1544 
1545 	if (new_leaks) {
1546 		kmemleak_found_leaks = true;
1547 
1548 		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
1549 			new_leaks);
1550 	}
1551 
1552 }
1553 
1554 /*
1555  * Thread function performing automatic memory scanning. Unreferenced objects
1556  * at the end of a memory scan are reported, but only the first time.
1557  */
1558 static int kmemleak_scan_thread(void *arg)
1559 {
	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object returned, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	raw_spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};
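
/*
 * The seq operations above back reads of the "kmemleak" debugfs file
 * created in kmemleak_late_init(), e.g. (illustrative, assuming debugfs
 * is mounted at /sys/kernel/debug):
 *
 *   cat /sys/kernel/debug/kmemleak
 */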

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans, these black objects could
 * potentially contain references to newly allocated objects and we would
 * end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all currently reported unreferenced kmemleak objects as
 *		  grey to ignore printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled.
 *   dump=...	- dump information about the object found at the given address
 */
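/*
 * Example usage (illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug and the dump= address is hypothetical):
 *
 *   echo scan=60 > /sys/kernel/debug/kmemleak	# scan every 60 seconds
 *   echo stack=off > /sys/kernel/debug/kmemleak
 *   echo dump=0xffff888012345678 > /sys/kernel/debug/kmemleak
 */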
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EPERM;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = kstrtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * Kmemleak has already been disabled, so there is no need for RCU
	 * list traversal or for holding kmemleak_lock.
	 */
	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
		__remove_object(object);
		__delete_object(object);
	}
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no memory leaks were found (otherwise, kmemleak may still hold useful
 * information on the reported leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once kmemleak_scan() is guaranteed to have stopped, it is safe to
	 * no longer track object freeing. Ordering of the scan thread
	 * stopping and the memory accesses below is guaranteed by the
	 * kthread_stop() function.
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

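/*
 * Clean-up runs from a work item: kthread_stop() can sleep while waiting
 * for the scan thread to exit, which would not be safe from every context
 * that may call kmemleak_disable().
 */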
static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
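
/*
 * For example (illustrative), booting with "kmemleak=off" on the kernel
 * command line disables the detector even when it is built in, while
 * "kmemleak=on" overrides CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF.
 */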

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_disable();
		return;
	}
#endif

	if (kmemleak_error)
		return;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	/* register the data/bss sections */
	create_object((unsigned long)_sdata, _edata - _sdata,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	/* only register .data..ro_after_init if not within .data */
	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
		create_object((unsigned long)__start_ro_after_init,
			      __end_ro_after_init - __start_ro_after_init,
			      KMEMLEAK_GREY, GFP_ATOMIC);
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	kmemleak_initialized = 1;

	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
		mutex_lock(&scan_mutex);
		start_scan_thread();
		mutex_unlock(&scan_mutex);
	}

	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
		mem_pool_free_count);

	return 0;
}
late_initcall(kmemleak_late_init);