// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (raw_spinlock_t): protects the object_list as well as
 *   del_state modifications and accesses to the object trees
 *   (object_tree_root, object_phys_tree_root, object_percpu_tree_root). The
 *   object_list is the main list holding the metadata (struct
 *   kmemleak_object) for the allocated memory blocks. The object trees are
 *   red black trees used to look-up metadata based on a pointer to the
 *   corresponding memory block. The kmemleak_object structures are added to
 *   the object_list and the object tree root in the create_object() function
 *   called from the kmemleak_alloc{,_phys,_percpu}() callback and removed in
 *   delete_object() called from the kmemleak_free{,_phys,_percpu}() callback
 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
 *   Accesses to the metadata (e.g. count) are protected by this lock. Note
 *   that some members of this structure may be protected by other means
 *   (atomic or kmemleak_lock). This lock is also held when scanning the
 *   corresponding memory block to avoid the kernel freeing it via the
 *   kmemleak_free() callback. This is less heavyweight than holding a global
 *   lock like kmemleak_lock during scanning.
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
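
/*
 * For illustration only (not part of the implementation below): the
 * RCU/use_count rules above translate into a lookup pattern roughly like
 * the one in __find_and_get_object() further down; all names here refer to
 * functions and variables defined later in this file:
 *
 *	rcu_read_lock();
 *	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 *	object = __lookup_object(ptr, alias, objflags);
 *	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 *	if (object && !get_object(object))	// use_count already 0
 *		object = NULL;
 *	rcu_read_unlock();
 *	...
 *	if (object)
 *		put_object(object);		// may schedule RCU freeing
 */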

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	raw_spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned int del_state;		/* deletion state */
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before the object is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	depot_stack_handle_t trace_handle;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN	(1 << 3)
/* flag set for object allocated with physical address */
#define OBJECT_PHYS		(1 << 4)
/* flag set for per-CPU pointers */
#define OBJECT_PERCPU		(1 << 5)

/* set when __remove_object() called */
#define DELSTATE_REMOVED	(1 << 0)
/* set to temporarily prevent deletion from object_list */
#define DELSTATE_NO_DELETE	(1 << 1)

#define HEX_PREFIX		"    "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PHYS flag) boundaries */
static struct rb_root object_phys_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PERCPU flag) boundaries */
static struct rb_root object_percpu_tree_root = RB_ROOT;
/* protecting the access to object_list, del_state and the object trees */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_late_initialized;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

/* minimum and maximum address that may be valid per-CPU pointers */
static unsigned long min_percpu_addr = ULONG_MAX;
static unsigned long max_percpu_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scans */
static unsigned long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* set if there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing will no longer be available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)	do {	\
	if (seq)					\
		seq_printf(seq, fmt, ##__VA_ARGS__);	\
	else						\
		pr_warn(fmt, ##__VA_ARGS__);		\
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
				 int rowsize, int groupsize, const void *buf,
				 size_t len, bool ascii)
{
	if (seq)
		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
			     buf, len, ascii);
	else
		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
			       rowsize, groupsize, buf, len, ascii);
}

/*
 * Print the object's hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
		return;

	if (object->flags & OBJECT_PERCPU)
		ptr = (const u8 *)this_cpu_ptr((void __percpu *)object->pointer);

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	if (object->flags & OBJECT_PERCPU)
		warn_or_seq_printf(seq, "  hex dump (first %zu bytes on cpu %d):\n",
				   len, raw_smp_processor_id());
	else
		warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan: either marked as a false positive (min_count == 0)
 *		or with sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Print the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(object->trace_handle, &entries);
	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
			  object->pointer, object->size);
	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
			   object->comm, object->pid, object->jiffies);
	hex_dump_object(seq, object);
	warn_or_seq_printf(seq, "  backtrace (crc %x):\n", object->checksum);

	for (i = 0; i < nr_entries; i++) {
		void *ptr = (void *)entries[i];
		warn_or_seq_printf(seq, "    %pS\n", ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	pr_notice("Object 0x%08lx (size %zu):\n",
			object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
			object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%x\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	if (object->trace_handle)
		stack_depot_print(object->trace_handle);
}

static struct rb_root *object_tree(unsigned long objflags)
{
	if (objflags & OBJECT_PHYS)
		return &object_phys_tree_root;
	if (objflags & OBJECT_PERCPU)
		return &object_percpu_tree_root;
	return &object_tree_root;
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
					       unsigned int objflags)
{
	struct rb_node *rb = object_tree(objflags)->rb_node;
	unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);

	while (rb) {
		struct kmemleak_object *object;
		unsigned long untagged_objp;

		object = rb_entry(rb, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

		if (untagged_ptr < untagged_objp)
			rb = object->rb_node.rb_left;
		else if (untagged_objp + object->size <= untagged_ptr)
			rb = object->rb_node.rb_right;
		else if (untagged_objp == untagged_ptr || alias)
			return object;
		else {
			/*
			 * Printk deferring due to the kmemleak_lock held.
			 * This is done to avoid deadlock.
			 */
			printk_deferred_enter();
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			printk_deferred_exit();
			break;
		}
	}
	return NULL;
}

/* Look up a kmemleak object which was allocated with a virtual address. */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	return __lookup_object(ptr, alias, 0);
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been registered and the object should no longer be used. This function must
 * be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	bool warn = false;

	/* try the slab allocator first */
	if (object_cache) {
		object = kmem_cache_alloc_noprof(object_cache,
						 gfp_nested_mask(gfp));
		if (object)
			return object;
	}

	/* slab allocation failed, try the memory pool */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = list_first_entry_or_null(&mem_pool_free_list,
					  typeof(*object), object_list);
	if (object)
		list_del(&object->object_list);
	else if (mem_pool_free_count)
		object = &mem_pool[--mem_pool_free_count];
	else
		warn = true;
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	if (warn)
		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");

	return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
		kmem_cache_free(object_cache, object);
		return;
	}

	/* add the object to the memory pool free list */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	list_add(&object->object_list, &mem_pool_free_list);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	mem_pool_free(object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	/*
	 * It may be too early for the RCU callbacks, however, there is no
	 * concurrent object_list traversal when !object_cache and all objects
	 * came from the memory pool. Free the object directly.
	 */
	if (object_cache)
		call_rcu(&object->rcu, free_object_rcu);
	else
		free_object_rcu(&object->rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
						     unsigned int objflags)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __lookup_object(ptr, alias, objflags);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/* Look up and get an object which was allocated with a virtual address. */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	return __find_and_get_object(ptr, alias, 0);
}

/*
 * Remove an object from its object tree and object_list. Must be called with
 * the kmemleak_lock held _if_ kmemleak is still enabled.
 */
static void __remove_object(struct kmemleak_object *object)
{
	rb_erase(&object->rb_node, object_tree(object->flags));
	if (!(object->del_state & DELSTATE_NO_DELETE))
		list_del_rcu(&object->object_list);
	object->del_state |= DELSTATE_REMOVED;
}

static struct kmemleak_object *__find_and_remove_object(unsigned long ptr,
							int alias,
							unsigned int objflags)
{
	struct kmemleak_object *object;

	object = __lookup_object(ptr, alias, objflags);
	if (object)
		__remove_object(object);

	return object;
}

/*
 * Look up an object in the object search tree and remove it from both object
 * tree root and object_list. The returned object's use_count should be at
 * least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
						      unsigned int objflags)
{
	unsigned long flags;
	struct kmemleak_object *object;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __find_and_remove_object(ptr, alias, objflags);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

static noinline depot_stack_handle_t set_track_prepare(void)
{
	depot_stack_handle_t trace_handle;
	unsigned long entries[MAX_TRACE];
	unsigned int nr_entries;

	/*
	 * Use object_cache to determine whether kmemleak_init() has
	 * been invoked. stack_depot_early_init() is called before
	 * kmemleak_init() in mm_core_init().
	 */
	if (!object_cache)
		return 0;
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
	trace_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);

	return trace_handle;
}

static struct kmemleak_object *__alloc_object(gfp_t gfp)
{
	struct kmemleak_object *object;

	object = mem_pool_alloc(gfp);
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	raw_spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->excess_ref = 0;
	object->count = 0;			/* white color initially */
	object->checksum = 0;
	object->del_state = 0;

	/* task information */
	if (in_hardirq()) {
		object->pid = 0;
		strscpy(object->comm, "hardirq");
	} else if (in_serving_softirq()) {
		object->pid = 0;
		strscpy(object->comm, "softirq");
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strscpy(object->comm, current->comm);
	}

	/* kernel backtrace */
	object->trace_handle = set_track_prepare();

	return object;
}

static int __link_object(struct kmemleak_object *object, unsigned long ptr,
			 size_t size, int min_count, unsigned int objflags)
{
	struct kmemleak_object *parent;
	struct rb_node **link, *rb_parent;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object->flags = OBJECT_ALLOCATED | objflags;
	object->pointer = ptr;
	object->size = kfence_ksize((void *)ptr) ?: size;
	object->min_count = min_count;
	object->jiffies = jiffies;

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	/*
	 * Only update min_addr and max_addr for objects storing a virtual
	 * address, and min_percpu_addr/max_percpu_addr for per-CPU objects.
	 */
	if (objflags & OBJECT_PERCPU) {
		min_percpu_addr = min(min_percpu_addr, untagged_ptr);
		max_percpu_addr = max(max_percpu_addr, untagged_ptr + size);
	} else if (!(objflags & OBJECT_PHYS)) {
		min_addr = min(min_addr, untagged_ptr);
		max_addr = max(max_addr, untagged_ptr + size);
	}
	link = &object_tree(objflags)->rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
		if (untagged_ptr + size <= untagged_objp)
			link = &parent->rb_node.rb_left;
		else if (untagged_objp + parent->size <= untagged_ptr)
			link = &parent->rb_node.rb_right;
		else {
			/*
			 * Printk deferring due to the kmemleak_lock held.
			 * This is done to avoid deadlock.
			 */
			printk_deferred_enter();
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			printk_deferred_exit();
			return -EEXIST;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, object_tree(objflags));
	list_add_tail_rcu(&object->object_list, &object_list);

	return 0;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object tree.
 */
static void __create_object(unsigned long ptr, size_t size,
				int min_count, gfp_t gfp, unsigned int objflags)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int ret;

	object = __alloc_object(gfp);
	if (!object)
		return;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	ret = __link_object(object, ptr, size, min_count, objflags);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	if (ret)
		mem_pool_free(object);
}

/* Create a kmemleak object for a block allocated with a virtual address. */
static void create_object(unsigned long ptr, size_t size,
			  int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, 0);
}

/* Create a kmemleak object for a block allocated with a physical address. */
static void create_object_phys(unsigned long ptr, size_t size,
			       int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, OBJECT_PHYS);
}

/* Create a kmemleak object corresponding to a per-CPU allocation. */
static void create_object_percpu(unsigned long ptr, size_t size,
				 int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, OBJECT_PERCPU);
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr, unsigned int objflags)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0, objflags);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size,
			       unsigned int objflags)
{
	struct kmemleak_object *object, *object_l, *object_r;
	unsigned long start, end, flags;

	object_l = __alloc_object(GFP_KERNEL);
	if (!object_l)
		return;

	object_r = __alloc_object(GFP_KERNEL);
	if (!object_r)
		goto out;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __find_and_remove_object(ptr, 1, objflags);
	if (!object)
		goto unlock;

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if ((ptr > start) &&
	    !__link_object(object_l, start, ptr - start,
			   object->min_count, objflags))
		object_l = NULL;
	if ((ptr + size < end) &&
	    !__link_object(object_r, ptr + size, end - ptr - size,
			   object->min_count, objflags))
		object_r = NULL;

unlock:
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	if (object) {
		__delete_object(object);
	} else {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
	}

out:
	if (object_l)
		mem_pool_free(object_l);
	if (object_r)
		mem_pool_free(object_r);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color, unsigned int objflags)
{
	struct kmemleak_object *object;

	object = __find_and_get_object(ptr, 0, objflags);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY, 0);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr, unsigned int objflags)
{
	paint_ptr(ptr, KMEMLEAK_BLACK, objflags);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area = NULL;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

	if (scan_area_cache)
		area = kmem_cache_alloc_noprof(scan_area_cache,
					       gfp_nested_mask(gfp));

	raw_spin_lock_irqsave(&object->lock, flags);
	if (!area) {
		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
		/* mark the object for full scan to avoid false positives */
		object->flags |= OBJECT_FULL_SCAN;
		goto out_unlock;
	}
	if (size == SIZE_MAX) {
		size = untagged_objp + object->size - untagged_ptr;
	} else if (untagged_ptr + size > untagged_objp + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references
 * to it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
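
/*
 * Sketch of a typical caller (hypothetical code, not from this file): a
 * custom allocator handing out memory that the slab hooks cannot see would
 * pair kmemleak_alloc() with kmemleak_free(); my_pool_take() and
 * my_pool_return() are made-up names for this illustration:
 *
 *	void *my_pool_alloc(size_t size, gfp_t gfp)
 *	{
 *		void *p = my_pool_take(size);
 *
 *		if (p)
 *			kmemleak_alloc(p, size, 1, gfp);
 *		return p;
 *	}
 *
 *	void my_pool_free(void *p)
 *	{
 *		kmemleak_free(p);
 *		my_pool_return(p);
 *	}
 */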

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu)\n", __func__, ptr, size);

	if (kmemleak_enabled && ptr && !IS_ERR_PCPU(ptr))
		create_object_percpu((__force unsigned long)ptr, size, 1, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
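
/*
 * A note on the min_count of 2 used above (illustration of the mechanism):
 * both the pointer returned to the vmalloc() caller and area->addr inside
 * the vm_struct normally point at the block, so finding fewer than two
 * references suggests the caller leaked its copy. The excess_ref link set
 * here means that once the vm_struct object itself is gray, surplus
 * pointers to it are credited to the block at area->addr (see
 * pointer_update_refs()), which covers users that keep only the vm_struct
 * pointer, e.g. free_thread_stack().
 */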

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR_PCPU(ptr))
		delete_object_full((__force unsigned long)ptr, OBJECT_PERCPU);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	depot_stack_handle_t trace_handle;
	unsigned long flags;

	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	trace_handle = set_track_prepare();
	raw_spin_lock_irqsave(&object->lock, flags);
	object->trace_handle = trace_handle;
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);
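
/*
 * Sketch of a typical caller (hypothetical driver code): a buffer whose
 * only reference is handed to hardware, where no scan can find it, is
 * annotated right after allocation. dev_base and DMA_ADDR_REG are made-up
 * names for this illustration:
 *
 *	buf = kmalloc(size, GFP_KERNEL);
 *	if (buf) {
 *		writel(lower_32_bits(virt_to_phys(buf)),
 *		       dev_base + DMA_ADDR_REG);
 *		kmemleak_not_leak(buf);	// reachable only via the device
 *	}
 */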

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);
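
/*
 * Sketch (hypothetical structure, not from this file): when only part of
 * an object can hold pointers, narrowing the scan area avoids stale bytes
 * elsewhere in the block being mistaken for references, which would hide
 * real leaks:
 *
 *	struct my_obj {
 *		char scratch[512];	// raw data, never kernel pointers
 *		struct list_head list;	// the only pointers worth scanning
 *	};
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	if (obj)
 *		kmemleak_scan_area(&obj->list, sizeof(obj->list), GFP_KERNEL);
 */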

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%px)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);
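
/*
 * Sketch of when this helps (hypothetical code): a large buffer of plain
 * data contains nothing for kmemleak to follow, so scanning it only costs
 * time and risks random data being taken for references. RING_BYTES is a
 * made-up name for this illustration:
 *
 *	ring = kmalloc(RING_BYTES, GFP_KERNEL);
 *	if (ring)
 *		kmemleak_no_scan(ring);	// payload only, no pointers inside
 */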

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%px, %zu)\n", __func__, &phys, size);

	if (kmemleak_enabled)
		/*
		 * Create object with OBJECT_PHYS flag and
		 * assume min_count 0.
		 */
		create_object_phys((unsigned long)phys, size, 0, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 * @phys:	physical address of the beginning or inside an object. This
 *		also represents the start of the range to be freed
 * @size:	size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	pr_debug("%s(0x%px)\n", __func__, &phys);

	if (kmemleak_enabled)
		delete_object_part((unsigned long)phys, size, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	pr_debug("%s(0x%px)\n", __func__, &phys);

	if (kmemleak_enabled)
		make_black_object((unsigned long)phys, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
		return false;

	kasan_disable_current();
	kcsan_disable_current();
	if (object->flags & OBJECT_PERCPU) {
		unsigned int cpu;

		object->checksum = 0;
		for_each_possible_cpu(cpu) {
			void *ptr = per_cpu_ptr((void __percpu *)object->pointer, cpu);

			object->checksum ^= crc32(0, kasan_reset_tag((void *)ptr), object->size);
		}
	} else {
		object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
	}
	kasan_enable_current();
	kcsan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}

static void pointer_update_refs(struct kmemleak_object *scanned,
			 unsigned long pointer, unsigned int objflags)
{
	struct kmemleak_object *object;
	unsigned long untagged_ptr;
	unsigned long excess_ref;

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
	if (objflags & OBJECT_PERCPU) {
		if (untagged_ptr < min_percpu_addr || untagged_ptr >= max_percpu_addr)
			return;
	} else {
		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
			return;
	}

	/*
	 * No need for get_object() here since we hold kmemleak_lock.
	 * object->use_count cannot be dropped to 0 while the object
	 * is still present in object_tree_root and object_list
	 * (with updates protected by kmemleak_lock).
	 */
	object = __lookup_object(pointer, 1, objflags);
	if (!object)
		return;
	if (object == scanned)
		/* self referenced, ignore */
		return;

	/*
	 * Avoid the lockdep recursive warning on object->lock being
	 * previously acquired in scan_object(). These locks are
	 * enclosed by scan_mutex.
	 */
	raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
	/* only pass surplus references (object already gray) */
	if (color_gray(object)) {
		excess_ref = object->excess_ref;
		/* no need for update_refs() if object already gray */
	} else {
		excess_ref = 0;
		update_refs(object);
	}
	raw_spin_unlock(&object->lock);

	if (excess_ref) {
		object = lookup_object(excess_ref, 0);
		if (!object)
			return;
		if (object == scanned)
			/* circular reference, ignore */
			return;
		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		update_refs(object);
		raw_spin_unlock(&object->lock);
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		unsigned long pointer;

		if (scan_should_stop())
			break;

		kasan_disable_current();
		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
		kasan_enable_current();

		pointer_update_refs(scanned, pointer, 0);
		pointer_update_refs(scanned, pointer, OBJECT_PERCPU);
	}
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
#endif
1505 
1506 /*
1507  * Scan a memory block corresponding to a kmemleak_object. A condition is
1508  * that object->use_count >= 1.
1509  */
scan_object(struct kmemleak_object * object)1510 static void scan_object(struct kmemleak_object *object)
1511 {
1512 	struct kmemleak_scan_area *area;
1513 	unsigned long flags;
1514 
1515 	/*
1516 	 * Once the object->lock is acquired, the corresponding memory block
1517 	 * cannot be freed (the same lock is acquired in delete_object).
1518 	 */
1519 	raw_spin_lock_irqsave(&object->lock, flags);
1520 	if (object->flags & OBJECT_NO_SCAN)
1521 		goto out;
1522 	if (!(object->flags & OBJECT_ALLOCATED))
1523 		/* already freed object */
1524 		goto out;
1525 
1526 	if (object->flags & OBJECT_PERCPU) {
1527 		unsigned int cpu;
1528 
1529 		for_each_possible_cpu(cpu) {
1530 			void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu);
1531 			void *end = start + object->size;
1532 
1533 			scan_block(start, end, object);
1534 
1535 			raw_spin_unlock_irqrestore(&object->lock, flags);
1536 			cond_resched();
1537 			raw_spin_lock_irqsave(&object->lock, flags);
1538 			if (!(object->flags & OBJECT_ALLOCATED))
1539 				break;
1540 		}
1541 	} else if (hlist_empty(&object->area_list) ||
1542 	    object->flags & OBJECT_FULL_SCAN) {
1543 		void *start = object->flags & OBJECT_PHYS ?
1544 				__va((phys_addr_t)object->pointer) :
1545 				(void *)object->pointer;
1546 		void *end = start + object->size;
1547 		void *next;
1548 
1549 		do {
1550 			next = min(start + MAX_SCAN_SIZE, end);
1551 			scan_block(start, next, object);
1552 
1553 			start = next;
1554 			if (start >= end)
1555 				break;
1556 
1557 			raw_spin_unlock_irqrestore(&object->lock, flags);
1558 			cond_resched();
1559 			raw_spin_lock_irqsave(&object->lock, flags);
1560 		} while (object->flags & OBJECT_ALLOCATED);
1561 	} else {
1562 		hlist_for_each_entry(area, &object->area_list, node)
1563 			scan_block((void *)area->start,
1564 				   (void *)(area->start + area->size),
1565 				   object);
1566 	}
1567 out:
1568 	raw_spin_unlock_irqrestore(&object->lock, flags);
1569 }
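
/*
 * For instance, an object created via alloc_percpu() is recorded once
 * with OBJECT_PERCPU set and object->size equal to the requested size;
 * the loop above then scans each CPU's copy separately, dropping
 * object->lock between copies so that interrupts are not kept disabled
 * for too long and a concurrent freeing of the object is noticed.
 */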
1570 
1571 /*
1572  * Scan the objects already referenced (gray objects). More objects become
1573  * gray during the scan; if there are no memory leaks, all objects end up scanned.
1574  */
1575 static void scan_gray_list(void)
1576 {
1577 	struct kmemleak_object *object, *tmp;
1578 
1579 	/*
1580 	 * The list traversal is safe for both tail additions and removals
1581 	 * from inside the loop. The kmemleak objects cannot be freed from
1582 	 * outside the loop because their use_count was incremented.
1583 	 */
1584 	object = list_entry(gray_list.next, typeof(*object), gray_list);
1585 	while (&object->gray_list != &gray_list) {
1586 		cond_resched();
1587 
1588 		/* may add new objects to the list */
1589 		if (!scan_should_stop())
1590 			scan_object(object);
1591 
1592 		tmp = list_entry(object->gray_list.next, typeof(*object),
1593 				 gray_list);
1594 
1595 		/* remove the object from the list and release it */
1596 		list_del(&object->gray_list);
1597 		put_object(object);
1598 
1599 		object = tmp;
1600 	}
1601 	WARN_ON(!list_empty(&gray_list));
1602 }
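
/*
 * The open-coded traversal above matters: if scanning object A turns a
 * white object B gray, B is appended to the tail of gray_list and will
 * be visited later in the same pass, which is why the next element is
 * only computed after scan_object() has run.
 */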
1603 
1604 /*
1605  * Conditionally call cond_resched() in an object iteration loop while
1606  * making sure that the given object won't go away without the RCU read
1607  * lock by performing a get_object() if necessary.
1608  */
1609 static void kmemleak_cond_resched(struct kmemleak_object *object)
1610 {
1611 	if (!get_object(object))
1612 		return;	/* Try next object */
1613 
1614 	raw_spin_lock_irq(&kmemleak_lock);
1615 	if (object->del_state & DELSTATE_REMOVED)
1616 		goto unlock_put;	/* Object removed */
1617 	object->del_state |= DELSTATE_NO_DELETE;
1618 	raw_spin_unlock_irq(&kmemleak_lock);
1619 
1620 	rcu_read_unlock();
1621 	cond_resched();
1622 	rcu_read_lock();
1623 
1624 	raw_spin_lock_irq(&kmemleak_lock);
1625 	if (object->del_state & DELSTATE_REMOVED)
1626 		list_del_rcu(&object->object_list);
1627 	object->del_state &= ~DELSTATE_NO_DELETE;
1628 unlock_put:
1629 	raw_spin_unlock_irq(&kmemleak_lock);
1630 	put_object(object);
1631 }
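
/*
 * This pairs with the object removal path (see __remove_object()):
 * while DELSTATE_NO_DELETE is set, a concurrently deleted object is
 * left on the object_list and only marked DELSTATE_REMOVED, keeping the
 * RCU list iterator valid across the cond_resched(); the deferred
 * list_del_rcu() is then performed here once the flag is cleared.
 */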
1632 
1633 /*
1634  * Scan data sections and all the referenced memory blocks allocated via the
1635  * kernel's standard allocators. This function must be called with the
1636  * scan_mutex held.
1637  */
1638 static void kmemleak_scan(void)
1639 {
1640 	struct kmemleak_object *object;
1641 	struct zone *zone;
1642 	int __maybe_unused i;
1643 	int new_leaks = 0;
1644 
1645 	jiffies_last_scan = jiffies;
1646 
1647 	/* prepare the kmemleak_object structures */
1648 	rcu_read_lock();
1649 	list_for_each_entry_rcu(object, &object_list, object_list) {
1650 		raw_spin_lock_irq(&object->lock);
1651 #ifdef DEBUG
1652 		/*
1653 		 * With a few exceptions there should be a maximum of
1654 		 * 1 reference to any object at this point.
1655 		 */
1656 		if (atomic_read(&object->use_count) > 1) {
1657 			pr_debug("object->use_count = %d\n",
1658 				 atomic_read(&object->use_count));
1659 			dump_object_info(object);
1660 		}
1661 #endif
1662 
1663 		/* ignore objects outside lowmem (paint them black) */
1664 		if ((object->flags & OBJECT_PHYS) &&
1665 		   !(object->flags & OBJECT_NO_SCAN)) {
1666 			unsigned long phys = object->pointer;
1667 
1668 			if (PHYS_PFN(phys) < min_low_pfn ||
1669 			    PHYS_PFN(phys + object->size) > max_low_pfn)
1670 				__paint_it(object, KMEMLEAK_BLACK);
1671 		}
1672 
1673 		/* reset the reference count (whiten the object) */
1674 		object->count = 0;
1675 		if (color_gray(object) && get_object(object))
1676 			list_add_tail(&object->gray_list, &gray_list);
1677 
1678 		raw_spin_unlock_irq(&object->lock);
1679 
1680 		if (need_resched())
1681 			kmemleak_cond_resched(object);
1682 	}
1683 	rcu_read_unlock();
1684 
1685 #ifdef CONFIG_SMP
1686 	/* per-cpu sections scanning */
1687 	for_each_possible_cpu(i)
1688 		scan_large_block(__per_cpu_start + per_cpu_offset(i),
1689 				 __per_cpu_end + per_cpu_offset(i));
1690 #endif
1691 
1692 	/*
1693 	 * Struct page scanning for each populated zone.
1694 	 */
1695 	get_online_mems();
1696 	for_each_populated_zone(zone) {
1697 		unsigned long start_pfn = zone->zone_start_pfn;
1698 		unsigned long end_pfn = zone_end_pfn(zone);
1699 		unsigned long pfn;
1700 
1701 		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1702 			struct page *page = pfn_to_online_page(pfn);
1703 
1704 			if (!(pfn & 63))
1705 				cond_resched();
1706 
1707 			if (!page)
1708 				continue;
1709 
1710 			/* only scan pages belonging to this zone */
1711 			if (page_zone(page) != zone)
1712 				continue;
1713 			/* only scan if page is in use */
1714 			if (page_count(page) == 0)
1715 				continue;
1716 			scan_block(page, page + 1, NULL);
1717 		}
1718 	}
1719 	put_online_mems();
1720 
1721 	/*
1722 	 * Scanning the task stacks (may introduce false negatives).
1723 	 */
1724 	if (kmemleak_stack_scan) {
1725 		struct task_struct *p, *g;
1726 
1727 		rcu_read_lock();
1728 		for_each_process_thread(g, p) {
1729 			void *stack = try_get_task_stack(p);
1730 			if (stack) {
1731 				scan_block(stack, stack + THREAD_SIZE, NULL);
1732 				put_task_stack(p);
1733 			}
1734 		}
1735 		rcu_read_unlock();
1736 	}
1737 
1738 	/*
1739 	 * Scan the objects already referenced from the sections scanned
1740 	 * above.
1741 	 */
1742 	scan_gray_list();
1743 
1744 	/*
1745 	 * Check for new or unreferenced objects modified since the previous
1746 	 * scan and color them gray until the next scan.
1747 	 */
1748 	rcu_read_lock();
1749 	list_for_each_entry_rcu(object, &object_list, object_list) {
1750 		if (need_resched())
1751 			kmemleak_cond_resched(object);
1752 
1753 		/*
1754 		 * This is racy but we can save the overhead of lock/unlock
1755 		 * calls. The missed objects, if any, should be caught in
1756 		 * the next scan.
1757 		 */
1758 		if (!color_white(object))
1759 			continue;
1760 		raw_spin_lock_irq(&object->lock);
1761 		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1762 		    && update_checksum(object) && get_object(object)) {
1763 			/* color it gray temporarily */
1764 			object->count = object->min_count;
1765 			list_add_tail(&object->gray_list, &gray_list);
1766 		}
1767 		raw_spin_unlock_irq(&object->lock);
1768 	}
1769 	rcu_read_unlock();
1770 
1771 	/*
1772 	 * Re-scan the gray list for modified unreferenced objects.
1773 	 */
1774 	scan_gray_list();
1775 
1776 	/*
1777 	 * If scanning was stopped do not report any new unreferenced objects.
1778 	 */
1779 	if (scan_should_stop())
1780 		return;
1781 
1782 	/*
1783 	 * Scanning result reporting.
1784 	 */
1785 	rcu_read_lock();
1786 	list_for_each_entry_rcu(object, &object_list, object_list) {
1787 		if (need_resched())
1788 			kmemleak_cond_resched(object);
1789 
1790 		/*
1791 		 * This is racy but we can save the overhead of lock/unlock
1792 		 * calls. The missed objects, if any, should be caught in
1793 		 * the next scan.
1794 		 */
1795 		if (!color_white(object))
1796 			continue;
1797 		raw_spin_lock_irq(&object->lock);
1798 		if (unreferenced_object(object) &&
1799 		    !(object->flags & OBJECT_REPORTED)) {
1800 			object->flags |= OBJECT_REPORTED;
1801 
1802 			if (kmemleak_verbose)
1803 				print_unreferenced(NULL, object);
1804 
1805 			new_leaks++;
1806 		}
1807 		raw_spin_unlock_irq(&object->lock);
1808 	}
1809 	rcu_read_unlock();
1810 
1811 	if (new_leaks) {
1812 		kmemleak_found_leaks = true;
1813 
1814 		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
1815 			new_leaks);
1816 	}
1817 
1818 }
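
/*
 * In summary, a scan pass above: (1) whitens every object and queues
 * the gray ones, (2) scans the roots - per-cpu sections, struct pages,
 * task stacks - and then the gray list, (3) temporarily re-grays white
 * objects whose checksum changed since the last scan, as they were
 * modified and may be only transiently unreferenced, and (4) reports
 * whatever is still white and not yet reported.
 */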
1819 
1820 /*
1821  * Thread function performing automatic memory scanning. Unreferenced objects
1822  * at the end of a memory scan are reported but only the first time.
1823  */
1824 static int kmemleak_scan_thread(void *arg)
1825 {
1826 	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);
1827 
1828 	pr_info("Automatic memory scanning thread started\n");
1829 	set_user_nice(current, 10);
1830 
1831 	/*
1832 	 * Wait before the first scan to allow the system to fully initialize.
1833 	 */
1834 	if (first_run) {
1835 		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
1836 		first_run = 0;
1837 		while (timeout && !kthread_should_stop())
1838 			timeout = schedule_timeout_interruptible(timeout);
1839 	}
1840 
1841 	while (!kthread_should_stop()) {
1842 		signed long timeout = READ_ONCE(jiffies_scan_wait);
1843 
1844 		mutex_lock(&scan_mutex);
1845 		kmemleak_scan();
1846 		mutex_unlock(&scan_mutex);
1847 
1848 		/* wait before the next scan */
1849 		while (timeout && !kthread_should_stop())
1850 			timeout = schedule_timeout_interruptible(timeout);
1851 	}
1852 
1853 	pr_info("Automatic memory scanning thread ended\n");
1854 
1855 	return 0;
1856 }
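
/*
 * With the SECS_FIRST_SCAN and SECS_SCAN_WAIT defaults (60 and 600
 * seconds respectively at the time of writing), the first automatic
 * scan runs about a minute after boot and subsequent scans every ten
 * minutes, unless the period is changed with "echo scan=<secs>" (see
 * kmemleak_write() below).
 */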
1857 
1858 /*
1859  * Start the automatic memory scanning thread. This function must be called
1860  * with the scan_mutex held.
1861  */
1862 static void start_scan_thread(void)
1863 {
1864 	if (scan_thread)
1865 		return;
1866 	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1867 	if (IS_ERR(scan_thread)) {
1868 		pr_warn("Failed to create the scan thread\n");
1869 		scan_thread = NULL;
1870 	}
1871 }
1872 
1873 /*
1874  * Stop the automatic memory scanning thread.
1875  */
1876 static void stop_scan_thread(void)
1877 {
1878 	if (scan_thread) {
1879 		kthread_stop(scan_thread);
1880 		scan_thread = NULL;
1881 	}
1882 }
1883 
1884 /*
1885  * Iterate over the object_list and return the first valid object at or after
1886  * the required position with its use_count incremented.
1888  */
1889 static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1890 {
1891 	struct kmemleak_object *object;
1892 	loff_t n = *pos;
1893 	int err;
1894 
1895 	err = mutex_lock_interruptible(&scan_mutex);
1896 	if (err < 0)
1897 		return ERR_PTR(err);
1898 
1899 	rcu_read_lock();
1900 	list_for_each_entry_rcu(object, &object_list, object_list) {
1901 		if (n-- > 0)
1902 			continue;
1903 		if (get_object(object))
1904 			goto out;
1905 	}
1906 	object = NULL;
1907 out:
1908 	return object;
1909 }
1910 
1911 /*
1912  * Return the next object in the object_list. The function decrements the
1913  * use_count of the previous object and increases that of the next one.
1914  */
1915 static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1916 {
1917 	struct kmemleak_object *prev_obj = v;
1918 	struct kmemleak_object *next_obj = NULL;
1919 	struct kmemleak_object *obj = prev_obj;
1920 
1921 	++(*pos);
1922 
1923 	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1924 		if (get_object(obj)) {
1925 			next_obj = obj;
1926 			break;
1927 		}
1928 	}
1929 
1930 	put_object(prev_obj);
1931 	return next_obj;
1932 }
1933 
1934 /*
1935  * Decrement the use_count of the last object returned, if any.
1936  */
1937 static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1938 {
1939 	if (!IS_ERR(v)) {
1940 		/*
1941 		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1942 		 * waiting was interrupted, so only release it if !IS_ERR.
1943 		 */
1944 		rcu_read_unlock();
1945 		mutex_unlock(&scan_mutex);
1946 		if (v)
1947 			put_object(v);
1948 	}
1949 }
1950 
1951 /*
1952  * Print the information for an unreferenced object to the seq file.
1953  */
1954 static int kmemleak_seq_show(struct seq_file *seq, void *v)
1955 {
1956 	struct kmemleak_object *object = v;
1957 	unsigned long flags;
1958 
1959 	raw_spin_lock_irqsave(&object->lock, flags);
1960 	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1961 		print_unreferenced(seq, object);
1962 	raw_spin_unlock_irqrestore(&object->lock, flags);
1963 	return 0;
1964 }
1965 
1966 static const struct seq_operations kmemleak_seq_ops = {
1967 	.start = kmemleak_seq_start,
1968 	.next  = kmemleak_seq_next,
1969 	.stop  = kmemleak_seq_stop,
1970 	.show  = kmemleak_seq_show,
1971 };
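
/*
 * A read of /sys/kernel/debug/kmemleak therefore walks the object_list
 * under scan_mutex: .start takes the mutex and the RCU read lock and
 * finds the object at *pos, .show prints it if it is still a reported
 * unreferenced object, .next moves the use_count reference to the
 * following object and .stop drops the locks once the buffer is full or
 * the list is exhausted.
 */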
1972 
1973 static int kmemleak_open(struct inode *inode, struct file *file)
1974 {
1975 	return seq_open(file, &kmemleak_seq_ops);
1976 }
1977 
1978 static int dump_str_object_info(const char *str)
1979 {
1980 	unsigned long flags;
1981 	struct kmemleak_object *object;
1982 	unsigned long addr;
1983 
1984 	if (kstrtoul(str, 0, &addr))
1985 		return -EINVAL;
1986 	object = find_and_get_object(addr, 0);
1987 	if (!object) {
1988 		pr_info("Unknown object at 0x%08lx\n", addr);
1989 		return -EINVAL;
1990 	}
1991 
1992 	raw_spin_lock_irqsave(&object->lock, flags);
1993 	dump_object_info(object);
1994 	raw_spin_unlock_irqrestore(&object->lock, flags);
1995 
1996 	put_object(object);
1997 	return 0;
1998 }
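
/*
 * Example usage (the address below is purely illustrative):
 *
 *	echo dump=0xffff888012345678 > /sys/kernel/debug/kmemleak
 *
 * dumps the metadata of the object starting at that exact address to
 * the kernel log; an address inside an object is rejected as an alias.
 */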
1999 
2000 /*
2001  * We use grey instead of black to ensure we can do future scans on the same
2002  * objects. If we did not scan again, these black objects could end up
2003  * holding the only references to newly allocated objects and we would
2004  * report false positives.
2005  */
2006 static void kmemleak_clear(void)
2007 {
2008 	struct kmemleak_object *object;
2009 
2010 	rcu_read_lock();
2011 	list_for_each_entry_rcu(object, &object_list, object_list) {
2012 		raw_spin_lock_irq(&object->lock);
2013 		if ((object->flags & OBJECT_REPORTED) &&
2014 		    unreferenced_object(object))
2015 			__paint_it(object, KMEMLEAK_GREY);
2016 		raw_spin_unlock_irq(&object->lock);
2017 	}
2018 	rcu_read_unlock();
2019 
2020 	kmemleak_found_leaks = false;
2021 }
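
/*
 * A typical leak-hunting workflow built on top of "clear" (a sketch):
 *
 *	echo clear > /sys/kernel/debug/kmemleak
 *	... exercise the suspected code path ...
 *	echo scan > /sys/kernel/debug/kmemleak
 *	cat /sys/kernel/debug/kmemleak
 *
 * Only objects that became unreferenced after the "clear" are reported,
 * since all previously reported objects were painted grey above.
 */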
2022 
2023 static void __kmemleak_do_cleanup(void);
2024 
2025 /*
2026  * File write operation to configure kmemleak at run-time. The following
2027  * commands can be written to the /sys/kernel/debug/kmemleak file:
2028  *   off	- disable kmemleak (irreversible)
2029  *   stack=on	- enable the task stacks scanning
2030  *   stack=off	- disable the tasks stacks scanning
2031  *   scan=on	- start the automatic memory scanning thread
2032  *   scan=off	- stop the automatic memory scanning thread
2033  *   scan=...	- set the automatic memory scanning period in seconds (0 to
2034  *		  disable it)
2035  *   scan	- trigger a memory scan
2036  *   clear	- mark all current reported unreferenced kmemleak objects as
2037  *		  grey to ignore printing them, or free all kmemleak objects
2038  *		  if kmemleak has been disabled.
2039  *   dump=...	- dump information about the object found at the given address
2040  */
2041 static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
2042 			      size_t size, loff_t *ppos)
2043 {
2044 	char buf[64];
2045 	int buf_size;
2046 	int ret;
2047 
2048 	buf_size = min(size, (sizeof(buf) - 1));
2049 	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
2050 		return -EFAULT;
2051 	buf[buf_size] = 0;
2052 
2053 	ret = mutex_lock_interruptible(&scan_mutex);
2054 	if (ret < 0)
2055 		return ret;
2056 
2057 	if (strncmp(buf, "clear", 5) == 0) {
2058 		if (kmemleak_enabled)
2059 			kmemleak_clear();
2060 		else
2061 			__kmemleak_do_cleanup();
2062 		goto out;
2063 	}
2064 
2065 	if (!kmemleak_enabled) {
2066 		ret = -EPERM;
2067 		goto out;
2068 	}
2069 
2070 	if (strncmp(buf, "off", 3) == 0)
2071 		kmemleak_disable();
2072 	else if (strncmp(buf, "stack=on", 8) == 0)
2073 		kmemleak_stack_scan = 1;
2074 	else if (strncmp(buf, "stack=off", 9) == 0)
2075 		kmemleak_stack_scan = 0;
2076 	else if (strncmp(buf, "scan=on", 7) == 0)
2077 		start_scan_thread();
2078 	else if (strncmp(buf, "scan=off", 8) == 0)
2079 		stop_scan_thread();
2080 	else if (strncmp(buf, "scan=", 5) == 0) {
2081 		unsigned secs;
2082 		unsigned long msecs;
2083 
2084 		ret = kstrtouint(buf + 5, 0, &secs);
2085 		if (ret < 0)
2086 			goto out;
2087 
2088 		msecs = secs * MSEC_PER_SEC;
2089 		if (msecs > UINT_MAX)
2090 			msecs = UINT_MAX;
2091 
2092 		stop_scan_thread();
2093 		if (msecs) {
2094 			WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
2095 			start_scan_thread();
2096 		}
2097 	} else if (strncmp(buf, "scan", 4) == 0)
2098 		kmemleak_scan();
2099 	else if (strncmp(buf, "dump=", 5) == 0)
2100 		ret = dump_str_object_info(buf + 5);
2101 	else
2102 		ret = -EINVAL;
2103 
2104 out:
2105 	mutex_unlock(&scan_mutex);
2106 	if (ret < 0)
2107 		return ret;
2108 
2109 	/* ignore the rest of the buffer, only one command at a time */
2110 	*ppos += size;
2111 	return size;
2112 }
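
/*
 * For example, "echo scan=600 > /sys/kernel/debug/kmemleak" restarts
 * the scan thread with a 600 second period, while "echo scan=0" only
 * stops it. Note that "off" is irreversible: afterwards every command
 * except "clear" fails with -EPERM.
 */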
2113 
2114 static const struct file_operations kmemleak_fops = {
2115 	.owner		= THIS_MODULE,
2116 	.open		= kmemleak_open,
2117 	.read		= seq_read,
2118 	.write		= kmemleak_write,
2119 	.llseek		= seq_lseek,
2120 	.release	= seq_release,
2121 };
2122 
2123 static void __kmemleak_do_cleanup(void)
2124 {
2125 	struct kmemleak_object *object, *tmp;
2126 	unsigned int cnt = 0;
2127 
2128 	/*
2129 	 * Kmemleak has already been disabled, so there is no need for RCU list
2130 	 * traversal or for holding kmemleak_lock.
2131 	 */
2132 	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
2133 		__remove_object(object);
2134 		__delete_object(object);
2135 
2136 		/* Call cond_resched() once per 64 iterations to avoid soft lockup */
2137 		if (!(++cnt & 0x3f))
2138 			cond_resched();
2139 	}
2140 }
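
/*
 * The "!(++cnt & 0x3f)" test above is true whenever the low six bits of
 * cnt are zero, i.e. on iterations 64, 128, 192 and so on, matching the
 * "once per 64 iterations" promise without a division.
 */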
2141 
2142 /*
2143  * Stop the memory scanning thread and free the kmemleak internal objects if
2144  * no memory leaks have been found (otherwise kmemleak may still hold useful
2145  * information on the reported leaks).
2146  */
2147 static void kmemleak_do_cleanup(struct work_struct *work)
2148 {
2149 	stop_scan_thread();
2150 
2151 	mutex_lock(&scan_mutex);
2152 	/*
2153 	 * Once kmemleak_scan is guaranteed to have stopped, it is safe to no
2154 	 * longer track object freeing. Ordering of the scan thread stopping and
2155 	 * the memory accesses below is guaranteed by the kthread_stop()
2156 	 * function.
2157 	 */
2158 	kmemleak_free_enabled = 0;
2159 	mutex_unlock(&scan_mutex);
2160 
2161 	if (!kmemleak_found_leaks)
2162 		__kmemleak_do_cleanup();
2163 	else
2164 		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
2165 }
2166 
2167 static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
2168 
2169 /*
2170  * Disable kmemleak. No memory allocation/freeing will be traced once this
2171  * function is called. Disabling kmemleak is an irreversible operation.
2172  */
2173 static void kmemleak_disable(void)
2174 {
2175 	/* atomically check whether it was already invoked */
2176 	if (cmpxchg(&kmemleak_error, 0, 1))
2177 		return;
2178 
2179 	/* stop any memory operation tracing */
2180 	kmemleak_enabled = 0;
2181 
2182 	/* check whether it is too early for a kernel thread */
2183 	if (kmemleak_late_initialized)
2184 		schedule_work(&cleanup_work);
2185 	else
2186 		kmemleak_free_enabled = 0;
2187 
2188 	pr_info("Kernel memory leak detector disabled\n");
2189 }
2190 
2191 /*
2192  * Allow boot-time kmemleak disabling (enabled by default).
2193  */
2194 static int __init kmemleak_boot_config(char *str)
2195 {
2196 	if (!str)
2197 		return -EINVAL;
2198 	if (strcmp(str, "off") == 0)
2199 		kmemleak_disable();
2200 	else if (strcmp(str, "on") == 0) {
2201 		kmemleak_skip_disable = 1;
2202 		stack_depot_request_early_init();
2203 	} else
2204 		return -EINVAL;
2206 	return 0;
2207 }
2208 early_param("kmemleak", kmemleak_boot_config);
2209 
2210 /*
2211  * Kmemleak initialization.
2212  */
2213 void __init kmemleak_init(void)
2214 {
2215 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
2216 	if (!kmemleak_skip_disable) {
2217 		kmemleak_disable();
2218 		return;
2219 	}
2220 #endif
2221 
2222 	if (kmemleak_error)
2223 		return;
2224 
2225 	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
2226 	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
2227 
2228 	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
2229 	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
2230 
2231 	/* register the data/bss sections */
2232 	create_object((unsigned long)_sdata, _edata - _sdata,
2233 		      KMEMLEAK_GREY, GFP_ATOMIC);
2234 	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
2235 		      KMEMLEAK_GREY, GFP_ATOMIC);
2236 	/* only register .data..ro_after_init if not within .data */
2237 	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
2238 		create_object((unsigned long)__start_ro_after_init,
2239 			      __end_ro_after_init - __start_ro_after_init,
2240 			      KMEMLEAK_GREY, GFP_ATOMIC);
2241 }
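
/*
 * The section objects registered above are created KMEMLEAK_GREY: they
 * are scanned as part of the root set on every pass but are never
 * themselves reported as leaks.
 */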
2242 
2243 /*
2244  * Late initialization function.
2245  */
2246 static int __init kmemleak_late_init(void)
2247 {
2248 	kmemleak_late_initialized = 1;
2249 
2250 	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);
2251 
2252 	if (kmemleak_error) {
2253 		/*
2254 		 * Some error occurred and kmemleak was disabled. There is a
2255 		 * small chance that kmemleak_disable() was called immediately
2256 		 * after setting kmemleak_late_initialized and we may end up with
2257 		 * two clean-up threads, although they are serialized by scan_mutex.
2258 		 */
2259 		schedule_work(&cleanup_work);
2260 		return -ENOMEM;
2261 	}
2262 
2263 	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
2264 		mutex_lock(&scan_mutex);
2265 		start_scan_thread();
2266 		mutex_unlock(&scan_mutex);
2267 	}
2268 
2269 	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
2270 		mem_pool_free_count);
2271 
2272 	return 0;
2273 }
2274 late_initcall(kmemleak_late_init);
2275