// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback.
 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
 *   Accesses to the metadata (e.g. count) are protected by this lock. Note
 *   that some members of this structure may be protected by other means
 *   (atomic or kmemleak_lock). This lock is also held when scanning the
 *   corresponding memory block to avoid the kernel freeing it via the
 *   kmemleak_free() callback. This is less heavyweight than holding a global
 *   lock like kmemleak_lock during scanning.
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer.
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
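
/*
 * Illustrative sketch of that nesting order during a scan episode (the
 * "object" and "other" names are hypothetical; compare scan_object() and
 * scan_block() below):
 *
 *	mutex_lock(&scan_mutex);
 *	raw_spin_lock_irqsave(&object->lock, flags);	// object being scanned
 *	raw_spin_lock_irqsave(&kmemleak_lock, flags2);	// pointee look-up
 *	raw_spin_lock_nested(&other->lock, SINGLE_DEPTH_NESTING);
 *	// ...unlocked in the reverse order
 */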

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)
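
/*
 * For example (illustrative only), an internal allocation made on behalf of
 * a GFP_NOFS | __GFP_ZERO caller keeps only the bits also present in
 * GFP_KERNEL/GFP_ATOMIC and gains the defensive modifiers:
 *
 *	gfp_t gfp = gfp_kmemleak_mask(GFP_NOFS | __GFP_ZERO);
 *	// __GFP_ZERO is dropped; __GFP_NORETRY, __GFP_NOMEMALLOC and
 *	// __GFP_NOWARN are added
 */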

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	raw_spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN	(1 << 3)

#define HEX_PREFIX		"    "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* protecting the access to object_list and object_tree_root */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on, will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)	do {	\
	if (seq)					\
		seq_printf(seq, fmt, ##__VA_ARGS__);	\
	else						\
		pr_warn(fmt, ##__VA_ARGS__);		\
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
				 int rowsize, int groupsize, const void *buf,
				 size_t len, bool ascii)
{
	if (seq)
		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
			     buf, len, ascii);
	else
		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
			       rowsize, groupsize, buf, len, ascii);
}

/*
 * Printing of the objects hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	warn_or_seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		warn_or_seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%x\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	stack_trace_print(object->trace, object->trace_len, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;
	unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);

	while (rb) {
		struct kmemleak_object *object;
		unsigned long untagged_objp;

		object = rb_entry(rb, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

		if (untagged_ptr < untagged_objp)
			rb = object->rb_node.rb_left;
		else if (untagged_objp + object->size <= untagged_ptr)
			rb = object->rb_node.rb_right;
		else if (untagged_objp == untagged_ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been registered and the object should no longer be used. This function must
 * be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;

	/* try the slab allocator first */
	if (object_cache) {
		object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
		if (object)
			return object;
	}

	/* slab allocation failed, try the memory pool */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = list_first_entry_or_null(&mem_pool_free_list,
					  typeof(*object), object_list);
	if (object)
		list_del(&object->object_list);
	else if (mem_pool_free_count)
		object = &mem_pool[--mem_pool_free_count];
	else
		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
		kmem_cache_free(object_cache, object);
		return;
	}

	/* add the object to the memory pool free list */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	list_add(&object->object_list, &mem_pool_free_list);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	mem_pool_free(object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	/*
	 * It may be too early for the RCU callbacks, however, there is no
	 * concurrent object_list traversal when !object_cache and all objects
	 * came from the memory pool. Free the object directly.
	 */
	if (object_cache)
		call_rcu(&object->rcu, free_object_rcu);
	else
		free_object_rcu(&object->rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Remove an object from the object_tree_root and object_list. Must be called
 * with the kmemleak_lock held _if_ kmemleak is still enabled.
 */
static void __remove_object(struct kmemleak_object *object)
{
	rb_erase(&object->rb_node, &object_tree_root);
	list_del_rcu(&object->object_list);
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object)
		__remove_object(object);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	return stack_trace_save(trace, MAX_TRACE, 2);
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object = mem_pool_alloc(gfp);
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	raw_spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = kfence_ksize((void *)ptr) ?: size;
	object->excess_ref = 0;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_serving_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	raw_spin_lock_irqsave(&kmemleak_lock, flags);

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	min_addr = min(min_addr, untagged_ptr);
	max_addr = max(max_addr, untagged_ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
		if (untagged_ptr + size <= untagged_objp)
			link = &parent->rb_node.rb_left;
		else if (untagged_objp + parent->size <= untagged_ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area = NULL;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

	if (scan_area_cache)
		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));

	raw_spin_lock_irqsave(&object->lock, flags);
	if (!area) {
		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
		/* mark the object for full scan to avoid false positives */
		object->flags |= OBJECT_FULL_SCAN;
		goto out_unlock;
	}
	if (size == SIZE_MAX) {
		size = untagged_objp + object->size - untagged_ptr;
	} else if (untagged_ptr + size > untagged_objp + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
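
/*
 * Illustrative sketch for callers with a custom allocator that kmemleak
 * cannot see (my_pool_alloc/buf are hypothetical names):
 *
 *	void *buf = my_pool_alloc(pool, 128);
 *	if (buf)
 *		kmemleak_alloc(buf, 128, 1, GFP_KERNEL);
 *	// min_count == 1: reported as a leak if no reference to buf is
 *	// found during a scan
 */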

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
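
/*
 * Illustrative sketch of why the excess_ref link matters (paraphrased from
 * the thread-stack caching in free_thread_stack(); details may differ): a
 * stored pointer to the vm_struct rather than to area->addr itself still
 * keeps the vmalloc'ed block reachable:
 *
 *	cached_stacks[i] = tsk->stack_vm_area;	// reference to vm_struct only
 *
 * With the excess_ref set above, such a reference counts towards area->addr
 * during scanning.
 */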

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_ignore);
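
/*
 * Illustrative sketch contrasting the two annotations (all names
 * hypothetical):
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	kmemleak_not_leak(obj);		// gray: never reported, still scanned
 *
 *	table = kmalloc(256 * sizeof(u32), GFP_KERNEL);
 *	kmemleak_ignore(table);		// black: never reported, never scanned
 */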

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);
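
/*
 * Illustrative sketch, assuming a hypothetical structure whose payload is
 * raw data and only the header can hold pointers:
 *
 *	struct blob {
 *		struct list_head list;	// the only pointers in the object
 *		u8 payload[4096];	// raw data, never holds pointers
 *	} *b = kmalloc(sizeof(*b), GFP_KERNEL);
 *
 *	kmemleak_scan_area(&b->list, sizeof(b->list), GFP_KERNEL);
 *	// only the list_head range is scanned for references
 */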

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object.
 *              See kmemleak_alloc()
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
			       gfp_t gfp)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);
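
/*
 * Illustrative sketch, e.g. for memory reserved from memblock early during
 * boot (the call site is hypothetical):
 *
 *	phys_addr_t pa = memblock_phys_alloc(SZ_1M, SMP_CACHE_BYTES);
 *	if (pa)
 *		kmemleak_alloc_phys(pa, SZ_1M, 0, 0);
 *	// min_count == 0: scanned for references but never reported as a leak
 */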

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 * @phys:	physical address of the beginning or inside an object. This
 *		also represents the start of the range to be freed
 * @size:	size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *			    address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	kasan_disable_current();
	kcsan_disable_current();
	object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
	kasan_enable_current();
	kcsan_enable_current();

	return object->checksum != old_csum;
}
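
/*
 * The checksum is what lets kmemleak_scan() tell a genuinely unreferenced
 * object apart from one that was merely written to between scans; see the
 * second gray_list pass there:
 *
 *	if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
 *	    && update_checksum(object) && get_object(object)) {
 *		// recently modified, re-gray and scan it once more
 *		object->count = object->min_count;
 *		list_add_tail(&object->gray_list, &gray_list);
 *	}
 */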

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;
	unsigned long untagged_ptr;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;
		unsigned long excess_ref;

		if (scan_should_stop())
			break;

		kasan_disable_current();
		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
		kasan_enable_current();

		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		/* only pass surplus references (object already gray) */
		if (color_gray(object)) {
			excess_ref = object->excess_ref;
			/* no need for update_refs() if object already gray */
		} else {
			excess_ref = 0;
			update_refs(object);
		}
		raw_spin_unlock(&object->lock);

		if (excess_ref) {
			object = lookup_object(excess_ref, 0);
			if (!object)
				continue;
			if (object == scanned)
				/* circular reference, ignore */
				continue;
			raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
			update_refs(object);
			raw_spin_unlock(&object->lock);
		}
	}
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
#endif

/*
 * Scan a memory block corresponding to a kmemleak_object. The caller must
 * ensure that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list) ||
	    object->flags & OBJECT_FULL_SCAN) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct zone *zone;
	int __maybe_unused i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_populated_zone(zone) {
		unsigned long start_pfn = zone->zone_start_pfn;
		unsigned long end_pfn = zone_end_pfn(zone);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page)
				continue;

			/* only scan pages belonging to this zone */
			if (page_zone(page) != zone)
				continue;
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
			if (!(pfn & 63))
				cond_resched();
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		rcu_read_lock();
		for_each_process_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		}
		rcu_read_unlock();
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;

			if (kmemleak_verbose)
				print_unreferenced(NULL, object);

			new_leaks++;
		}
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * found at the end of a memory scan are reported, but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

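	/*
	 * The interruptible sleep below wakes early when the thread is being
	 * stopped, so stop_scan_thread() (e.g. via "scan=off") does not have
	 * to wait out a full scan period.
	 */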
	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	raw_spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};
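
/*
 * The seq_file core drives the iterator above roughly as start(pos),
 * show(v), next(v, pos), show(v), ..., stop(v). scan_mutex and the RCU
 * read lock taken in kmemleak_seq_start() are thus held for the whole
 * traversal and only dropped in kmemleak_seq_stop().
 */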

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

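/*
 * Look up and dump the object whose start address was passed via the
 * "dump=" command. Note that find_and_get_object() is called with
 * alias == 0, so the address must be the exact start of an allocated
 * block, not a pointer into its middle.
 */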
static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans, these black objects could
 * potentially contain references to newly allocated objects in the future
 * and we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all current reported unreferenced kmemleak objects as
 *		  grey to ignore printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled.
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EPERM;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = kstrtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}
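
/*
 * Example interaction from user space (illustrative; assumes debugfs is
 * mounted at /sys/kernel/debug):
 *
 *   echo scan > /sys/kernel/debug/kmemleak	trigger an immediate scan
 *   cat /sys/kernel/debug/kmemleak		read the current leak reports
 *   echo scan=600 > /sys/kernel/debug/kmemleak	rescan every 600 seconds
 *   echo clear > /sys/kernel/debug/kmemleak	grey out reported objects
 */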

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * Kmemleak has already been disabled, so there is no need for RCU
	 * list traversal or for holding kmemleak_lock.
	 */
	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
		__remove_object(object);
		__delete_object(object);
	}
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no memory leaks have been found (otherwise, kmemleak may still hold useful
 * information on the leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once it is made sure that kmemleak_scan has stopped, it is safe to no
	 * longer track object freeing. Ordering of the scan thread stopping and
	 * the memory accesses below is guaranteed by the kthread_stop()
	 * function.
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
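
/*
 * Example (illustrative): booting with "kmemleak=off" on the kernel command
 * line disables the detector before any allocation is traced, while
 * "kmemleak=on" overrides CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF via
 * kmemleak_skip_disable above.
 */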

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_disable();
		return;
	}
#endif

	if (kmemleak_error)
		return;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	/*
	 * Register the data/bss sections; they are created gray so that they
	 * are scanned for references but never themselves reported as leaks.
	 */
	create_object((unsigned long)_sdata, _edata - _sdata,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	/* only register .data..ro_after_init if not within .data */
	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
		create_object((unsigned long)__start_ro_after_init,
			      __end_ro_after_init - __start_ro_after_init,
			      KMEMLEAK_GREY, GFP_ATOMIC);
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	kmemleak_initialized = 1;

	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
		mutex_lock(&scan_mutex);
		start_scan_thread();
		mutex_unlock(&scan_mutex);
	}

	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
		mem_pool_free_count);

	return 0;
}
late_initcall(kmemleak_late_init);
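
/*
 * Illustrative only, not part of kmemleak: a minimal user-space sketch of
 * driving the debugfs interface documented above. It assumes debugfs is
 * mounted at /sys/kernel/debug and sufficient privileges; it is kept inside
 * "#if 0" because it would be built separately from the kernel.
 */
#if 0
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/kmemleak";
	char line[256];
	FILE *f;

	/* trigger an immediate scan */
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fputs("scan", f);
	fclose(f);

	/* read back any leak reports */
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
#endif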