// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 *
 * Copyright (C) 2020, Google LLC.
 */

#define pr_fmt(fmt) "kfence: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/hash.h>
#include <linux/irq_work.h>
#include <linux/jhash.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/sched/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"

/* Disables KFENCE on the first warning assuming an irrecoverable error. */
#define KFENCE_WARN_ON(cond)                                                   \
	({                                                                     \
		const bool __cond = WARN_ON(cond);                             \
		if (unlikely(__cond))                                          \
			WRITE_ONCE(kfence_enabled, false);                     \
		__cond;                                                        \
	})
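
/*
 * Note: like WARN_ON(), KFENCE_WARN_ON() evaluates to the tested condition,
 * so callers can branch on the result while also disabling KFENCE on a
 * warning, as done in e.g. metadata_to_pageaddr() below.
 */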

/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;

static unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."

static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
	unsigned long num;
	int ret = kstrtoul(val, 0, &num);

	if (ret < 0)
		return ret;

	if (!num) /* Using 0 to indicate KFENCE is disabled. */
		WRITE_ONCE(kfence_enabled, false);
	else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
		return -EINVAL; /* Cannot (re-)enable KFENCE on-the-fly. */

	*((unsigned long *)kp->arg) = num;
	return 0;
}

static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
	if (!READ_ONCE(kfence_enabled))
		return sprintf(buffer, "0\n");

	return param_get_ulong(buffer, kp);
}

static const struct kernel_param_ops sample_interval_param_ops = {
	.set = param_set_sample_interval,
	.get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
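
/*
 * With MODULE_PARAM_PREFIX "kfence." above, the interval is settable at boot
 * via the kernel command line (e.g. "kfence.sample_interval=100"), and --
 * subject to the restrictions enforced by param_set_sample_interval() -- at
 * runtime via /sys/module/kfence/parameters/sample_interval.
 */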

/*
 * Pool usage threshold in percent, above which currently covered allocations
 * are skipped.
 */
static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);

/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __ro_after_init;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */

/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];

/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

/*
 * The static key to set up a KFENCE allocation; or if static keys are not used
 * to gate allocations, to avoid a load and compare if KFENCE is disabled.
 */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);

/* Gates the allocation, ensuring only one succeeds in a given period. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);
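
/*
 * Gate protocol, in short: toggle_allocation_gate() resets the gate to 0 once
 * per sample interval; the first __kfence_alloc() to increment it back above
 * 0 may attempt a guarded allocation, and all later callers bail out until
 * the timer resets the gate again.
 */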

/*
 * A Counting Bloom filter of allocation coverage: limits currently covered
 * allocations of the same source filling up the pool.
 *
 * Assuming a range of 15%-85% unique allocations in the pool at any point in
 * time, the below parameters provide a probability of 0.02-0.33 for false
 * positive hits respectively:
 *
 *	P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)))^HNUM
 */
#define ALLOC_COVERED_HNUM	2
#define ALLOC_COVERED_ORDER	(const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
#define ALLOC_COVERED_SIZE	(1 << ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_HNEXT(h)	hash_32(h, ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_MASK	(ALLOC_COVERED_SIZE - 1)
static atomic_t alloc_covered[ALLOC_COVERED_SIZE];
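
/*
 * Filter usage: kfence_guarded_alloc() adds the allocation's stack hash via
 * alloc_covered_add(hash, 1), kfence_guarded_free() removes it again with
 * alloc_covered_add(hash, -1), and __kfence_alloc() checks
 * alloc_covered_contains() to skip already-covered allocation sites once pool
 * usage exceeds kfence_skip_covered_thresh.
 */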

/* Stack depth used to determine uniqueness of an allocation. */
#define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)

/*
 * Randomness for stack hashes, making the same collisions across reboots and
 * different machines less likely.
 */
static u32 stack_hash_seed __ro_after_init;

/* Statistics counters for debugfs. */
enum kfence_counter_id {
	KFENCE_COUNTER_ALLOCATED,
	KFENCE_COUNTER_ALLOCS,
	KFENCE_COUNTER_FREES,
	KFENCE_COUNTER_ZOMBIES,
	KFENCE_COUNTER_BUGS,
	KFENCE_COUNTER_SKIP_INCOMPAT,
	KFENCE_COUNTER_SKIP_CAPACITY,
	KFENCE_COUNTER_SKIP_COVERED,
	KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
	[KFENCE_COUNTER_ALLOCATED]	= "currently allocated",
	[KFENCE_COUNTER_ALLOCS]		= "total allocations",
	[KFENCE_COUNTER_FREES]		= "total frees",
	[KFENCE_COUNTER_ZOMBIES]	= "zombie allocations",
	[KFENCE_COUNTER_BUGS]		= "total bugs",
	[KFENCE_COUNTER_SKIP_INCOMPAT]	= "skipped allocations (incompatible)",
	[KFENCE_COUNTER_SKIP_CAPACITY]	= "skipped allocations (capacity)",
	[KFENCE_COUNTER_SKIP_COVERED]	= "skipped allocations (covered)",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);

/* === Internals ============================================================ */

static inline bool should_skip_covered(void)
{
	unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;

	return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
}
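
/*
 * E.g. with the default skip_covered_thresh of 75 and
 * CONFIG_KFENCE_NUM_OBJECTS=255, thresh = (255 * 75) / 100 = 191, so covered
 * allocations are skipped once more than 191 objects are currently allocated.
 */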

static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
{
	num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
	num_entries = filter_irq_stacks(stack_entries, num_entries);
	return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
}

/*
 * Adds (or subtracts) count @val for allocation stack trace hash
 * @alloc_stack_hash from Counting Bloom filter.
 */
static void alloc_covered_add(u32 alloc_stack_hash, int val)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}
}

/*
 * Returns true if the allocation stack trace hash @alloc_stack_hash is
 * currently contained (non-zero count) in Counting Bloom filter.
 */
static bool alloc_covered_contains(u32 alloc_stack_hash)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
			return false;
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}

	return true;
}

static bool kfence_protect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}
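
/*
 * Pool layout, as set up in kfence_init_pool(): two guard pages, then for
 * each object a data page followed by its guard page:
 *
 *   __kfence_pool: [ guard | guard | obj 0 | guard | obj 1 | guard | ... ]
 *
 * Object i's data page hence starts at offset (i + 1) * 2 * PAGE_SIZE, which
 * is the calculation below.
 */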
static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

	/* The checks do not affect performance; only called from slow-paths. */

	/* Only call with a pointer into kfence_metadata. */
	if (KFENCE_WARN_ON(meta < kfence_metadata ||
			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
		return 0;

	/*
	 * This metadata object only ever maps to 1 page; verify that the stored
	 * address is in the expected range.
	 */
	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
		return 0;

	return pageaddr;
}

/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void
metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
		      unsigned long *stack_entries, size_t num_stack_entries)
{
	struct kfence_track *track =
		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;

	lockdep_assert_held(&meta->lock);

	if (stack_entries) {
		memcpy(track->stack_entries, stack_entries,
		       num_stack_entries * sizeof(stack_entries[0]));
	} else {
		/*
		 * Skip over 1 (this) function; noinline ensures we do not
		 * accidentally skip over the caller by never inlining.
		 */
		num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
	}
	track->num_stack_entries = num_stack_entries;
	track->pid = task_pid_nr(current);
	track->cpu = raw_smp_processor_id();
	track->ts_nsec = local_clock(); /* Same source as printk timestamps. */

	/*
	 * Pairs with READ_ONCE() in
	 *	kfence_shutdown_cache(),
	 *	kfence_handle_page_fault().
	 */
	WRITE_ONCE(meta->state, next);
}

/* Write canary byte to @addr. */
static inline bool set_canary_byte(u8 *addr)
{
	*addr = KFENCE_CANARY_PATTERN(addr);
	return true;
}

/* Check canary byte at @addr. */
static inline bool check_canary_byte(u8 *addr)
{
	if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
		return true;

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
	kfence_report_error((unsigned long)addr, false, NULL, addr_to_metadata((unsigned long)addr),
			    KFENCE_ERROR_CORRUPTION);
	return false;
}

/* __always_inline this to ensure we won't do an indirect call to fn. */
static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
{
	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
	unsigned long addr;

	lockdep_assert_held(&meta->lock);

	/*
	 * We'll iterate over each canary byte per-side until fn() returns
	 * false. However, we'll still iterate over the canary bytes to the
	 * right of the object even if there was an error in the canary bytes to
	 * the left of the object. Specifically, if check_canary_byte()
	 * generates an error, showing both sides might give more clues as to
	 * what the error is about when displaying which bytes were corrupted.
	 */

	/* Apply to left of object. */
	for (addr = pageaddr; addr < meta->addr; addr++) {
		if (!fn((u8 *)addr))
			break;
	}

	/* Apply to right of object. */
	for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) {
		if (!fn((u8 *)addr))
			break;
	}
}

static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
				  unsigned long *stack_entries, size_t num_stack_entries,
				  u32 alloc_stack_hash)
{
	struct kfence_metadata *meta = NULL;
	unsigned long flags;
	struct page *page;
	void *addr;

	/* Try to obtain a free object. */
	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
	if (!list_empty(&kfence_freelist)) {
		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
		list_del_init(&meta->list);
	}
	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
	if (!meta) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
		return NULL;
	}

	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
		/*
		 * This is extremely unlikely -- we are reporting on a
		 * use-after-free, which locked meta->lock, and the reporting
		 * code via printk calls kmalloc() which ends up in
		 * kfence_alloc() and tries to grab the same object that we're
		 * reporting on. While it has never been observed, lockdep does
		 * report that there is a possibility of deadlock. Fix it by
		 * using trylock and bailing out gracefully.
		 */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		/* Put the object back on the freelist. */
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		return NULL;
	}

	meta->addr = metadata_to_pageaddr(meta);
	/* Unprotect if we're reusing this page. */
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_unprotect(meta->addr);

	/*
	 * Note: for allocations made before RNG initialization,
	 * prandom_u32_max() will always return zero. We still benefit from
	 * enabling KFENCE as early as possible, even when the RNG is not yet
	 * available, as this will allow KFENCE to detect bugs due to earlier
	 * allocations. The only downside is that the out-of-bounds accesses
	 * detected are deterministic for such allocations.
	 */
	if (prandom_u32_max(2)) {
		/* Allocate on the "right" side, re-calculate address. */
		meta->addr += PAGE_SIZE - size;
		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
	}

	addr = (void *)meta->addr;

	/* Update remaining metadata. */
	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
	WRITE_ONCE(meta->cache, cache);
	meta->size = size;
	meta->alloc_stack_hash = alloc_stack_hash;

	for_each_canary(meta, set_canary_byte);

	/* Set required struct page fields. */
	page = virt_to_page(meta->addr);
	page->slab_cache = cache;
	if (IS_ENABLED(CONFIG_SLUB))
		page->objects = 1;
	if (IS_ENABLED(CONFIG_SLAB))
		page->s_mem = addr;

	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(alloc_stack_hash, 1);

	/* Memory initialization. */

	/*
	 * We check slab_want_init_on_alloc() ourselves, rather than letting
	 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
	 * redzone.
	 */
	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
		memzero_explicit(addr, size);
	if (cache->ctor)
		cache->ctor(addr);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

	return addr;
}

static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
	struct kcsan_scoped_access assert_page_exclusive;
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);

	if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
		/* Invalid or double-free, bail out. */
		atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
		kfence_report_error((unsigned long)addr, false, NULL, meta,
				    KFENCE_ERROR_INVALID_FREE);
		raw_spin_unlock_irqrestore(&meta->lock, flags);
		return;
	}

	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
				  &assert_page_exclusive);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
		kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

	/* Restore page protection if there was an OOB access. */
	if (meta->unprotected_page) {
		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
		kfence_protect(meta->unprotected_page);
		meta->unprotected_page = 0;
	}

	/* Check canary bytes for memory corruption. */
	for_each_canary(meta, check_canary_byte);

	/*
	 * Clear memory if init-on-free is set. While we protect the page, the
	 * data is still there, and after a use-after-free is detected, we
	 * unprotect the page, so the data is still accessible.
	 */
	if (!zombie && unlikely(slab_want_init_on_free(meta->cache)))
		memzero_explicit(addr, meta->size);

	/* Mark the object as freed. */
	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);

	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(meta->alloc_stack_hash, -1);

	/* Protect to detect use-after-frees. */
	kfence_protect((unsigned long)addr);

	kcsan_end_scoped_access(&assert_page_exclusive);
	if (!zombie) {
		/* Add it to the tail of the freelist for reuse. */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		KFENCE_WARN_ON(!list_empty(&meta->list));
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
	} else {
		/* See kfence_shutdown_cache(). */
		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
	}
}

static void rcu_guarded_free(struct rcu_head *h)
{
	struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

	kfence_guarded_free((void *)meta->addr, meta, false);
}

static bool __init kfence_init_pool(void)
{
	unsigned long addr = (unsigned long)__kfence_pool;
	struct page *pages;
	int i;
	char *p;

	if (!__kfence_pool)
		return false;

	if (!arch_kfence_init_pool())
		goto err;

	pages = virt_to_page(addr);

	/*
	 * Set up object pages: they must have PG_slab set, to avoid freeing
	 * these as real pages.
	 *
	 * We also want to avoid inserting kfence_free() in the kfree()
	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
	 * enters __slab_free() slow-path.
	 */
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
		struct page *page = &pages[i];

		if (!i || (i % 2))
			continue;

		/* Verify we do not have a compound head page. */
		if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
			goto err;

		__SetPageSlab(page);
#ifdef CONFIG_MEMCG
		page->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
				   MEMCG_DATA_OBJCGS;
#endif
	}

	/*
	 * Protect the first 2 pages. The first page is mostly unnecessary, and
	 * merely serves as an extended guard page. However, adding one
	 * additional page in the beginning gives us an even number of pages,
	 * which simplifies the mapping of address to metadata index.
	 */
	for (i = 0; i < 2; i++) {
		if (unlikely(!kfence_protect(addr)))
			goto err;

		addr += PAGE_SIZE;
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata[i];

		/* Initialize metadata. */
		INIT_LIST_HEAD(&meta->list);
		raw_spin_lock_init(&meta->lock);
		meta->state = KFENCE_OBJECT_UNUSED;
		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
		list_add_tail(&meta->list, &kfence_freelist);

		/* Protect the right redzone. */
		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
			goto err;

		addr += 2 * PAGE_SIZE;
	}

	/*
	 * The pool is live and will never be deallocated from this point on.
	 * Remove the pool object from the kmemleak object tree, as it would
	 * otherwise overlap with allocations returned by kfence_alloc(), which
	 * are registered with kmemleak through the slab post-alloc hook.
	 */
	kmemleak_free(__kfence_pool);

	return true;

err:
	/*
	 * Only release unprotected pages, and do not try to go back and change
	 * page attributes due to risk of failing to do so as well. If changing
	 * page attributes for some pages fails, it is very likely that it also
	 * fails for the first page, and therefore expect addr==__kfence_pool in
	 * most failure cases.
	 */
	for (p = (char *)addr; p < __kfence_pool + KFENCE_POOL_SIZE; p += PAGE_SIZE) {
		struct page *page = virt_to_page(p);

		if (!PageSlab(page))
			continue;
#ifdef CONFIG_MEMCG
		page->memcg_data = 0;
#endif
		__ClearPageSlab(page);
	}
	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
	__kfence_pool = NULL;
	return false;
}

/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
	int i;

	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
	for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
		seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);
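
/* DEFINE_SHOW_ATTRIBUTE() provides the stats_fops used in kfence_debugfs_init(). */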

/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL
 * is used to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
	struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_print_object(seq, meta);
	raw_spin_unlock_irqrestore(&meta->lock, flags);
	seq_puts(seq, "---------------------------------\n");

	return 0;
}

static const struct seq_operations object_seqops = {
	.start = start_object,
	.next = next_object,
	.stop = stop_object,
	.show = show_object,
};

static int open_objects(struct inode *inode, struct file *file)
{
	return seq_open(file, &object_seqops);
}

static const struct file_operations objects_fops = {
	.open = open_objects,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int kfence_debugfs_init(void)
{
	struct dentry *kfence_dir;

	if (!READ_ONCE(kfence_enabled))
		return 0;

	kfence_dir = debugfs_create_dir("kfence", NULL);
	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
	return 0;
}

late_initcall(kfence_debugfs_init);

/* === Allocation Gate Timer ================================================ */

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Wait queue to wake up allocation-gate timer task. */
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);

static void wake_up_kfence_timer(struct irq_work *work)
{
	wake_up(&allocation_wait);
}
static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
#endif

/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
 * more aggressive sampling intervals), we could get away with a variant that
 * avoids IPIs, at the cost of not immediately capturing allocations if the
 * instructions remain cached.
 */
static struct delayed_work kfence_timer;
static void toggle_allocation_gate(struct work_struct *work)
{
	if (!READ_ONCE(kfence_enabled))
		return;

	atomic_set(&kfence_allocation_gate, 0);
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/* Enable static key, and await allocation to happen. */
	static_branch_enable(&kfence_allocation_key);

	if (sysctl_hung_task_timeout_secs) {
		/*
		 * During low activity with no allocations we might wait a
		 * while; let's avoid the hung task warning.
		 */
		wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
					sysctl_hung_task_timeout_secs * HZ / 2);
	} else {
		wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
	}

	/* Disable static key and reset timer. */
	static_branch_disable(&kfence_allocation_key);
#endif
	queue_delayed_work(system_unbound_wq, &kfence_timer,
			   msecs_to_jiffies(kfence_sample_interval));
}
static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate);

/* === Public interface ===================================================== */

void __init kfence_alloc_pool(void)
{
	if (!kfence_sample_interval)
		return;

	/* If the pool has already been initialized by arch code, skip below. */
	if (__kfence_pool)
		return;

	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

	if (!__kfence_pool)
		pr_err("failed to allocate pool\n");
}

void __init kfence_init(void)
{
	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
	if (!kfence_sample_interval)
		return;

	stack_hash_seed = (u32)random_get_entropy();
	if (!kfence_init_pool()) {
		pr_err("%s failed\n", __func__);
		return;
	}

	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
		static_branch_enable(&kfence_allocation_key);
	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
}

void kfence_shutdown_cache(struct kmem_cache *s)
{
	unsigned long flags;
	struct kfence_metadata *meta;
	int i;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		bool in_use;

		meta = &kfence_metadata[i];

		/*
		 * If we observe some inconsistent cache and state pair where we
		 * should have returned false here, cache destruction is racing
		 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
		 * the lock will not help, as different critical section
		 * serialization will have the same outcome.
		 */
		if (READ_ONCE(meta->cache) != s ||
		    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
		raw_spin_unlock_irqrestore(&meta->lock, flags);

		if (in_use) {
			/*
			 * This cache still has allocations, and we should not
			 * release them back into the freelist so they can still
			 * safely be used and retain the kernel's default
			 * behaviour of keeping the allocations alive (leak the
			 * cache); however, they effectively become "zombie
			 * allocations" as the KFENCE objects are the only ones
			 * still in use and the owning cache is being destroyed.
			 *
			 * We mark them freed, so that any subsequent use shows
			 * more useful error messages that will include stack
			 * traces of the user of the object, the original
			 * allocation, and caller to shutdown_cache().
			 */
			kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
		}
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		meta = &kfence_metadata[i];

		/* See above. */
		if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
			meta->cache = NULL;
		raw_spin_unlock_irqrestore(&meta->lock, flags);
	}
}

void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
	unsigned long stack_entries[KFENCE_STACK_DEPTH];
	size_t num_stack_entries;
	u32 alloc_stack_hash;

	/*
	 * Perform size check before switching kfence_allocation_gate, so that
	 * we don't disable KFENCE without making an allocation.
	 */
	if (size > PAGE_SIZE) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	/*
	 * Skip allocations from non-default zones, including DMA. We cannot
	 * guarantee that pages in the KFENCE pool will have the requested
	 * properties (e.g. reside in DMAable memory).
	 */
	if ((flags & GFP_ZONEMASK) ||
	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	if (atomic_inc_return(&kfence_allocation_gate) > 1)
		return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/*
	 * waitqueue_active() is fully ordered after the update of
	 * kfence_allocation_gate per atomic_inc_return().
	 */
	if (waitqueue_active(&allocation_wait)) {
		/*
		 * Calling wake_up() here may deadlock when allocations happen
		 * from within timer code. Use an irq_work to defer it.
		 */
		irq_work_queue(&wake_up_kfence_timer_work);
	}
#endif

	if (!READ_ONCE(kfence_enabled))
		return NULL;

	num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);

	/*
	 * Do expensive check for coverage of allocation in slow-path after
	 * allocation_gate has already become non-zero, even though it might
	 * mean not making any allocation within a given sample interval.
	 *
	 * This ensures reasonable allocation coverage when the pool is almost
	 * full, including avoiding long-lived allocations of the same source
	 * filling up the pool (e.g. pagecache allocations).
	 */
	alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries);
	if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]);
		return NULL;
	}

	return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries,
				    alloc_stack_hash);
}

size_t kfence_ksize(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? (void *)meta->addr : NULL;
}

void __kfence_free(void *addr)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

#ifdef CONFIG_MEMCG
	KFENCE_WARN_ON(meta->objcg);
#endif
	/*
	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
	 * the object, as the object page may be recycled for other-typed
	 * objects once it has been freed. meta->cache may be NULL if the cache
	 * was destroyed.
	 */
	if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
		call_rcu(&meta->rcu_head, rcu_guarded_free);
	else
		kfence_guarded_free(addr, meta, false);
}

bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
	const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
	struct kfence_metadata *to_report = NULL;
	enum kfence_error_type error_type;
	unsigned long flags;

	if (!is_kfence_address((void *)addr))
		return false;

	if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
		return kfence_unprotect(addr); /* ... unprotect and proceed. */

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
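
	/*
	 * Per the pool layout (see comment above metadata_to_pageaddr()), odd
	 * page indices within the pool are guard pages and even indices are
	 * object pages: a fault on an odd page is a redzone access adjacent
	 * to at most two live objects, and is reported against the closer one.
	 */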
	if (page_index % 2) {
		/* This is a redzone, report a buffer overflow. */
		struct kfence_metadata *meta;
		int distance = 0;

		meta = addr_to_metadata(addr - PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			to_report = meta;
			/* Data race ok; distance calculation approximate. */
			distance = addr - data_race(meta->addr + meta->size);
		}

		meta = addr_to_metadata(addr + PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			/* Data race ok; distance calculation approximate. */
			if (!to_report || distance > data_race(meta->addr) - addr)
				to_report = meta;
		}

		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		to_report->unprotected_page = addr;
		error_type = KFENCE_ERROR_OOB;

		/*
		 * If the object was freed before we took the lock we can still
		 * report this as an OOB -- the report will simply show the
		 * stacktrace of the free as well.
		 */
	} else {
		to_report = addr_to_metadata(addr);
		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		error_type = KFENCE_ERROR_UAF;
		/*
		 * We may race with __kfence_alloc(), and it is possible that a
		 * freed object may be reallocated. We simply report this as a
		 * use-after-free, with the stack trace showing the place where
		 * the object was re-allocated.
		 */
	}

out:
	if (to_report) {
		kfence_report_error(addr, is_write, regs, to_report, error_type);
		raw_spin_unlock_irqrestore(&to_report->lock, flags);
	} else {
		/* This may be a UAF or OOB access, but we can't be sure. */
		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
	}

	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}