// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 *
 * Copyright (C) 2020, Google LLC.
 */

#define pr_fmt(fmt) "kfence: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"

/* Disables KFENCE on the first warning assuming an irrecoverable error. */
#define KFENCE_WARN_ON(cond)                                                   \
        ({                                                                     \
                const bool __cond = WARN_ON(cond);                             \
                if (unlikely(__cond))                                          \
                        WRITE_ONCE(kfence_enabled, false);                     \
                __cond;                                                        \
        })

/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;

static unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."

static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
        unsigned long num;
        int ret = kstrtoul(val, 0, &num);

        if (ret < 0)
                return ret;

        if (!num) /* Using 0 to indicate KFENCE is disabled. */
                WRITE_ONCE(kfence_enabled, false);
        else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
                return -EINVAL; /* Cannot (re-)enable KFENCE on-the-fly. */

        *((unsigned long *)kp->arg) = num;
        return 0;
}

static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
        if (!READ_ONCE(kfence_enabled))
                return sprintf(buffer, "0\n");

        return param_get_ulong(buffer, kp);
}

static const struct kernel_param_ops sample_interval_param_ops = {
        .set = param_set_sample_interval,
        .get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);

/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __ro_after_init;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */

/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];

/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* The static key to set up a KFENCE allocation. */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
#endif

/* Gates the allocation, ensuring only one succeeds in a given period. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);

/* Statistics counters for debugfs. */
enum kfence_counter_id {
        KFENCE_COUNTER_ALLOCATED,
        KFENCE_COUNTER_ALLOCS,
        KFENCE_COUNTER_FREES,
        KFENCE_COUNTER_ZOMBIES,
        KFENCE_COUNTER_BUGS,
        KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
        [KFENCE_COUNTER_ALLOCATED] = "currently allocated",
        [KFENCE_COUNTER_ALLOCS] = "total allocations",
        [KFENCE_COUNTER_FREES] = "total frees",
        [KFENCE_COUNTER_ZOMBIES] = "zombie allocations",
        [KFENCE_COUNTER_BUGS] = "total bugs",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);

/* === Internals ============================================================ */

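/*
 * Toggle protection of the page containing @addr via the arch-provided
 * kfence_protect_page(); a failure disables KFENCE via KFENCE_WARN_ON().
 * kfence_unprotect() is the inverse of kfence_protect().
 */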
static bool kfence_protect(unsigned long addr)
{
        return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
        return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}

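/* Translate an address in the KFENCE pool to its object metadata, or NULL. */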
static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
{
        long index;

        /* The checks do not affect performance; only called from slow-paths. */

        if (!is_kfence_address((void *)addr))
                return NULL;

        /*
         * May be an invalid index if called with an address at the edge of
         * __kfence_pool, in which case we would report an "invalid access"
         * error.
         */
        index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
        if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
                return NULL;

        return &kfence_metadata[index];
}

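/* Translate object metadata back to the start address of its backing page. */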
static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
        unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
        unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

        /* The checks do not affect performance; only called from slow-paths. */

        /* Only call with a pointer into kfence_metadata. */
        if (KFENCE_WARN_ON(meta < kfence_metadata ||
                           meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
                return 0;

        /*
         * This metadata object only ever maps to 1 page; verify that the stored
         * address is in the expected range.
         */
        if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
                return 0;

        return pageaddr;
}

/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void metadata_update_state(struct kfence_metadata *meta,
                                           enum kfence_object_state next)
{
        struct kfence_track *track =
                next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;

        lockdep_assert_held(&meta->lock);

        /*
         * Skip over 1 (this) function; noinline ensures we do not accidentally
         * skip over the caller by never inlining.
         */
        track->num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
        track->pid = task_pid_nr(current);

        /*
         * Pairs with READ_ONCE() in
         * kfence_shutdown_cache(),
         * kfence_handle_page_fault().
         */
        WRITE_ONCE(meta->state, next);
}

/* Write canary byte to @addr. */
static inline bool set_canary_byte(u8 *addr)
{
        *addr = KFENCE_CANARY_PATTERN(addr);
        return true;
}

/* Check canary byte at @addr. */
static inline bool check_canary_byte(u8 *addr)
{
        if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
                return true;

        atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
        kfence_report_error((unsigned long)addr, false, NULL, addr_to_metadata((unsigned long)addr),
                            KFENCE_ERROR_CORRUPTION);
        return false;
}

/* __always_inline this to ensure we won't do an indirect call to fn. */
static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
{
        const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
        unsigned long addr;

        lockdep_assert_held(&meta->lock);

        /*
         * We'll iterate over each canary byte per-side until fn() returns
         * false. However, we'll still iterate over the canary bytes to the
         * right of the object even if there was an error in the canary bytes to
         * the left of the object. Specifically, if check_canary_byte()
         * generates an error, showing both sides might give more clues as to
         * what the error is about when displaying which bytes were corrupted.
         */

        /* Apply to left of object. */
        for (addr = pageaddr; addr < meta->addr; addr++) {
                if (!fn((u8 *)addr))
                        break;
        }

        /* Apply to right of object. */
        for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) {
                if (!fn((u8 *)addr))
                        break;
        }
}

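/*
 * Allocate a guarded object: take an object off the freelist, place it at a
 * randomly chosen end of its dedicated page, set up canary bytes and the
 * required struct page fields, and initialize the memory as the cache expects.
 */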
static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp)
{
        struct kfence_metadata *meta = NULL;
        unsigned long flags;
        struct page *page;
        void *addr;

        /* Try to obtain a free object. */
        raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
        if (!list_empty(&kfence_freelist)) {
                meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
                list_del_init(&meta->list);
        }
        raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
        if (!meta)
                return NULL;

        if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
                /*
                 * This is extremely unlikely -- we are reporting on a
                 * use-after-free, which locked meta->lock, and the reporting
                 * code via printk calls kmalloc() which ends up in
                 * kfence_alloc() and tries to grab the same object that we're
                 * reporting on. While it has never been observed, lockdep does
                 * report that there is a possibility of deadlock. Fix it by
                 * using trylock and bailing out gracefully.
                 */
                raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
                /* Put the object back on the freelist. */
                list_add_tail(&meta->list, &kfence_freelist);
                raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

                return NULL;
        }

        meta->addr = metadata_to_pageaddr(meta);
        /* Unprotect if we're reusing this page. */
        if (meta->state == KFENCE_OBJECT_FREED)
                kfence_unprotect(meta->addr);

        /*
         * Note: for allocations made before RNG initialization, prandom_u32_max()
         * will always return zero. We still benefit from enabling KFENCE as early
         * as possible, even when the RNG is not yet available, as this will allow
         * KFENCE to detect bugs due to earlier allocations. The only downside is
         * that the out-of-bounds accesses detected are deterministic for such
         * allocations.
         */
        if (prandom_u32_max(2)) {
                /* Allocate on the "right" side, re-calculate address. */
                meta->addr += PAGE_SIZE - size;
                meta->addr = ALIGN_DOWN(meta->addr, cache->align);
        }

        addr = (void *)meta->addr;

        /* Update remaining metadata. */
        metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED);
        /* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
        WRITE_ONCE(meta->cache, cache);
        meta->size = size;
        for_each_canary(meta, set_canary_byte);

        /* Set required struct page fields. */
        page = virt_to_page(meta->addr);
        page->slab_cache = cache;
        if (IS_ENABLED(CONFIG_SLUB))
                page->objects = 1;
        if (IS_ENABLED(CONFIG_SLAB))
                page->s_mem = addr;

        raw_spin_unlock_irqrestore(&meta->lock, flags);

        /* Memory initialization. */

        /*
         * We check slab_want_init_on_alloc() ourselves, rather than letting
         * SL*B do the initialization, as otherwise we might overwrite KFENCE's
         * redzone.
         */
        if (unlikely(slab_want_init_on_alloc(gfp, cache)))
                memzero_explicit(addr, size);
        if (cache->ctor)
                cache->ctor(addr);

        if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
                kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

        atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
        atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

        return addr;
}

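/*
 * Free a guarded object: validate the free, check canary bytes, re-protect the
 * page to catch use-after-frees, and return the object to the freelist (unless
 * it is a zombie allocation of a cache being destroyed).
 */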
static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
        struct kcsan_scoped_access assert_page_exclusive;
        unsigned long flags;

        raw_spin_lock_irqsave(&meta->lock, flags);

        if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
                /* Invalid or double-free, bail out. */
                atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
                kfence_report_error((unsigned long)addr, false, NULL, meta,
                                    KFENCE_ERROR_INVALID_FREE);
                raw_spin_unlock_irqrestore(&meta->lock, flags);
                return;
        }

        /* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
        kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
                                  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
                                  &assert_page_exclusive);

        if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
                kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

        /* Restore page protection if there was an OOB access. */
        if (meta->unprotected_page) {
                memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
                kfence_protect(meta->unprotected_page);
                meta->unprotected_page = 0;
        }

        /* Check canary bytes for memory corruption. */
        for_each_canary(meta, check_canary_byte);

        /*
         * Clear memory if init-on-free is set. While we protect the page, the
         * data is still there, and after a use-after-free is detected, we
         * unprotect the page, so the data is still accessible.
         */
        if (!zombie && unlikely(slab_want_init_on_free(meta->cache)))
                memzero_explicit(addr, meta->size);

        /* Mark the object as freed. */
        metadata_update_state(meta, KFENCE_OBJECT_FREED);

        raw_spin_unlock_irqrestore(&meta->lock, flags);

        /* Protect to detect use-after-frees. */
        kfence_protect((unsigned long)addr);

        kcsan_end_scoped_access(&assert_page_exclusive);
        if (!zombie) {
                /* Add it to the tail of the freelist for reuse. */
                raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
                KFENCE_WARN_ON(!list_empty(&meta->list));
                list_add_tail(&meta->list, &kfence_freelist);
                raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

                atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
                atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
        } else {
                /* See kfence_shutdown_cache(). */
                atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
        }
}

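/* RCU callback used to defer the free of SLAB_TYPESAFE_BY_RCU objects. */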
static void rcu_guarded_free(struct rcu_head *h)
{
        struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

        kfence_guarded_free((void *)meta->addr, meta, false);
}

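/*
 * Initialize the KFENCE pool: the pool alternates guard (protected) pages and
 * object pages, with one additional guard page at the start, so that every
 * object page is flanked by protected guard pages on both sides.
 */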
static bool __init kfence_init_pool(void)
{
        unsigned long addr = (unsigned long)__kfence_pool;
        struct page *pages;
        int i;

        if (!__kfence_pool)
                return false;

        if (!arch_kfence_init_pool())
                goto err;

        pages = virt_to_page(addr);

        /*
         * Set up object pages: they must have PG_slab set, to avoid freeing
         * these as real pages.
         *
         * We also want to avoid inserting kfence_free() in the kfree()
         * fast-path in SLUB, and therefore need to ensure kfree() correctly
         * enters __slab_free() slow-path.
         */
        for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
                if (!i || (i % 2))
                        continue;

                /* Verify we do not have a compound head page. */
                if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
                        goto err;

                __SetPageSlab(&pages[i]);
        }

        /*
         * Protect the first 2 pages. The first page is mostly unnecessary, and
         * merely serves as an extended guard page. However, adding one
         * additional page in the beginning gives us an even number of pages,
         * which simplifies the mapping of address to metadata index.
         */
        for (i = 0; i < 2; i++) {
                if (unlikely(!kfence_protect(addr)))
                        goto err;

                addr += PAGE_SIZE;
        }

        for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
                struct kfence_metadata *meta = &kfence_metadata[i];

                /* Initialize metadata. */
                INIT_LIST_HEAD(&meta->list);
                raw_spin_lock_init(&meta->lock);
                meta->state = KFENCE_OBJECT_UNUSED;
                meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
                list_add_tail(&meta->list, &kfence_freelist);

                /* Protect the right redzone. */
                if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
                        goto err;

                addr += 2 * PAGE_SIZE;
        }

        /*
         * The pool is live and will never be deallocated from this point on.
         * Remove the pool object from the kmemleak object tree, as it would
         * otherwise overlap with allocations returned by kfence_alloc(), which
         * are registered with kmemleak through the slab post-alloc hook.
         */
        kmemleak_free(__kfence_pool);

        return true;

err:
        /*
         * Only release unprotected pages, and do not try to go back and change
         * page attributes due to risk of failing to do so as well. If changing
         * page attributes for some pages fails, it is very likely that it also
         * fails for the first page, and therefore expect addr==__kfence_pool in
         * most failure cases.
         */
        memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
        __kfence_pool = NULL;
        return false;
}

/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
        int i;

        seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
        for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
                seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);

/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is used
 * to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
        if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
                return (void *)((long)*pos + 1);
        return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
                return (void *)((long)*pos + 1);
        return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
        struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
        unsigned long flags;

        raw_spin_lock_irqsave(&meta->lock, flags);
        kfence_print_object(seq, meta);
        raw_spin_unlock_irqrestore(&meta->lock, flags);
        seq_puts(seq, "---------------------------------\n");

        return 0;
}

static const struct seq_operations object_seqops = {
        .start = start_object,
        .next = next_object,
        .stop = stop_object,
        .show = show_object,
};

static int open_objects(struct inode *inode, struct file *file)
{
        return seq_open(file, &object_seqops);
}

static const struct file_operations objects_fops = {
        .open = open_objects,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static int __init kfence_debugfs_init(void)
{
        struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);

        debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
        debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
        return 0;
}

late_initcall(kfence_debugfs_init);

/* === Allocation Gate Timer ================================================ */

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Wait queue to wake up allocation-gate timer task. */
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);

static void wake_up_kfence_timer(struct irq_work *work)
{
        wake_up(&allocation_wait);
}
static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
#endif

/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
 * more aggressive sampling intervals), we could get away with a variant that
 * avoids IPIs, at the cost of not immediately capturing allocations if the
 * instructions remain cached.
 */
static struct delayed_work kfence_timer;
static void toggle_allocation_gate(struct work_struct *work)
{
        if (!READ_ONCE(kfence_enabled))
                return;

        atomic_set(&kfence_allocation_gate, 0);
#ifdef CONFIG_KFENCE_STATIC_KEYS
        /* Enable static key, and await allocation to happen. */
        static_branch_enable(&kfence_allocation_key);

        if (sysctl_hung_task_timeout_secs) {
                /*
                 * During low activity with no allocations we might wait a
                 * while; let's avoid the hung task warning.
                 */
                wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
                                        sysctl_hung_task_timeout_secs * HZ / 2);
        } else {
                wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
        }

        /* Disable static key and reset timer. */
        static_branch_disable(&kfence_allocation_key);
#endif
        queue_delayed_work(system_unbound_wq, &kfence_timer,
                           msecs_to_jiffies(kfence_sample_interval));
}
static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate);

/* === Public interface ===================================================== */

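/*
 * Reserve the KFENCE pool via memblock; must run early in boot, while memblock
 * allocations are still possible.
 */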
void __init kfence_alloc_pool(void)
{
        if (!kfence_sample_interval)
                return;

        __kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

        if (!__kfence_pool)
                pr_err("failed to allocate pool\n");
}

void __init kfence_init(void)
{
        /* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
        if (!kfence_sample_interval)
                return;

        if (!kfence_init_pool()) {
                pr_err("%s failed\n", __func__);
                return;
        }

        WRITE_ONCE(kfence_enabled, true);
        queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
        pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
                CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
                (void *)(__kfence_pool + KFENCE_POOL_SIZE));
}

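/*
 * Called on kmem_cache destruction: turn live KFENCE objects of the cache into
 * "zombie allocations", and clear stale cache pointers from already-freed
 * objects.
 */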
void kfence_shutdown_cache(struct kmem_cache *s)
{
        unsigned long flags;
        struct kfence_metadata *meta;
        int i;

        for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
                bool in_use;

                meta = &kfence_metadata[i];

                /*
                 * If we observe some inconsistent cache and state pair where we
                 * should have returned false here, cache destruction is racing
                 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
                 * the lock will not help, as different critical section
                 * serialization will have the same outcome.
                 */
                if (READ_ONCE(meta->cache) != s ||
                    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
                        continue;

                raw_spin_lock_irqsave(&meta->lock, flags);
                in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
                raw_spin_unlock_irqrestore(&meta->lock, flags);

                if (in_use) {
                        /*
                         * This cache still has allocations, and we should not
                         * release them back into the freelist so they can still
                         * safely be used and retain the kernel's default
                         * behaviour of keeping the allocations alive (leak the
                         * cache); however, they effectively become "zombie
                         * allocations" as the KFENCE objects are the only ones
                         * still in use and the owning cache is being destroyed.
                         *
                         * We mark them freed, so that any subsequent use shows
                         * more useful error messages that will include stack
                         * traces of the user of the object, the original
                         * allocation, and caller to shutdown_cache().
                         */
                        kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
                }
        }

        for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
                meta = &kfence_metadata[i];

                /* See above. */
                if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
                        continue;

                raw_spin_lock_irqsave(&meta->lock, flags);
                if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
                        meta->cache = NULL;
                raw_spin_unlock_irqrestore(&meta->lock, flags);
        }
}

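/*
 * Allocation entry point behind kfence_alloc(): returns a guarded object only
 * if the allocation gate is open, i.e. at most one guarded allocation per
 * sample interval.
 */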
void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
        /*
         * Perform size check before switching kfence_allocation_gate, so that
         * we don't disable KFENCE without making an allocation.
         */
        if (size > PAGE_SIZE)
                return NULL;

        /*
         * Skip allocations from non-default zones, including DMA. We cannot
         * guarantee that pages in the KFENCE pool will have the requested
         * properties (e.g. reside in DMAable memory).
         */
        if ((flags & GFP_ZONEMASK) ||
            (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32)))
                return NULL;

        /*
         * allocation_gate only needs to become non-zero, so it doesn't make
         * sense to continue writing to it and pay the associated contention
         * cost, in case we have a large number of concurrent allocations.
         */
        if (atomic_read(&kfence_allocation_gate) || atomic_inc_return(&kfence_allocation_gate) > 1)
                return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
        /*
         * waitqueue_active() is fully ordered after the update of
         * kfence_allocation_gate per atomic_inc_return().
         */
        if (waitqueue_active(&allocation_wait)) {
                /*
                 * Calling wake_up() here may deadlock when allocations happen
                 * from within timer code. Use an irq_work to defer it.
                 */
                irq_work_queue(&wake_up_kfence_timer_work);
        }
#endif

        if (!READ_ONCE(kfence_enabled))
                return NULL;

        return kfence_guarded_alloc(s, size, flags);
}

size_t kfence_ksize(const void *addr)
{
        const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

        /*
         * Read locklessly -- if there is a race with __kfence_alloc(), this is
         * either a use-after-free or invalid access.
         */
        return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
        const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

        /*
         * Read locklessly -- if there is a race with __kfence_alloc(), this is
         * either a use-after-free or invalid access.
         */
        return meta ? (void *)meta->addr : NULL;
}

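/*
 * Free entry point behind kfence_free(); defers the actual free via RCU for
 * SLAB_TYPESAFE_BY_RCU caches, so the object page is not reused while RCU
 * readers may still access it.
 */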
void __kfence_free(void *addr)
{
        struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

        /*
         * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
         * the object, as the object page may be recycled for other-typed
         * objects once it has been freed. meta->cache may be NULL if the cache
         * was destroyed.
         */
        if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
                call_rcu(&meta->rcu_head, rcu_guarded_free);
        else
                kfence_guarded_free(addr, meta, false);
}

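/*
 * Fault handler for accesses to the KFENCE pool: classify the access as
 * out-of-bounds (fault on a guard page), use-after-free (fault on a protected
 * object page), or invalid; report it, then unprotect the page so the faulting
 * access can proceed.
 */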
bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
        const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
        struct kfence_metadata *to_report = NULL;
        enum kfence_error_type error_type;
        unsigned long flags;

        if (!is_kfence_address((void *)addr))
                return false;

        if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
                return kfence_unprotect(addr); /* ... unprotect and proceed. */

        atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

        if (page_index % 2) {
                /* This is a redzone, report a buffer overflow. */
                struct kfence_metadata *meta;
                int distance = 0;

                meta = addr_to_metadata(addr - PAGE_SIZE);
                if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
                        to_report = meta;
                        /* Data race ok; distance calculation approximate. */
                        distance = addr - data_race(meta->addr + meta->size);
                }

                meta = addr_to_metadata(addr + PAGE_SIZE);
                if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
                        /* Data race ok; distance calculation approximate. */
                        if (!to_report || distance > data_race(meta->addr) - addr)
                                to_report = meta;
                }

                if (!to_report)
                        goto out;

                raw_spin_lock_irqsave(&to_report->lock, flags);
                to_report->unprotected_page = addr;
                error_type = KFENCE_ERROR_OOB;

                /*
                 * If the object was freed before we took the lock we can still
                 * report this as an OOB -- the report will simply show the
                 * stacktrace of the free as well.
                 */
        } else {
                to_report = addr_to_metadata(addr);
                if (!to_report)
                        goto out;

                raw_spin_lock_irqsave(&to_report->lock, flags);
                error_type = KFENCE_ERROR_UAF;
                /*
                 * We may race with __kfence_alloc(), and it is possible that a
                 * freed object may be reallocated. We simply report this as a
                 * use-after-free, with the stack trace showing the place where
                 * the object was re-allocated.
                 */
        }

out:
        if (to_report) {
                kfence_report_error(addr, is_write, regs, to_report, error_type);
                raw_spin_unlock_irqrestore(&to_report->lock, flags);
        } else {
                /* This may be a UAF or OOB access, but we can't be sure. */
                kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
        }

        return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}