// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

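/*
 * Capture the current stack trace, drop the frames beyond the IRQ entry
 * point, and deduplicate it in the stack depot. Returns a compact handle
 * that can be stored in the object's KASAN metadata.
 */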
depot_stack_handle_t kasan_save_stack(gfp_t flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        nr_entries = filter_irq_stacks(entries, nr_entries);
        return stack_depot_save(entries, nr_entries, flags);
}

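/* Record the current task's pid and a handle to its stack trace in @track. */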
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
        track->pid = current->pid;
        track->stack = kasan_save_stack(flags);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
        current->kasan_depth++;
}

void kasan_disable_current(void)
{
        current->kasan_depth--;
}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

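/* Mark an arbitrary [address, address + size) range as accessible. */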
void __kasan_unpoison_range(const void *address, size_t size)
{
        kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        void *base = task_stack_page(task);

        kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
        /*
         * Calculate the task stack base address. Avoid using 'current'
         * because this function is called by early resume code which hasn't
         * yet set up the percpu register (%gs).
         */
        void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

        kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */

/*
 * Only allow cache merging when stack collection is disabled and no metadata
 * is present.
 */
slab_flags_t __kasan_never_merge(void)
{
        if (kasan_stack_collection_enabled())
                return SLAB_KASAN;
        return 0;
}

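/*
 * Tag every page of a fresh page_alloc allocation with a single tag (random
 * for the tag-based modes) and mark the memory as accessible. Highmem pages
 * are not covered by KASAN and are skipped.
 */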
void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
        u8 tag;
        unsigned long i;

        if (unlikely(PageHighMem(page)))
                return;

        tag = kasan_random_tag();
        for (i = 0; i < (1 << order); i++)
                page_kasan_tag_set(page + i, tag);
        kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
}

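/* Poison freed pages so that any further access to them gets reported. */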
void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
        if (likely(!PageHighMem(page)))
                kasan_poison(page_address(page), PAGE_SIZE << order,
                             KASAN_FREE_PAGE, init);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
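/*
 * For example, a 40-byte object gets a 16-byte redzone and a 1000-byte
 * object gets a 128-byte one.
 */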
static inline unsigned int optimal_redzone(unsigned int object_size)
{
        return
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
                object_size <= 4096      - 128  ? 128 :
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

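/*
 * Reserve room for KASAN metadata in the cache's redzone: alloc metadata
 * whenever stack collection is enabled, and free metadata for the generic
 * mode when it cannot be stored in the object itself. The grown size is
 * capped by KMALLOC_MAX_SIZE.
 */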
void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                          slab_flags_t *flags)
{
        unsigned int ok_size;
        unsigned int optimal_size;

        /*
         * SLAB_KASAN is used to mark caches as ones that are sanitized by
         * KASAN. Currently this flag is used in two places:
         * 1. In slab_ksize() when calculating the size of the accessible
         *    memory within the object.
         * 2. In slab_common.c to prevent merging of sanitized caches.
         */
        *flags |= SLAB_KASAN;

        if (!kasan_stack_collection_enabled())
                return;

        ok_size = *size;

        /* Add alloc meta into redzone. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /*
         * If alloc meta doesn't fit, don't add it.
         * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
         * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
         * larger sizes.
         */
        if (*size > KMALLOC_MAX_SIZE) {
                cache->kasan_info.alloc_meta_offset = 0;
                *size = ok_size;
                /* Continue, since free meta might still fit. */
        }

        /* Only the generic mode uses free meta or flexible redzones. */
        if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
                cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
                return;
        }

        /*
         * Add free meta into redzone when it's not possible to store
         * it in the object. This is the case when:
         * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
         *    be touched after it was freed, or
         * 2. Object has a constructor, which means it's expected to
         *    retain its content until the next allocation, or
         * 3. Object is too small.
         * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
         */
        if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
            cache->object_size < sizeof(struct kasan_free_meta)) {
                ok_size = *size;

                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);

                /* If free meta doesn't fit, don't add it. */
                if (*size > KMALLOC_MAX_SIZE) {
                        cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
                        *size = ok_size;
                }
        }

        /* Calculate size with optimal redzone. */
        optimal_size = cache->object_size + optimal_redzone(cache->object_size);
        /* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
        if (optimal_size > KMALLOC_MAX_SIZE)
                optimal_size = KMALLOC_MAX_SIZE;
        /* Use optimal size if the size with added metas is not large enough. */
        if (*size < optimal_size)
                *size = optimal_size;
}

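/*
 * Mark kmalloc caches: for these, alloc info is saved in kasan_kmalloc()
 * rather than in kasan_slab_alloc() (see set_alloc_info()).
 */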
void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
        cache->kasan_info.is_kmalloc = true;
}

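/* Return how many bytes of KASAN metadata are stored in the object's redzone. */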
size_t __kasan_metadata_size(struct kmem_cache *cache)
{
        if (!kasan_stack_collection_enabled())
                return 0;
        return (cache->kasan_info.alloc_meta_offset ?
                sizeof(struct kasan_alloc_meta) : 0) +
                (cache->kasan_info.free_meta_offset ?
                sizeof(struct kasan_free_meta) : 0);
}

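/* Return a pointer to the object's alloc metadata, or NULL if the cache has none. */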
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
                                              const void *object)
{
        if (!cache->kasan_info.alloc_meta_offset)
                return NULL;
        return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}

#ifdef CONFIG_KASAN_GENERIC
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
                                            const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
                return NULL;
        return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}
#endif

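/*
 * Reset the tags of all pages backing a new slab and poison the whole slab;
 * individual objects get unpoisoned as the allocator hands them out.
 */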
void __kasan_poison_slab(struct page *page)
{
        unsigned long i;

        for (i = 0; i < compound_nr(page); i++)
                page_kasan_tag_reset(page + i);
        kasan_poison(page_address(page), page_size(page),
                     KASAN_KMALLOC_REDZONE, false);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
                        KASAN_KMALLOC_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on objects indexes, so that objects that are next to each other
 *    get different tags.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
                            const void *object, bool init)
{
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                return 0xff;

        /*
         * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
         * set, assign a tag when the object is being allocated (init == false).
         */
        if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return init ? KASAN_TAG_KERNEL : kasan_random_tag();

        /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
        /* For SLAB assign tags based on the object index in the freelist. */
        return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
        /*
         * For SLUB assign a random tag during slab creation, otherwise reuse
         * the already assigned tag.
         */
        return init ? kasan_random_tag() : get_tag(object);
#endif
}

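/* Zero the object's alloc metadata and preassign a tag where required (see assign_tag()). */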
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
                                          const void *object)
{
        struct kasan_alloc_meta *alloc_meta;

        if (kasan_stack_collection_enabled()) {
                alloc_meta = kasan_get_alloc_meta(cache, object);
                if (alloc_meta)
                        __memset(alloc_meta, 0, sizeof(*alloc_meta));
        }

        /* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
        object = set_tag(object, assign_tag(cache, object, true));

        return (void *)object;
}

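/*
 * Returns true if freeing the object must be skipped by the caller: either
 * an invalid free was detected and reported, or the object was placed into
 * the quarantine.
 */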
static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
                                unsigned long ip, bool quarantine, bool init)
{
        u8 tag;
        void *tagged_object;

        tag = get_tag(object);
        tagged_object = object;
        object = kasan_reset_tag(object);

        if (is_kfence_address(object))
                return false;

        if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
            object)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return false;

        if (!kasan_byte_accessible(tagged_object)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
                        KASAN_KMALLOC_FREE, init);

        if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
                return false;

        if (kasan_stack_collection_enabled())
                kasan_set_free_info(cache, object, tag);

        return kasan_quarantine_put(cache, object);
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object,
                                unsigned long ip, bool init)
{
        return ____kasan_slab_free(cache, object, ip, true, init);
}

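/* Returns true if an invalid free was detected and reported. */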
static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
{
        if (ptr != page_address(virt_to_head_page(ptr))) {
                kasan_report_invalid_free(ptr, ip);
                return true;
        }

        if (!kasan_byte_accessible(ptr)) {
                kasan_report_invalid_free(ptr, ip);
                return true;
        }

        /*
         * The object will be poisoned by kasan_free_pages() or
         * kasan_slab_free_mempool().
         */

        return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
        ____kasan_kfree_large(ptr, ip);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        /*
         * Even though this function is only called for kmem_cache_alloc and
         * kmalloc backed mempool allocations, those allocations can still be
         * !PageSlab() when the size provided to kmalloc is larger than
         * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
         */
        if (unlikely(!PageSlab(page))) {
                if (____kasan_kfree_large(ptr, ip))
                        return;
                kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE, false);
        } else {
                ____kasan_slab_free(page->slab_cache, ptr, ip, false, false);
        }
}

static void set_alloc_info(struct kmem_cache *cache, void *object,
                                gfp_t flags, bool is_kmalloc)
{
        struct kasan_alloc_meta *alloc_meta;

        /* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
        if (cache->kasan_info.is_kmalloc && !is_kmalloc)
                return;

        alloc_meta = kasan_get_alloc_meta(cache, object);
        if (alloc_meta)
                kasan_set_track(&alloc_meta->alloc_track, flags);
}

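/*
 * Tag and unpoison a freshly allocated slab object and, unless the object
 * comes from a kmalloc cache, record the allocation stack trace.
 */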
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
                                       void *object, gfp_t flags, bool init)
{
        u8 tag;
        void *tagged_object;

        if (gfpflags_allow_blocking(flags))
                kasan_quarantine_reduce();

        if (unlikely(object == NULL))
                return NULL;

        if (is_kfence_address(object))
                return (void *)object;

        /*
         * Generate and assign random tag for tag-based modes.
         * Tag is ignored in set_tag() for the generic mode.
         */
        tag = assign_tag(cache, object, false);
        tagged_object = set_tag(object, tag);

        /*
         * Unpoison the whole object.
         * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
         */
        kasan_unpoison(tagged_object, cache->object_size, init);

        /* Save alloc info (if possible) for non-kmalloc() allocations. */
        if (kasan_stack_collection_enabled())
                set_alloc_info(cache, (void *)object, flags, false);

        return tagged_object;
}

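/*
 * Poison the redzone between the requested size and the end of the object
 * and record the allocation stack for kmalloc()/krealloc() allocations.
 */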
static inline void *____kasan_kmalloc(struct kmem_cache *cache,
                                const void *object, size_t size, gfp_t flags)
{
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                kasan_quarantine_reduce();

        if (unlikely(object == NULL))
                return NULL;

        if (is_kfence_address(kasan_reset_tag(object)))
                return (void *)object;

        /*
         * The object has already been unpoisoned by kasan_slab_alloc() for
         * kmalloc() or by kasan_krealloc() for krealloc().
         */

        /*
         * The redzone has byte-level precision for the generic mode.
         * Partially poison the last object granule to cover the unaligned
         * part of the redzone.
         */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                kasan_poison_last_granule((void *)object, size);

        /* Poison the aligned part of the redzone. */
        redzone_start = round_up((unsigned long)(object + size),
                                KASAN_GRANULE_SIZE);
        redzone_end = round_up((unsigned long)(object + cache->object_size),
                                KASAN_GRANULE_SIZE);
        kasan_poison((void *)redzone_start, redzone_end - redzone_start,
                        KASAN_KMALLOC_REDZONE, false);

        /*
         * Save alloc info (if possible) for kmalloc() allocations.
         * This also rewrites the alloc info when called from kasan_krealloc().
         */
        if (kasan_stack_collection_enabled())
                set_alloc_info(cache, (void *)object, flags, true);

        /* Keep the tag that was set by kasan_slab_alloc(). */
        return (void *)object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                    size_t size, gfp_t flags)
{
        return ____kasan_kmalloc(cache, object, size, flags);
}
EXPORT_SYMBOL(__kasan_kmalloc);

void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
                                          gfp_t flags)
{
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                kasan_quarantine_reduce();

        if (unlikely(ptr == NULL))
                return NULL;

        /*
         * The object has already been unpoisoned by kasan_alloc_pages() for
         * alloc_pages() or by kasan_krealloc() for krealloc().
         */

        /*
         * The redzone has byte-level precision for the generic mode.
         * Partially poison the last object granule to cover the unaligned
         * part of the redzone.
         */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                kasan_poison_last_granule(ptr, size);

        /* Poison the aligned part of the redzone. */
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_GRANULE_SIZE);
        redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
        kasan_poison((void *)redzone_start, redzone_end - redzone_start,
                     KASAN_PAGE_REDZONE, false);

        return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return (void *)object;

        /*
         * Unpoison the object's data.
         * Part of it might already have been unpoisoned, but it's unknown
         * how big that part is.
         */
        kasan_unpoison(object, size, false);

        page = virt_to_head_page(object);

        /* Piggy-back on kmalloc() instrumentation to poison the redzone. */
        if (unlikely(!PageSlab(page)))
                return __kasan_kmalloc_large(object, size, flags);
        else
                return ____kasan_kmalloc(page->slab_cache, object, size, flags);
}

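/* Check that a single byte is accessible; report a bug and return false if not. */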
bool __kasan_check_byte(const void *address, unsigned long ip)
{
        if (!kasan_byte_accessible(address)) {
                kasan_report((unsigned long)address, 1, false, ip);
                return false;
        }
        return true;
}