/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

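/*
 * Generic KASAN tracks memory state in a shadow region: every
 * KASAN_SHADOW_SCALE_SIZE (1 << KASAN_SHADOW_SCALE_SHIFT, i.e. 8) bytes of
 * kernel memory are described by one shadow byte, located via
 * kasan_mem_to_shadow().  A shadow byte of 0 means all 8 bytes are
 * accessible, a value of 1..7 means only the first N bytes of the granule
 * are accessible, and a negative value is a poison marker such as
 * KASAN_FREE_PAGE or KASAN_KMALLOC_REDZONE describing why the memory must
 * not be touched.
 */
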
void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}

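/*
 * Unpoisoning marks 'size' bytes starting at 'address' as accessible.
 * When 'size' is not a multiple of KASAN_SHADOW_SCALE_SIZE, the last shadow
 * byte records how many bytes of its 8-byte granule are valid.  For example,
 * unpoisoning 13 bytes clears one full shadow byte and writes 5
 * (13 & KASAN_SHADOW_MASK) into the following one.
 */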
void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}

/*
 * All of the functions below are always inlined so that the compiler can
 * perform better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */

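/*
 * An access to the byte at 'addr' is valid when its shadow byte is 0, or
 * when the byte's offset within its 8-byte granule (addr & KASAN_SHADOW_MASK)
 * is below a positive shadow value.  For example, with a shadow value of 5,
 * offsets 0..4 are accessible and offsets 5..7 are not.
 */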
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 1))
			return true;

		/*
		 * If a single shadow byte covers the 2-byte access, we don't
		 * need to do anything more.  Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 3))
			return true;

		/*
		 * If a single shadow byte covers the 4-byte access, we don't
		 * need to do anything more.  Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 7))
			return true;

		/*
		 * If a single shadow byte covers the 8-byte access, we don't
		 * need to do anything more.  Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		u16 shadow_first_bytes = *(u16 *)shadow_addr;

		if (unlikely(shadow_first_bytes))
			return true;

		/*
		 * If two shadow bytes cover the 16-byte access, we don't
		 * need to do anything more.  Otherwise, test the last
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return memory_is_poisoned_1(addr + 15);
	}

	return false;
}

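/*
 * For accesses of other sizes, the relevant shadow bytes are scanned for a
 * non-zero value.  memory_is_zero() walks the shadow region a word at a time
 * after aligning to an 8-byte boundary; bytes_is_zero() handles the unaligned
 * prefix and tail and returns the address of the first non-zero shadow byte,
 * or 0 if the whole range is clear.
 */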
static __always_inline unsigned long bytes_is_zero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_zero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_zero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_zero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_zero(start, (end - start) % 8);
}

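/*
 * A non-zero shadow byte is not necessarily an error: the access may end
 * inside a partially accessible granule.  When the scan hits a non-zero
 * byte, the access is bad unless that byte is the shadow of the last byte
 * accessed and the offset of that byte within its granule is still below
 * the shadow value.
 */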
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
			return memory_is_poisoned_2(addr);
		case 4:
			return memory_is_poisoned_4(addr);
		case 8:
			return memory_is_poisoned_8(addr);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

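/*
 * Every instrumented access funnels through here: zero-sized accesses are
 * ignored, addresses below the shadow-mapped range are reported immediately
 * (they have no valid shadow), and everything else is checked against the
 * shadow.  A poisoned result is passed to kasan_report() together with the
 * caller's return address.
 */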
static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
				size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

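/*
 * The mem*() functions are replaced so that bulk accesses are checked as
 * well: the source is validated as a read and the destination as a write
 * before the real (uninstrumented) __memset/__memmove/__memcpy do the
 * actual copy.
 */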
#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy, taken from the userspace AddressSanitizer
 * runtime.  Larger allocations get larger redzones.
 */
static size_t optimal_redzone(size_t object_size)
{
	int rz =
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
	return rz;
}

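/*
 * When a cache is created, each object is grown to make room for KASAN
 * metadata: allocation metadata is placed right after the object, free
 * metadata is added when the freed object itself cannot be reused to hold
 * it (RCU caches, caches with constructors, or objects smaller than the
 * metadata), and the remaining padding up to optimal_redzone() acts as a
 * redzone.  For example, a 96-byte object targets a 32-byte redzone
 * (optimal_redzone(96) == 32), so if the metadata already added 32 bytes
 * or more, no extra padding is needed.
 */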
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
			unsigned long *flags)
{
	int redzone_adjust;
	int orig_size = *size;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}
	redzone_adjust = optimal_redzone(cache->object_size) -
		(*size - cache->object_size);

	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
					optimal_redzone(cache->object_size)));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

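/*
 * A freshly allocated slab page is poisoned in its entirety; object
 * payloads are unpoisoned only while they are live (see kasan_slab_alloc()
 * and kasan_kmalloc() below), so accesses to redzones, metadata and freed
 * objects are caught.
 */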
void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

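/*
 * Allocation and free stack traces are not stored verbatim in the object
 * metadata.  save_stack() captures the current trace, truncates it at the
 * first irq-entry frame (the interrupted task's frames are unrelated to the
 * object), and hands it to the stack depot, which deduplicates traces and
 * returns a compact handle; that handle is what set_track() records.
 */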
static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}

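/*
 * On free, the object's first shadow byte is inspected: a value that is
 * negative or >= KASAN_SHADOW_SCALE_SIZE means the object was not in an
 * allocated state, which is reported as a double-free or invalid-free.
 * Otherwise the object is poisoned with KASAN_KMALLOC_FREE and, for caches
 * with KASAN metadata, the freeing task's stack is recorded and the object
 * goes into the quarantine instead of being returned to the allocator right
 * away, so use-after-free accesses keep hitting poisoned memory for a while.
 */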
static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
	unsigned long size = cache->object_size;
	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

	/* RCU slabs can legally be used after free within the RCU grace period. */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return;

	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
	s8 shadow_byte;

	/* RCU slabs can legally be used after free within the RCU grace period. */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_double_free(cache, object,
				__builtin_return_address(1));
		return true;
	}

	kasan_poison_slab_free(cache, object);

	if (unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);
	return true;
}

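/*
 * kasan_kmalloc() unpoisons exactly the requested size and turns the rest of
 * the slab object into a redzone, which is how out-of-bounds accesses past a
 * kmalloc() size (rather than past the cache's object size) are caught.  For
 * example, kmalloc(45) from the 64-byte cache leaves bytes 0..44 accessible,
 * lets the partial shadow byte (value 5) block bytes 45..47, and poisons
 * bytes 48..63 as KASAN_KMALLOC_REDZONE.
 */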
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);

void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size, flags);
	else
		kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page)))
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	else
		kasan_poison_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
	struct page *page = virt_to_page(ptr);

	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
			KASAN_FREE_PAGE);
}

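/*
 * Modules are loaded into a dedicated address range that is not populated
 * with writable shadow up front.  kasan_module_alloc() vmalloc()s shadow for
 * a module mapping on demand: the shadow is one eighth of the mapping's
 * size, rounded up to a page (a 1 MB module needs 128 KB of shadow), and is
 * freed again via kasan_free_shadow() when the mapping goes away.
 */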
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

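/*
 * For every instrumented global variable the compiler emits a struct
 * kasan_global descriptor and calls __asan_register_globals() from a
 * constructor.  register_global() unpoisons the variable itself and poisons
 * the surrounding redzone that the compiler reserved next to it.
 */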
static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

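/*
 * With KASAN in outline mode the compiler instruments every memory access
 * with a call to __asan_loadN()/__asan_storeN() of the matching size (newer
 * compilers call the *_noabort aliases).  The macro below stamps out the
 * fixed-size variants so that the inlined, size-specific checks above are
 * used.
 */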
#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, true, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison large objects when they go out of scope. */
void __asan_poison_stack_memory(const void *addr, size_t size)
{
	/*
	 * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
	 * by redzones, so we simply round up size to simplify logic.
	 */
	kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
			    KASAN_USE_AFTER_SCOPE);
}
EXPORT_SYMBOL(__asan_poison_stack_memory);

/* Emitted by compiler to unpoison large objects when they go into scope. */
void __asan_unpoison_stack_memory(const void *addr, size_t size)
{
	kasan_unpoison_shadow(addr, size);
}
EXPORT_SYMBOL(__asan_unpoison_stack_memory);

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	pr_info("WARNING: KASAN doesn't support memory hot-add\n");
	pr_info("Memory hot-add will be disabled\n");

	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif