/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}
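
/*
 * Each shadow byte covers KASAN_SHADOW_SCALE_SIZE (8) bytes of memory:
 * 0 means all eight bytes are accessible, a value 1..7 means only the
 * first N bytes of the granule are accessible, and a negative value
 * encodes the poison type. For example, unpoisoning 13 bytes from an
 * aligned address writes one shadow byte of 0 for the first full
 * granule and one shadow byte of 5 (13 & KASAN_SHADOW_MASK) for the
 * trailing partial granule.
 */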

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}
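
/*
 * Example: a shadow byte of 5 means only the first five bytes of the
 * granule are accessible. A 1-byte access at offset 6 within that
 * granule (addr & KASAN_SHADOW_MASK == 6) satisfies 6 >= 5 and is
 * reported as poisoned, while an access at offset 3 is allowed.
 */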

static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

	/*
	 * The access crosses an 8(shadow size)-byte boundary. Such an access
	 * maps into two shadow bytes, so we need to check both of them.
	 */
	if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}
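
/*
 * Example for the boundary check above: a 4-byte access starting at
 * offset 6 of a granule ends at offset 1 of the next granule, so
 * ((addr + 3) & KASAN_SHADOW_MASK) == 1 < 3 and both shadow bytes are
 * inspected; a 4-byte access at offset 0 stays within one granule.
 */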

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	/* An unaligned 16-byte access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}

static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

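/*
 * Scan the shadow range [start, end) for a non-zero byte: check any
 * unaligned prefix byte by byte, then whole 8-byte words, then the
 * tail. Returns the address of the first non-zero byte, or 0 if the
 * whole range is clear.
 */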
static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

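/*
 * Addresses below the start of the region covered by shadow memory
 * (e.g. NULL-pointer dereferences with small offsets) have no valid
 * shadow to inspect, so they are reported immediately.
 */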
static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
				size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static size_t optimal_redzone(size_t object_size)
{
	int rz =
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
	return rz;
}
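
/*
 * Example: a 32-byte object falls into the first bucket (32 <= 64 - 16)
 * and gets a 16-byte redzone; a 96-byte object falls into the second
 * bucket (96 <= 128 - 32) and gets a 32-byte redzone.
 */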

void kasan_cache_create(struct kmem_cache *cache, size_t *size,
			unsigned long *flags)
{
	int redzone_adjust;
	int orig_size = *size;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}
	redzone_adjust = optimal_redzone(cache->object_size) -
		(*size - cache->object_size);

	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
					optimal_redzone(cache->object_size)));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}
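
/*
 * Illustration of the sizing above (the metadata size is assumed here
 * for the sake of the example, not taken from this file): for a cache
 * with a 64-byte object, no constructor, *size == 64 on entry, and an
 * assumed 16-byte struct kasan_alloc_meta, *size grows to 80, leaving
 * 16 bytes past the object. optimal_redzone(64) is 32, so
 * redzone_adjust adds another 16 bytes and the final size is 96.
 */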

void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}

static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
	unsigned long size = cache->object_size;
	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return;

	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

505 {
506 	s8 shadow_byte;
507 
508 	/* RCU slabs could be legally used after free within the RCU period */
509 	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
510 		return false;
511 
512 	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
513 	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
514 		kasan_report_double_free(cache, object,
515 				__builtin_return_address(1));
516 		return true;
517 	}
518 
519 	kasan_poison_slab_free(cache, object);
520 
521 	if (unlikely(!(cache->flags & SLAB_KASAN)))
522 		return false;
523 
524 	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
525 	quarantine_put(get_free_info(cache, object), cache);
526 	return true;
527 }
528 
kasan_kmalloc(struct kmem_cache * cache,const void * object,size_t size,gfp_t flags)529 void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
530 		   gfp_t flags)
531 {
532 	unsigned long redzone_start;
533 	unsigned long redzone_end;
534 
535 	if (gfpflags_allow_blocking(flags))
536 		quarantine_reduce();
537 
538 	if (unlikely(object == NULL))
539 		return;
540 
541 	redzone_start = round_up((unsigned long)(object + size),
542 				KASAN_SHADOW_SCALE_SIZE);
543 	redzone_end = round_up((unsigned long)object + cache->object_size,
544 				KASAN_SHADOW_SCALE_SIZE);
545 
546 	kasan_unpoison_shadow(object, size);
547 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
548 		KASAN_KMALLOC_REDZONE);
549 
550 	if (cache->flags & SLAB_KASAN)
551 		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
552 }
553 EXPORT_SYMBOL(kasan_kmalloc);
554 
kasan_kmalloc_large(const void * ptr,size_t size,gfp_t flags)555 void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
556 {
557 	struct page *page;
558 	unsigned long redzone_start;
559 	unsigned long redzone_end;
560 
561 	if (gfpflags_allow_blocking(flags))
562 		quarantine_reduce();
563 
564 	if (unlikely(ptr == NULL))
565 		return;
566 
567 	page = virt_to_page(ptr);
568 	redzone_start = round_up((unsigned long)(ptr + size),
569 				KASAN_SHADOW_SCALE_SIZE);
570 	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
571 
572 	kasan_unpoison_shadow(ptr, size);
573 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
574 		KASAN_PAGE_REDZONE);
575 }
576 
kasan_krealloc(const void * object,size_t size,gfp_t flags)577 void kasan_krealloc(const void *object, size_t size, gfp_t flags)
578 {
579 	struct page *page;
580 
581 	if (unlikely(object == ZERO_SIZE_PTR))
582 		return;
583 
584 	page = virt_to_head_page(object);
585 
586 	if (unlikely(!PageSlab(page)))
587 		kasan_kmalloc_large(object, size, flags);
588 	else
589 		kasan_kmalloc(page->slab_cache, object, size, flags);
590 }
591 
kasan_poison_kfree(void * ptr)592 void kasan_poison_kfree(void *ptr)
593 {
594 	struct page *page;
595 
596 	page = virt_to_head_page(ptr);
597 
598 	if (unlikely(!PageSlab(page)))
599 		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
600 				KASAN_FREE_PAGE);
601 	else
602 		kasan_poison_slab_free(page->slab_cache, ptr);
603 }
604 
kasan_kfree_large(const void * ptr)605 void kasan_kfree_large(const void *ptr)
606 {
607 	struct page *page = virt_to_page(ptr);
608 
609 	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
610 			KASAN_FREE_PAGE);
611 }
612 
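/*
 * Allocate shadow for a freshly allocated module mapping. The shadow is
 * 1/8th of the covered region: e.g. a 1 MB module range needs 128 KB of
 * shadow, rounded up to whole pages.
 */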
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);
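
/*
 * Each DEFINE_ASAN_LOAD_STORE(size) above emits __asan_load<size>() and
 * __asan_store<size>() (e.g. __asan_load4()) plus their _noabort
 * aliases; the compiler calls these for fixed-size accesses when
 * outline instrumentation is used. __asan_loadN()/__asan_storeN()
 * below handle accesses whose size is not a compile-time constant.
 */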

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison large objects when they go out of scope. */
void __asan_poison_stack_memory(const void *addr, size_t size)
{
	/*
	 * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
	 * by redzones, so we simply round up size to simplify logic.
	 */
	kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
			    KASAN_USE_AFTER_SCOPE);
}
EXPORT_SYMBOL(__asan_poison_stack_memory);

/* Emitted by compiler to unpoison large objects when they go into scope. */
void __asan_unpoison_stack_memory(const void *addr, size_t size)
{
	kasan_unpoison_shadow(addr, size);
}
EXPORT_SYMBOL(__asan_unpoison_stack_memory);

#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(): the first is arch-specific
	 * and the second depends on HUGETLB_PAGE.  So let's abuse pud_bad():
	 * if the pud is bad, it's bad because it's huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}

static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, it must have been mapped
		 * during boot. This can happen when we are onlining
		 * previously offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow.
		 * A non-NULL result from find_vm_area() tells us that this
		 * was the second case.
		 *
		 * Currently it's not possible to free shadow mapped
		 * during boot by kasan_init(), because the code to do
		 * that hasn't been written yet. So we'll just leak the
		 * memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif