/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}
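
/*
 * Worked example for kasan_unpoison_shadow(), assuming the default shadow
 * scale of 8 (KASAN_SHADOW_MASK == 7): for size == 13, the memset in
 * kasan_poison_shadow() clears one full shadow byte (covering bytes 0-7),
 * and 13 & 7 == 5 is stored in the following shadow byte, so only the
 * first 5 bytes of that granule (bytes 8-12) are marked accessible.
 */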

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	__kasan_unpoison_stack(current, watermark);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending
 * on the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}
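
/*
 * Worked example for memory_is_poisoned_1(), assuming an 8-byte shadow
 * granule: if the shadow byte holds 5 (only the first 5 bytes of the
 * granule are accessible), a 1-byte access at offset 4 within the granule
 * passes (4 >= 5 is false) while one at offset 5 is reported. A negative
 * shadow byte (a poison code) makes the comparison true for any offset.
 */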

static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 1))
			return true;

		/*
		 * If a single shadow byte covers the 2-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 3))
			return true;

		/*
		 * If a single shadow byte covers the 4-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 7))
			return true;

		/*
		 * If a single shadow byte covers the 8-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		u16 shadow_first_bytes = *(u16 *)shadow_addr;

		if (unlikely(shadow_first_bytes))
			return true;

		/*
		 * If two shadow bytes cover the 16-byte access, we don't
		 * need to do anything more. Otherwise, test the last
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return memory_is_poisoned_1(addr + 15);
	}

	return false;
}
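
/*
 * Note on the fast paths above, assuming the default 8-byte shadow
 * granule: an access of up to 8 bytes spans at most two shadow bytes, so
 * a single u16 shadow load covers both candidates; an aligned 16-byte
 * access maps to exactly two shadow bytes, while an unaligned one spills
 * into a third, hence the extra memory_is_poisoned_1(addr + 15) check.
 */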

static __always_inline unsigned long bytes_is_zero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_zero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_zero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_zero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_zero(start, (end - start) % 8);
}
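
/*
 * memory_is_zero() scans byte-wise up to an 8-byte boundary, then in
 * 64-bit words, then byte-wise again for the tail, returning the address
 * of the first non-zero byte (or 0 if the range is clean). For example,
 * a 30-byte range starting 3 bytes past an 8-byte boundary is scanned as
 * a 5-byte prefix, three words, and a 1-byte tail.
 */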

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
			return memory_is_poisoned_2(addr);
		case 4:
			return memory_is_poisoned_4(addr);
		case 8:
			return memory_is_poisoned_8(addr);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}
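
/*
 * Because these helpers are __always_inline, a compile-time-constant size
 * collapses the switch into a single specialized check: __asan_load8()
 * below, for instance, boils down to the memory_is_poisoned_8() fast
 * path, while variable-sized accesses take the generic
 * memory_is_poisoned_n() route.
 */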

static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
				size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static size_t optimal_redzone(size_t object_size)
{
	int rz =
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
	return rz;
}
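
/*
 * Example: a 100-byte object falls into the "object_size <= 512 - 64"
 * bucket and gets a 64-byte redzone; a 5000-byte object falls into the
 * "(1 << 14) - 256" bucket and gets 256 bytes.
 */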

void kasan_cache_create(struct kmem_cache *cache, size_t *size,
			unsigned long *flags)
{
	int redzone_adjust;
	int orig_size = *size;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}
	redzone_adjust = optimal_redzone(cache->object_size) -
		(*size - cache->object_size);

	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
					optimal_redzone(cache->object_size)));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}
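
/*
 * Sizing sketch (metadata sizes here are illustrative assumptions): for a
 * 64-byte-object cache, optimal_redzone() asks for 32 bytes past the
 * object. If the alloc (and possibly free) metadata appended above
 * already occupies at least that much, redzone_adjust is non-positive and
 * the metadata itself serves as the redzone; otherwise the per-object
 * size is padded up by the difference.
 */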

void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}

static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
	unsigned long size = cache->object_size;
	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

	/* RCU slabs could legally be used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return;

	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
	s8 shadow_byte;

	/* RCU slabs could legally be used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_double_free(cache, object,
				__builtin_return_address(1));
		return true;
	}

	kasan_poison_slab_free(cache, object);

	if (unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);
	return true;
}
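
/*
 * The double-free check above relies on the shadow encoding: a negative
 * shadow byte (a poison code such as KASAN_KMALLOC_FREE) or one >=
 * KASAN_SHADOW_SCALE_SIZE means the object's first granule is not in an
 * allocated state, so this free cannot be the first one and is reported.
 */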

void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);
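
/*
 * Example (assuming an 8-byte shadow granule): kmalloc(100, ...) served
 * from a 128-byte cache unpoisons bytes 0-99, rounds redzone_start up to
 * object + 104, and poisons bytes 104-127 as KASAN_KMALLOC_REDZONE; the
 * partial granule covering bytes 96-103 encodes that only its first 4
 * bytes are accessible.
 */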

void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size, flags);
	else
		kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page)))
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	else
		kasan_poison_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
	struct page *page = virt_to_page(ptr);

	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
			KASAN_FREE_PAGE);
}

int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}
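
/*
 * Shadow sizing example (assuming KASAN_SHADOW_SCALE_SHIFT == 3 and 4 KiB
 * pages): a 2 MiB module mapping needs 2 MiB >> 3 == 256 KiB of shadow,
 * already page-aligned, so 64 shadow pages are vmalloc'ed alongside it.
 */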

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);
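
/*
 * For reference, DEFINE_ASAN_LOAD_STORE(1) expands (roughly) to:
 *
 *	void __asan_load1(unsigned long addr)
 *	{
 *		check_memory_region_inline(addr, 1, false, _RET_IP_);
 *	}
 *	void __asan_store1(unsigned long addr)
 *	{
 *		check_memory_region_inline(addr, 1, true, _RET_IP_);
 *	}
 *
 * plus exported _noabort aliases; the compiler instruments every 1-byte
 * load and store with calls to these.
 */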

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison large objects when they go out of scope. */
void __asan_poison_stack_memory(const void *addr, size_t size)
{
	/*
	 * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
	 * by redzones, so we simply round up size to simplify logic.
	 */
	kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
			    KASAN_USE_AFTER_SCOPE);
}
EXPORT_SYMBOL(__asan_poison_stack_memory);

/* Emitted by compiler to unpoison large objects when they go into scope. */
void __asan_unpoison_stack_memory(const void *addr, size_t size)
{
	kasan_unpoison_shadow(addr, size);
}
EXPORT_SYMBOL(__asan_unpoison_stack_memory);

/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
			      size - rounded_down_size);
	kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
			KASAN_ALLOCA_LEFT);
	kasan_poison_shadow(right_redzone,
			padding_size + KASAN_ALLOCA_REDZONE_SIZE,
			KASAN_ALLOCA_RIGHT);
}
EXPORT_SYMBOL(__asan_alloca_poison);
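
/*
 * Layout example (assuming KASAN_ALLOCA_REDZONE_SIZE == 32): for
 * alloca(60), the 32 bytes below the allocation become the left redzone,
 * bytes 0-59 stay accessible (with the granule covering bytes 56-63
 * encoded as 4 valid bytes), and the 32 bytes starting at offset 64 are
 * poisoned as the right redzone (padding_size is 0 here).
 */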

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
	if (unlikely(!stack_top || stack_top > stack_bottom))
		return;

	kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte) \
	void __asan_set_shadow_##byte(const void *addr, size_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);
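
/*
 * The byte values follow the compiler's ASan stack-poisoning conventions
 * (an assumption about the instrumentation ABI, not defined in this
 * file): 00 unpoisons, f1/f2/f3 mark left/mid/right stack redzones, and
 * f8 marks out-of-scope locals; the set of variants that must exist here
 * is dictated by which calls the compiler emits.
 */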

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	pr_info("WARNING: KASAN doesn't support memory hot-add\n");
	pr_info("Memory hot-add will be disabled\n");

	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

module_init(kasan_memhotplug_init);
#endif