// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core tag-based KASAN code.
 *
 * Copyright (c) 2018 Google, Inc.
 * Author: Andrey Konovalov <andreyknvl@google.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

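/*
 * Per-CPU state of the tag PRNG, seeded from the cycle counter in
 * kasan_init_tags() below. Each CPU advances its own copy of the state,
 * so no locking is needed on the tag-generation fast path.
 */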
static DEFINE_PER_CPU(u32, prng_state);

void kasan_init_tags(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(prng_state, cpu) = (u32)get_cycles();
}

/*
 * If a preemption happens between this_cpu_read and this_cpu_write, the only
 * side effect is that we'll give a few objects allocated in different
 * contexts the same tag. Since tag-based KASAN is meant to be used as a
 * probabilistic bug-detection debug feature, this doesn't have a significant
 * negative impact.
 *
 * Ideally the tags would use strong randomness to prevent any attempts to
 * predict them during explicit exploit attempts. But strong randomness is
 * expensive, and we made an intentional trade-off to use a PRNG. This
 * non-atomic RMW sequence in fact has a positive effect, since interrupts
 * that randomly skew the PRNG at unpredictable points only do good.
 */
u8 random_tag(void)
{
	u32 state = this_cpu_read(prng_state);

	state = 1664525 * state + 1013904223;
	this_cpu_write(prng_state, state);

	return (u8)(state % (KASAN_TAG_MAX + 1));
}
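
/*
 * The constants above are the classic "Numerical Recipes" LCG parameters,
 * and the modulo keeps tags in [0, KASAN_TAG_MAX], leaving 0xFE
 * (KASAN_TAG_INVALID) and 0xFF (KASAN_TAG_KERNEL) reserved.
 *
 * The sketch below is illustrative only (the helper name is hypothetical,
 * and the size is assumed to be shadow-granule-aligned): it shows how an
 * allocation path is expected to combine random_tag() with set_tag() and
 * kasan_poison_shadow() so that the returned pointer's top-byte tag
 * matches the tag stored in the object's shadow memory.
 */
static inline void *tag_object_example(void *object, size_t size)
{
	u8 tag = random_tag();

	/* Store the tag in every shadow byte covering the object. */
	kasan_poison_shadow(object, size, tag);
	/* Return a pointer whose top byte carries the same tag. */
	return set_tag(object, tag);
}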
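/*
 * Exported, out-of-line counterpart of the internal reset_tag() helper,
 * so that code outside mm/kasan can strip the tag from a pointer via
 * <linux/kasan.h> without pulling in KASAN internals.
 */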
void *kasan_reset_tag(const void *addr)
{
	return reset_tag(addr);
}

bool check_memory_region(unsigned long addr, size_t size, bool write,
				unsigned long ret_ip)
{
	u8 tag;
	u8 *shadow_first, *shadow_last, *shadow;
	void *untagged_addr;

	if (unlikely(size == 0))
		return true;

	tag = get_tag((const void *)addr);

	/*
	 * Ignore accesses for pointers tagged with 0xff (native kernel
	 * pointer tag) to suppress false positives caused by kmap.
	 *
	 * Some kernel code was written to account for archs that don't keep
	 * high memory mapped all the time, but rather map and unmap particular
	 * pages when needed. Instead of storing a pointer to the kernel memory,
	 * this code saves the address of the page structure (obtained via
	 * virt_to_page) and the offset within that page for later use. Those
	 * pages are then mapped and unmapped with kmap/kunmap when necessary,
	 * and page_address is used to get the virtual address of the page.
	 * For arm64 (which keeps the high memory mapped all the time), kmap
	 * is turned into a page_address call.
	 *
	 * The issue is that with the use of the virt_to_page + page_address
	 * sequence the top byte of the original pointer gets lost (it gets
	 * set to KASAN_TAG_KERNEL (0xFF)).
	 */
	if (tag == KASAN_TAG_KERNEL)
		return true;
	untagged_addr = reset_tag((const void *)addr);
	if (unlikely(untagged_addr <
			kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return false;
	}
	shadow_first = kasan_mem_to_shadow(untagged_addr);
	shadow_last = kasan_mem_to_shadow(untagged_addr + size - 1);
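	/*
	 * Each shadow byte covers one KASAN_SHADOW_SCALE_SIZE-byte granule
	 * of memory (16 bytes with software tag-based KASAN on arm64). The
	 * access is valid only if every granule it touches carries the same
	 * tag as the pointer's top byte.
	 */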
	for (shadow = shadow_first; shadow <= shadow_last; shadow++) {
		if (*shadow != tag) {
			kasan_report(addr, size, write, ret_ip);
			return false;
		}
	}

	return true;
}

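/*
 * Entry points for the checks emitted by compiler-inserted instrumentation
 * in software tag-based (HWASan-style) mode: one load/store pair per
 * power-of-two access size up to 16 bytes, plus the variable-size N
 * variants below. The "noabort" suffix reflects that the kernel uses the
 * recoverable flavor of the instrumentation. For example,
 * DEFINE_HWASAN_LOAD_STORE(1) expands to __hwasan_load1_noabort() and
 * __hwasan_store1_noabort().
 */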
#define DEFINE_HWASAN_LOAD_STORE(size)					\
	void __hwasan_load##size##_noabort(unsigned long addr)		\
	{								\
		check_memory_region(addr, size, false, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__hwasan_load##size##_noabort);			\
	void __hwasan_store##size##_noabort(unsigned long addr)		\
	{								\
		check_memory_region(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__hwasan_store##size##_noabort)

DEFINE_HWASAN_LOAD_STORE(1);
DEFINE_HWASAN_LOAD_STORE(2);
DEFINE_HWASAN_LOAD_STORE(4);
DEFINE_HWASAN_LOAD_STORE(8);
DEFINE_HWASAN_LOAD_STORE(16);

void __hwasan_loadN_noabort(unsigned long addr, unsigned long size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__hwasan_loadN_noabort);

void __hwasan_storeN_noabort(unsigned long addr, unsigned long size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__hwasan_storeN_noabort);

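/*
 * Provided for compiler-inserted instrumentation that needs to (re)tag a
 * region of memory (for instance, stack objects, where supported). It
 * simply stores the tag into every shadow byte covering the region.
 */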
void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size)
{
	kasan_poison_shadow((void *)addr, size, tag);
}
EXPORT_SYMBOL(__hwasan_tag_memory);
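
/*
 * Illustrative sketch of the failure mode these checks catch (hypothetical
 * code, not part of this file): freeing an object retags its shadow
 * granules, so a subsequent access through the stale pointer sees a tag
 * mismatch in check_memory_region() and triggers kasan_report().
 *
 *	char *p = kmalloc(64, GFP_KERNEL);	// p's top byte gets a random tag
 *	kfree(p);				// shadow retagged (KASAN_TAG_INVALID)
 *	p[0] = 'x';				// tag mismatch -> kasan_report()
 */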