1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * This file contains core hardware tag-based KASAN code.
4 *
5 * Copyright (c) 2020 Google, Inc.
6 * Author: Andrey Konovalov <andreyknvl@google.com>
7 */
8
9 #define pr_fmt(fmt) "kasan: " fmt
10
11 #include <linux/init.h>
12 #include <linux/kasan.h>
13 #include <linux/kernel.h>
14 #include <linux/memory.h>
15 #include <linux/mm.h>
16 #include <linux/static_key.h>
17 #include <linux/string.h>
18 #include <linux/types.h>
19 #include <linux/vmalloc.h>
20
21 #include "kasan.h"
22
/* Parsed value of the kasan= boot parameter. */
enum kasan_arg {
	KASAN_ARG_DEFAULT,	/* parameter not specified */
	KASAN_ARG_OFF,		/* kasan=off */
	KASAN_ARG_ON,		/* kasan=on */
};
28
/* Parsed value of the kasan.mode= boot parameter. */
enum kasan_arg_mode {
	KASAN_ARG_MODE_DEFAULT,	/* parameter not specified */
	KASAN_ARG_MODE_SYNC,	/* kasan.mode=sync */
	KASAN_ARG_MODE_ASYNC,	/* kasan.mode=async */
	KASAN_ARG_MODE_ASYMM,	/* kasan.mode=asymm */
};
35
/* Parsed value of the kasan.vmalloc= boot parameter. */
enum kasan_arg_vmalloc {
	KASAN_ARG_VMALLOC_DEFAULT,	/* parameter not specified */
	KASAN_ARG_VMALLOC_OFF,		/* kasan.vmalloc=off */
	KASAN_ARG_VMALLOC_ON,		/* kasan.vmalloc=on */
};
41
/*
 * Storage for the parsed boot parameters above.
 * kasan_arg and kasan_arg_mode stay readable after init (consulted by
 * kasan_init_hw_tags_cpu() and kasan_enable_hw_tags() on CPU hotplug),
 * while kasan_arg_vmalloc is only consumed during kasan_init_hw_tags(),
 * so its storage can be reclaimed after init.
 */
static enum kasan_arg kasan_arg __ro_after_init;
static enum kasan_arg_mode kasan_arg_mode __ro_after_init;
static enum kasan_arg_vmalloc kasan_arg_vmalloc __initdata;
45
/*
 * Whether KASAN is enabled at all.
 * The value remains false until KASAN is initialized by kasan_init_hw_tags().
 */
DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);
EXPORT_SYMBOL(kasan_flag_enabled);

/*
 * Whether the selected mode is synchronous, asynchronous, or asymmetric.
 * Defaults to KASAN_MODE_SYNC.
 */
enum kasan_mode kasan_mode __ro_after_init;
EXPORT_SYMBOL_GPL(kasan_mode);

/*
 * Whether to enable vmalloc tagging.
 * Defaults to on when CONFIG_KASAN_VMALLOC is enabled; can be overridden
 * via the kasan.vmalloc= boot parameter in kasan_init_hw_tags().
 */
#ifdef CONFIG_KASAN_VMALLOC
DEFINE_STATIC_KEY_TRUE(kasan_flag_vmalloc);
#else
DEFINE_STATIC_KEY_FALSE(kasan_flag_vmalloc);
#endif
EXPORT_SYMBOL_GPL(kasan_flag_vmalloc);
67
#define PAGE_ALLOC_SAMPLE_DEFAULT	1
#define PAGE_ALLOC_SAMPLE_ORDER_DEFAULT	3

/*
 * Sampling interval of page_alloc allocation (un)poisoning.
 * The default of 1 means every allocation is handled, i.e. no sampling.
 */
unsigned long kasan_page_alloc_sample = PAGE_ALLOC_SAMPLE_DEFAULT;

/*
 * Minimum order of page_alloc allocations to be affected by sampling.
 * The default value is chosen to match both
 * PAGE_ALLOC_COSTLY_ORDER and SKB_FRAG_PAGE_ORDER.
 */
unsigned int kasan_page_alloc_sample_order = PAGE_ALLOC_SAMPLE_ORDER_DEFAULT;

/* Per-CPU countdown used to decide which sampled allocations to skip. */
DEFINE_PER_CPU(long, kasan_page_alloc_skip);

/*
 * Flush dcache after writing the tag for certain H/W to maintain cache coherence.
 * The default value is chosen not to flush the cache; enabled via the
 * kasan_inval_dcache boot parameter below.
 */
DEFINE_STATIC_KEY_FALSE(kasan_inval_dcache);
91
92 /* kasan=off/on */
early_kasan_flag(char * arg)93 static int __init early_kasan_flag(char *arg)
94 {
95 if (!arg)
96 return -EINVAL;
97
98 if (!strcmp(arg, "off"))
99 kasan_arg = KASAN_ARG_OFF;
100 else if (!strcmp(arg, "on"))
101 kasan_arg = KASAN_ARG_ON;
102 else
103 return -EINVAL;
104
105 return 0;
106 }
107 early_param("kasan", early_kasan_flag);
108
109 /* kasan.mode=sync/async/asymm */
early_kasan_mode(char * arg)110 static int __init early_kasan_mode(char *arg)
111 {
112 if (!arg)
113 return -EINVAL;
114
115 if (!strcmp(arg, "sync"))
116 kasan_arg_mode = KASAN_ARG_MODE_SYNC;
117 else if (!strcmp(arg, "async"))
118 kasan_arg_mode = KASAN_ARG_MODE_ASYNC;
119 else if (!strcmp(arg, "asymm"))
120 kasan_arg_mode = KASAN_ARG_MODE_ASYMM;
121 else
122 return -EINVAL;
123
124 return 0;
125 }
126 early_param("kasan.mode", early_kasan_mode);
127
128 /* kasan.vmalloc=off/on */
early_kasan_flag_vmalloc(char * arg)129 static int __init early_kasan_flag_vmalloc(char *arg)
130 {
131 if (!arg)
132 return -EINVAL;
133
134 if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
135 return 0;
136
137 if (!strcmp(arg, "off"))
138 kasan_arg_vmalloc = KASAN_ARG_VMALLOC_OFF;
139 else if (!strcmp(arg, "on"))
140 kasan_arg_vmalloc = KASAN_ARG_VMALLOC_ON;
141 else
142 return -EINVAL;
143
144 return 0;
145 }
146 early_param("kasan.vmalloc", early_kasan_flag_vmalloc);
147
kasan_mode_info(void)148 static inline const char *kasan_mode_info(void)
149 {
150 if (kasan_mode == KASAN_MODE_ASYNC)
151 return "async";
152 else if (kasan_mode == KASAN_MODE_ASYMM)
153 return "asymm";
154 else
155 return "sync";
156 }
157
158 /* kasan.page_alloc.sample=<sampling interval> */
early_kasan_flag_page_alloc_sample(char * arg)159 static int __init early_kasan_flag_page_alloc_sample(char *arg)
160 {
161 int rv;
162
163 if (!arg)
164 return -EINVAL;
165
166 rv = kstrtoul(arg, 0, &kasan_page_alloc_sample);
167 if (rv)
168 return rv;
169
170 if (!kasan_page_alloc_sample || kasan_page_alloc_sample > LONG_MAX) {
171 kasan_page_alloc_sample = PAGE_ALLOC_SAMPLE_DEFAULT;
172 return -EINVAL;
173 }
174
175 return 0;
176 }
177 early_param("kasan.page_alloc.sample", early_kasan_flag_page_alloc_sample);
178
179 /* kasan.page_alloc.sample.order=<minimum page order> */
early_kasan_flag_page_alloc_sample_order(char * arg)180 static int __init early_kasan_flag_page_alloc_sample_order(char *arg)
181 {
182 int rv;
183
184 if (!arg)
185 return -EINVAL;
186
187 rv = kstrtouint(arg, 0, &kasan_page_alloc_sample_order);
188 if (rv)
189 return rv;
190
191 if (kasan_page_alloc_sample_order > INT_MAX) {
192 kasan_page_alloc_sample_order = PAGE_ALLOC_SAMPLE_ORDER_DEFAULT;
193 return -EINVAL;
194 }
195
196 return 0;
197 }
198 early_param("kasan.page_alloc.sample.order", early_kasan_flag_page_alloc_sample_order);
199
kasan_set_inval_dcache(char * arg)200 static int __init kasan_set_inval_dcache(char *arg)
201 {
202 static_branch_enable(&kasan_inval_dcache);
203
204 return 0;
205 }
206 early_param("kasan_inval_dcache", kasan_set_inval_dcache);
207
208 /*
209 * kasan_init_hw_tags_cpu() is called for each CPU.
210 * Not marked as __init as a CPU can be hot-plugged after boot.
211 */
kasan_init_hw_tags_cpu(void)212 void kasan_init_hw_tags_cpu(void)
213 {
214 /*
215 * There's no need to check that the hardware is MTE-capable here,
216 * as this function is only called for MTE-capable hardware.
217 */
218
219 /*
220 * If KASAN is disabled via command line, don't initialize it.
221 * When this function is called, kasan_flag_enabled is not yet
222 * set by kasan_init_hw_tags(). Thus, check kasan_arg instead.
223 */
224 if (kasan_arg == KASAN_ARG_OFF)
225 return;
226
227 /*
228 * Enable async or asymm modes only when explicitly requested
229 * through the command line.
230 */
231 kasan_enable_hw_tags();
232 }
233
234 /* kasan_init_hw_tags() is called once on boot CPU. */
kasan_init_hw_tags(void)235 void __init kasan_init_hw_tags(void)
236 {
237 /* If hardware doesn't support MTE, don't initialize KASAN. */
238 if (!system_supports_mte())
239 return;
240
241 /* If KASAN is disabled via command line, don't initialize it. */
242 if (kasan_arg == KASAN_ARG_OFF)
243 return;
244
245 switch (kasan_arg_mode) {
246 case KASAN_ARG_MODE_DEFAULT:
247 /* Default is specified by kasan_mode definition. */
248 break;
249 case KASAN_ARG_MODE_SYNC:
250 kasan_mode = KASAN_MODE_SYNC;
251 break;
252 case KASAN_ARG_MODE_ASYNC:
253 kasan_mode = KASAN_MODE_ASYNC;
254 break;
255 case KASAN_ARG_MODE_ASYMM:
256 kasan_mode = KASAN_MODE_ASYMM;
257 break;
258 }
259
260 switch (kasan_arg_vmalloc) {
261 case KASAN_ARG_VMALLOC_DEFAULT:
262 /* Default is specified by kasan_flag_vmalloc definition. */
263 break;
264 case KASAN_ARG_VMALLOC_OFF:
265 static_branch_disable(&kasan_flag_vmalloc);
266 break;
267 case KASAN_ARG_VMALLOC_ON:
268 static_branch_enable(&kasan_flag_vmalloc);
269 break;
270 }
271
272 kasan_init_tags();
273
274 /* KASAN is now initialized, enable it. */
275 static_branch_enable(&kasan_flag_enabled);
276
277 pr_info("KernelAddressSanitizer initialized (hw-tags, mode=%s, vmalloc=%s, stacktrace=%s)\n",
278 kasan_mode_info(),
279 kasan_vmalloc_enabled() ? "on" : "off",
280 kasan_stack_collection_enabled() ? "on" : "off");
281 }
282
283 #ifdef CONFIG_KASAN_VMALLOC
284
unpoison_vmalloc_pages(const void * addr,u8 tag)285 static void unpoison_vmalloc_pages(const void *addr, u8 tag)
286 {
287 struct vm_struct *area;
288 int i;
289
290 /*
291 * As hardware tag-based KASAN only tags VM_ALLOC vmalloc allocations
292 * (see the comment in __kasan_unpoison_vmalloc), all of the pages
293 * should belong to a single area.
294 */
295 area = find_vm_area((void *)addr);
296 if (WARN_ON(!area))
297 return;
298
299 for (i = 0; i < area->nr_pages; i++) {
300 struct page *page = area->pages[i];
301
302 page_kasan_tag_set(page, tag);
303 }
304 }
305
init_vmalloc_pages(const void * start,unsigned long size)306 static void init_vmalloc_pages(const void *start, unsigned long size)
307 {
308 const void *addr;
309
310 for (addr = start; addr < start + size; addr += PAGE_SIZE) {
311 struct page *page = vmalloc_to_page(addr);
312
313 clear_highpage_kasan_tagged(page);
314 }
315 }
316
__kasan_unpoison_vmalloc(const void * start,unsigned long size,kasan_vmalloc_flags_t flags)317 void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
318 kasan_vmalloc_flags_t flags)
319 {
320 u8 tag;
321 unsigned long redzone_start, redzone_size;
322
323 if (!kasan_vmalloc_enabled()) {
324 if (flags & KASAN_VMALLOC_INIT)
325 init_vmalloc_pages(start, size);
326 return (void *)start;
327 }
328
329 /*
330 * Don't tag non-VM_ALLOC mappings, as:
331 *
332 * 1. Unlike the software KASAN modes, hardware tag-based KASAN only
333 * supports tagging physical memory. Therefore, it can only tag a
334 * single mapping of normal physical pages.
335 * 2. Hardware tag-based KASAN can only tag memory mapped with special
336 * mapping protection bits, see arch_vmap_pgprot_tagged().
337 * As non-VM_ALLOC mappings can be mapped outside of vmalloc code,
338 * providing these bits would require tracking all non-VM_ALLOC
339 * mappers.
340 *
341 * Thus, for VM_ALLOC mappings, hardware tag-based KASAN only tags
342 * the first virtual mapping, which is created by vmalloc().
343 * Tagging the page_alloc memory backing that vmalloc() allocation is
344 * skipped, see ___GFP_SKIP_KASAN.
345 *
346 * For non-VM_ALLOC allocations, page_alloc memory is tagged as usual.
347 */
348 if (!(flags & KASAN_VMALLOC_VM_ALLOC)) {
349 WARN_ON(flags & KASAN_VMALLOC_INIT);
350 return (void *)start;
351 }
352
353 /*
354 * Don't tag executable memory.
355 * The kernel doesn't tolerate having the PC register tagged.
356 */
357 if (!(flags & KASAN_VMALLOC_PROT_NORMAL)) {
358 WARN_ON(flags & KASAN_VMALLOC_INIT);
359 return (void *)start;
360 }
361
362 tag = kasan_random_tag();
363 start = set_tag(start, tag);
364
365 /* Unpoison and initialize memory up to size. */
366 kasan_unpoison(start, size, flags & KASAN_VMALLOC_INIT);
367
368 /*
369 * Explicitly poison and initialize the in-page vmalloc() redzone.
370 * Unlike software KASAN modes, hardware tag-based KASAN doesn't
371 * unpoison memory when populating shadow for vmalloc() space.
372 */
373 redzone_start = round_up((unsigned long)start + size,
374 KASAN_GRANULE_SIZE);
375 redzone_size = round_up(redzone_start, PAGE_SIZE) - redzone_start;
376 kasan_poison((void *)redzone_start, redzone_size, KASAN_TAG_INVALID,
377 flags & KASAN_VMALLOC_INIT);
378
379 /*
380 * Set per-page tag flags to allow accessing physical memory for the
381 * vmalloc() mapping through page_address(vmalloc_to_page()).
382 */
383 unpoison_vmalloc_pages(start, tag);
384
385 return (void *)start;
386 }
387
__kasan_poison_vmalloc(const void * start,unsigned long size)388 void __kasan_poison_vmalloc(const void *start, unsigned long size)
389 {
390 /*
391 * No tagging here.
392 * The physical pages backing the vmalloc() allocation are poisoned
393 * through the usual page_alloc paths.
394 */
395 }
396
397 #endif
398
kasan_enable_hw_tags(void)399 void kasan_enable_hw_tags(void)
400 {
401 if (kasan_arg_mode == KASAN_ARG_MODE_ASYNC)
402 hw_enable_tag_checks_async();
403 else if (kasan_arg_mode == KASAN_ARG_MODE_ASYMM)
404 hw_enable_tag_checks_asymm();
405 else
406 hw_enable_tag_checks_sync();
407 }
408
409 #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
410
411 EXPORT_SYMBOL_GPL(kasan_enable_hw_tags);
412
kasan_force_async_fault(void)413 void kasan_force_async_fault(void)
414 {
415 hw_force_async_tag_fault();
416 }
417 EXPORT_SYMBOL_GPL(kasan_force_async_fault);
418
419 #endif
420