/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MM_KASAN_KASAN_H
#define __MM_KASAN_KASAN_H

#include <linux/atomic.h>
#include <linux/kasan.h>
#include <linux/kasan-tags.h>
#include <linux/kfence.h>
#include <linux/stackdepot.h>
#include <asm/cacheflush.h>

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

#include <linux/static_key.h>

DECLARE_STATIC_KEY_TRUE(kasan_flag_stacktrace);

static inline bool kasan_stack_collection_enabled(void)
{
	return static_branch_unlikely(&kasan_flag_stacktrace);
}
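
/*
 * A sketch of the intended use (the actual call sites live in the common
 * KASAN code): saving stack traces is comparatively expensive, so callers
 * gate it behind this helper, e.g.:
 *
 *	if (kasan_stack_collection_enabled())
 *		kasan_save_alloc_info(cache, object, flags);
 */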

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline bool kasan_stack_collection_enabled(void)
{
	return true;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#include "../slab.h"

DECLARE_STATIC_KEY_TRUE(kasan_flag_vmalloc);

enum kasan_mode {
	KASAN_MODE_SYNC,
	KASAN_MODE_ASYNC,
	KASAN_MODE_ASYMM,
};

extern enum kasan_mode kasan_mode __ro_after_init;

extern unsigned long kasan_page_alloc_sample;
extern unsigned int kasan_page_alloc_sample_order;
DECLARE_PER_CPU(long, kasan_page_alloc_skip);

static inline bool kasan_vmalloc_enabled(void)
{
	/* Static branch is never enabled with CONFIG_KASAN_VMALLOC disabled. */
	return static_branch_likely(&kasan_flag_vmalloc);
}

static inline bool kasan_async_fault_possible(void)
{
	return kasan_mode == KASAN_MODE_ASYNC || kasan_mode == KASAN_MODE_ASYMM;
}

static inline bool kasan_sync_fault_possible(void)
{
	return kasan_mode == KASAN_MODE_SYNC || kasan_mode == KASAN_MODE_ASYMM;
}

static inline bool kasan_sample_page_alloc(unsigned int order)
{
	/* Fast-path for when sampling is disabled. */
	if (kasan_page_alloc_sample == 1)
		return true;

	if (order < kasan_page_alloc_sample_order)
		return true;

	if (this_cpu_dec_return(kasan_page_alloc_skip) < 0) {
		this_cpu_write(kasan_page_alloc_skip,
			       kasan_page_alloc_sample - 1);
		return true;
	}

	return false;
}
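
/*
 * Illustrative numbers, assuming the kasan.page_alloc.sample=8 and
 * kasan.page_alloc.sample.order=3 boot parameters: an order-2 allocation
 * is always poisoned (its order is below the sample order), while order-3
 * and larger allocations are poisoned once per 8 on each CPU. The per-CPU
 * skip counter counts down, and the decrement that takes it below zero
 * both samples the allocation and resets the counter to sample - 1.
 */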

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_vmalloc_enabled(void)
{
	return IS_ENABLED(CONFIG_KASAN_VMALLOC);
}

static inline bool kasan_async_fault_possible(void)
{
	return false;
}

static inline bool kasan_sync_fault_possible(void)
{
	return true;
}

static inline bool kasan_sample_page_alloc(unsigned int order)
{
	return true;
}

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_GENERIC

/*
 * Generic KASAN uses per-object metadata to store alloc and free stack traces
 * and the quarantine link.
 */
static inline bool kasan_requires_meta(void)
{
	return true;
}

#else /* CONFIG_KASAN_GENERIC */

/*
 * Tag-based KASAN modes do not use per-object metadata: they use the stack
 * ring to store alloc and free stack traces and do not use quarantine.
 */
static inline bool kasan_requires_meta(void)
{
	return false;
}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_GRANULE_SIZE	(1UL << KASAN_SHADOW_SCALE_SHIFT)
#else
#include <asm/mte-kasan.h>
#define KASAN_GRANULE_SIZE	MTE_GRANULE_SIZE
#endif

#define KASAN_GRANULE_MASK	(KASAN_GRANULE_SIZE - 1)

#define KASAN_MEMORY_PER_SHADOW_PAGE	(KASAN_GRANULE_SIZE << PAGE_SHIFT)
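
/*
 * Worked example: in generic mode, KASAN_SHADOW_SCALE_SHIFT is 3, so one
 * shadow byte covers an 8-byte granule and, with 4K pages, one shadow page
 * maps 8 << 12 = 32K of memory. The tag-based modes use 16-byte granules
 * (a scale shift of 4 for software tags on arm64, and MTE_GRANULE_SIZE of
 * 16 for hardware tags).
 */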

#ifdef CONFIG_KASAN_GENERIC
#define KASAN_PAGE_FREE		0xFF  /* freed page */
#define KASAN_PAGE_REDZONE	0xFE  /* redzone for kmalloc_large allocation */
#define KASAN_SLAB_REDZONE	0xFC  /* redzone for slab object */
#define KASAN_SLAB_FREE		0xFB  /* freed slab object */
#define KASAN_VMALLOC_INVALID	0xF8  /* inaccessible space in vmap area */
#else
#define KASAN_PAGE_FREE		KASAN_TAG_INVALID
#define KASAN_PAGE_REDZONE	KASAN_TAG_INVALID
#define KASAN_SLAB_REDZONE	KASAN_TAG_INVALID
#define KASAN_SLAB_FREE		KASAN_TAG_INVALID
#define KASAN_VMALLOC_INVALID	KASAN_TAG_INVALID /* only used for SW_TAGS */
#endif

#ifdef CONFIG_KASAN_GENERIC

#define KASAN_SLAB_FREE_META	0xFA  /* freed slab object with free meta */
#define KASAN_GLOBAL_REDZONE	0xF9  /* redzone for global variable */

/* Stack redzone shadow values. Compiler ABI, do not change. */
#define KASAN_STACK_LEFT	0xF1
#define KASAN_STACK_MID		0xF2
#define KASAN_STACK_RIGHT	0xF3
#define KASAN_STACK_PARTIAL	0xF4

/* alloca redzone shadow values. */
#define KASAN_ALLOCA_LEFT	0xCA
#define KASAN_ALLOCA_RIGHT	0xCB

/* alloca redzone size. Compiler ABI, do not change. */
#define KASAN_ALLOCA_REDZONE_SIZE	32

/* Stack frame marker. Compiler ABI, do not change. */
#define KASAN_CURRENT_STACK_FRAME_MAGIC 0x41B58AB3

/* Dummy value to avoid breaking randconfig/all*config builds. */
#ifndef KASAN_ABI_VERSION
#define KASAN_ABI_VERSION 1
#endif

#endif /* CONFIG_KASAN_GENERIC */

/* Metadata layout customization. */
#define META_BYTES_PER_BLOCK 1
#define META_BLOCKS_PER_ROW 16
#define META_BYTES_PER_ROW (META_BLOCKS_PER_ROW * META_BYTES_PER_BLOCK)
#define META_MEM_BYTES_PER_ROW (META_BYTES_PER_ROW * KASAN_GRANULE_SIZE)
#define META_ROWS_AROUND_ADDR 2

#define KASAN_STACK_DEPTH 64

struct kasan_track {
	u32 pid;
	depot_stack_handle_t stack;
#ifdef CONFIG_KASAN_EXTRA_INFO
	u64 cpu:20;
	u64 timestamp:44;
#endif /* CONFIG_KASAN_EXTRA_INFO */
};

enum kasan_report_type {
	KASAN_REPORT_ACCESS,
	KASAN_REPORT_INVALID_FREE,
	KASAN_REPORT_DOUBLE_FREE,
};

struct kasan_report_info {
	/* Filled in by kasan_report_*(). */
	enum kasan_report_type type;
	const void *access_addr;
	size_t access_size;
	bool is_write;
	unsigned long ip;

	/* Filled in by the common reporting code. */
	const void *first_bad_addr;
	struct kmem_cache *cache;
	void *object;
	size_t alloc_size;

	/* Filled in by the mode-specific reporting code. */
	const char *bug_type;
	struct kasan_track alloc_track;
	struct kasan_track free_track;
};

/* Do not change the struct layout: compiler ABI. */
struct kasan_source_location {
	const char *filename;
	int line_no;
	int column_no;
};

/* Do not change the struct layout: compiler ABI. */
struct kasan_global {
	const void *beg;		/* Address of the beginning of the global variable. */
	size_t size;			/* Size of the global variable. */
	size_t size_with_redzone;	/* Size of the variable + size of the redzone, 32-byte aligned. */
	const void *name;
	const void *module_name;	/* Name of the module where the global variable is declared. */
	unsigned long has_dynamic_init;	/* This is needed for C++. */
#if KASAN_ABI_VERSION >= 4
	struct kasan_source_location *location;
#endif
#if KASAN_ABI_VERSION >= 5
	char *odr_indicator;
#endif
};

/* Structures for keeping alloc and free meta. */

#ifdef CONFIG_KASAN_GENERIC

/*
 * Alloc meta contains the allocation-related information about a slab object.
 * Alloc meta is saved when an object is allocated and is kept until either the
 * object returns to the slab freelist (leaves quarantine for quarantined
 * objects or gets freed for the non-quarantined ones) or is reallocated via
 * krealloc or through a mempool.
 * Alloc meta is stored inside of the object's redzone.
 * Alloc meta is considered valid whenever it contains non-zero data.
 */
struct kasan_alloc_meta {
	struct kasan_track alloc_track;
	/* Free track is stored in kasan_free_meta. */
	depot_stack_handle_t aux_stack[2];
};

struct qlist_node {
	struct qlist_node *next;
};

/*
 * Free meta is stored either in the object itself or in the redzone after the
 * object. In the former case, the free meta offset is 0. In the latter case,
 * the offset is greater than 0 and less than INT_MAX. An offset of INT_MAX
 * marks that free meta is not present.
 */
#define KASAN_NO_FREE_META INT_MAX
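
/*
 * A sketch of how the marker is meant to be checked (see
 * kasan_get_free_meta() in generic.c, which reads the offset from the
 * cache's kasan_info):
 *
 *	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
 *		return NULL;
 */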

/*
 * Free meta contains the freeing-related information about a slab object.
 * Free meta is only kept for quarantined objects and for mempool objects until
 * the object gets allocated again.
 * Free meta is stored within the object's memory.
 * Free meta is considered valid whenever the value of the shadow byte that
 * corresponds to the first 8 bytes of the object is KASAN_SLAB_FREE_META.
 */
struct kasan_free_meta {
	struct qlist_node quarantine_link;
	struct kasan_track free_track;
};

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

struct kasan_stack_ring_entry {
	void *ptr;
	size_t size;
	struct kasan_track track;
	bool is_free;
};

struct kasan_stack_ring {
	rwlock_t lock;
	size_t size;
	atomic64_t pos;
	struct kasan_stack_ring_entry *entries;
};
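
/*
 * A sketch of the design (see mm/kasan/tags.c): writers reserve a slot via
 * atomic64_fetch_add() on @pos and fill entries[pos % size] without
 * serializing against each other. The rwlock is used in an inverted way:
 * entry writers take it for reading so they can run concurrently, and the
 * reporting code takes it for writing to snapshot the ring consistently.
 */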

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

static __always_inline bool addr_in_shadow(const void *addr)
{
	return addr >= (void *)KASAN_SHADOW_START &&
		addr < (void *)KASAN_SHADOW_END;
}

#ifndef kasan_shadow_to_mem
static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{
	return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
		<< KASAN_SHADOW_SCALE_SHIFT);
}
#endif
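
/*
 * kasan_shadow_to_mem() above inverts the forward mapping done by
 * kasan_mem_to_shadow() in <linux/kasan.h>:
 *
 *	shadow = (mem >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 */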

#ifndef addr_has_metadata
static __always_inline bool addr_has_metadata(const void *addr)
{
	return (kasan_reset_tag(addr) >=
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
}
#endif

/**
 * kasan_check_range - Check a memory region, and report if an access is invalid.
 * @addr: the accessed address
 * @size: the accessed size
 * @write: true if access is a write access
 * @ret_ip: return address
 *
 * Return: true if access was valid, false if invalid
 */
bool kasan_check_range(const void *addr, size_t size, bool write,
		       unsigned long ret_ip);
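
/*
 * Typical use from an instrumentation hook (a sketch; the real callers are
 * the __asan_*() hooks declared at the end of this header):
 *
 *	if (!kasan_check_range(addr, size, false, _RET_IP_))
 *		return;
 */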

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static __always_inline bool addr_has_metadata(const void *addr)
{
	return (is_vmalloc_addr(addr) || virt_addr_valid(addr));
}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

const void *kasan_find_first_bad_addr(const void *addr, size_t size);
size_t kasan_get_alloc_size(void *object, struct kmem_cache *cache);
void kasan_complete_mode_report_info(struct kasan_report_info *info);
void kasan_metadata_fetch_row(char *buffer, void *row);

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
void kasan_print_tags(u8 addr_tag, const void *addr);
#else
static inline void kasan_print_tags(u8 addr_tag, const void *addr) { }
#endif

#if defined(CONFIG_KASAN_STACK)
void kasan_print_address_stack_frame(const void *addr);
#else
static inline void kasan_print_address_stack_frame(const void *addr) { }
#endif

#ifdef CONFIG_KASAN_GENERIC
void kasan_print_aux_stacks(struct kmem_cache *cache, const void *object);
#else
static inline void kasan_print_aux_stacks(struct kmem_cache *cache, const void *object) { }
#endif

bool kasan_report(const void *addr, size_t size,
		bool is_write, unsigned long ip);
void kasan_report_invalid_free(void *object, unsigned long ip, enum kasan_report_type type);

struct slab *kasan_addr_to_slab(const void *addr);

#ifdef CONFIG_KASAN_GENERIC
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
						const void *object);
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
						const void *object);
void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
#else
static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { }
#endif

depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags);
void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack);
void kasan_save_track(struct kasan_track *track, gfp_t flags);
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags);
void kasan_save_free_info(struct kmem_cache *cache, void *object);

#ifdef CONFIG_KASAN_GENERIC
bool kasan_quarantine_put(struct kmem_cache *cache, void *object);
void kasan_quarantine_reduce(void);
void kasan_quarantine_remove_cache(struct kmem_cache *cache);
#else
static inline bool kasan_quarantine_put(struct kmem_cache *cache, void *object) { return false; }
static inline void kasan_quarantine_reduce(void) { }
static inline void kasan_quarantine_remove_cache(struct kmem_cache *cache) { }
#endif

#ifndef arch_kasan_set_tag
static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
{
	return addr;
}
#endif
#ifndef arch_kasan_get_tag
#define arch_kasan_get_tag(addr)	0
#endif

#define set_tag(addr, tag)	((void *)arch_kasan_set_tag((addr), (tag)))
#define get_tag(addr)		arch_kasan_get_tag(addr)
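
/*
 * On arm64, both tag-based modes keep the tag in the pointer's top byte
 * (bits 56-63), relying on the Top Byte Ignore feature; there,
 * arch_kasan_set_tag() rewrites that byte. Architectures without pointer
 * tagging get the fallbacks above, making set_tag() a plain cast and
 * get_tag() always 0.
 */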

#ifdef CONFIG_KASAN_HW_TAGS

#define hw_enable_tag_checks_sync()		arch_enable_tag_checks_sync()
#define hw_enable_tag_checks_async()		arch_enable_tag_checks_async()
#define hw_enable_tag_checks_asymm()		arch_enable_tag_checks_asymm()
#define hw_suppress_tag_checks_start()		arch_suppress_tag_checks_start()
#define hw_suppress_tag_checks_stop()		arch_suppress_tag_checks_stop()
#define hw_force_async_tag_fault()		arch_force_async_tag_fault()
#define hw_get_random_tag()			arch_get_random_tag()
#define hw_get_mem_tag(addr)			arch_get_mem_tag(addr)
#define hw_set_mem_tag_range(addr, size, tag, init) \
			arch_set_mem_tag_range((addr), (size), (tag), (init))

void kasan_enable_hw_tags(void);

#else /* CONFIG_KASAN_HW_TAGS */

static inline void kasan_enable_hw_tags(void) { }

#endif /* CONFIG_KASAN_HW_TAGS */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
void __init kasan_init_tags(void);
#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#if defined(CONFIG_KASAN_HW_TAGS) && IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

void kasan_force_async_fault(void);

#else /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */

static inline void kasan_force_async_fault(void) { }

#endif /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */

#ifdef CONFIG_KASAN_SW_TAGS
u8 kasan_random_tag(void);
#elif defined(CONFIG_KASAN_HW_TAGS)
static inline u8 kasan_random_tag(void) { return hw_get_random_tag(); }
#else
static inline u8 kasan_random_tag(void) { return 0; }
#endif

#ifdef CONFIG_KASAN_HW_TAGS

DECLARE_STATIC_KEY_FALSE(kasan_inval_dcache);

static inline void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
	addr = kasan_reset_tag(addr);

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	if (WARN_ON(size & KASAN_GRANULE_MASK))
		return;

	hw_set_mem_tag_range((void *)addr, size, value, init);

	if (static_branch_unlikely(&kasan_inval_dcache) && size)
		dcache_clean_inval_poc((unsigned long)addr, (unsigned long)addr + size);
}

static inline void kasan_unpoison(const void *addr, size_t size, bool init)
{
	u8 tag = get_tag(addr);

	addr = kasan_reset_tag(addr);

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	size = round_up(size, KASAN_GRANULE_SIZE);

	hw_set_mem_tag_range((void *)addr, size, tag, init);

	if (static_branch_unlikely(&kasan_inval_dcache) && size)
		dcache_clean_inval_poc((unsigned long)addr, (unsigned long)addr + size);
}

static inline bool kasan_byte_accessible(const void *addr)
{
	u8 ptr_tag = get_tag(addr);
	u8 mem_tag = hw_get_mem_tag((void *)addr);

	return ptr_tag == KASAN_TAG_KERNEL || ptr_tag == mem_tag;
}

#else /* CONFIG_KASAN_HW_TAGS */

/**
 * kasan_poison - mark the memory range as inaccessible
 * @addr: range start address, must be aligned to KASAN_GRANULE_SIZE
 * @size: range size, must be aligned to KASAN_GRANULE_SIZE
 * @value: value that's written to metadata for the range
 * @init: whether to initialize the memory range (only for hardware tag-based)
 */
void kasan_poison(const void *addr, size_t size, u8 value, bool init);

/**
 * kasan_unpoison - mark the memory range as accessible
 * @addr: range start address, must be aligned to KASAN_GRANULE_SIZE
 * @size: range size, can be unaligned
 * @init: whether to initialize the memory range (only for hardware tag-based)
 *
 * For the tag-based modes, the @size gets aligned to KASAN_GRANULE_SIZE before
 * marking the range.
 * For the generic mode, the last granule of the memory range gets partially
 * unpoisoned based on the @size.
 */
void kasan_unpoison(const void *addr, size_t size, bool init);
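
/*
 * Example (illustrative): in generic mode (8-byte granules),
 * kasan_unpoison(p, 20, false) fully unpoisons two granules and partially
 * unpoisons the third, so accesses beyond byte 19 are still reported (see
 * kasan_poison_last_granule() below). In the software tag-based mode
 * (16-byte granules), the size is rounded up to 32, so the 12 slack bytes
 * of the second granule become accessible as well.
 */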

bool kasan_byte_accessible(const void *addr);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_GENERIC

/**
 * kasan_poison_last_granule - mark the last granule of the memory range as
 * inaccessible
 * @address: range start address, must be aligned to KASAN_GRANULE_SIZE
 * @size: range size
 *
 * This function is only available for the generic mode, as it's the only mode
 * that has partially poisoned memory granules.
 */
void kasan_poison_last_granule(const void *address, size_t size);
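
/*
 * Example (illustrative): for a 13-byte range, the first granule's shadow
 * byte is 0 (all 8 bytes accessible), and this function sets the second
 * granule's shadow byte to 5 (13 % 8), so only the first 5 bytes of that
 * granule are accessible and accesses to bytes 13..15 are reported.
 */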

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_poison_last_granule(const void *address, size_t size) { }

#endif /* CONFIG_KASAN_GENERIC */

#ifndef kasan_arch_is_ready
static inline bool kasan_arch_is_ready(void)	{ return true; }
#elif !defined(CONFIG_KASAN_GENERIC) || !defined(CONFIG_KASAN_OUTLINE)
#error kasan_arch_is_ready only works in KASAN generic outline mode!
#endif

#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

void kasan_kunit_test_suite_start(void);
void kasan_kunit_test_suite_end(void);

#ifdef CONFIG_RUST
char kasan_test_rust_uaf(void);
#else
static inline char kasan_test_rust_uaf(void) { return '\0'; }
#endif

#else /* CONFIG_KASAN_KUNIT_TEST */

static inline void kasan_kunit_test_suite_start(void) { }
static inline void kasan_kunit_test_suite_end(void) { }

#endif /* CONFIG_KASAN_KUNIT_TEST */

#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST) || IS_ENABLED(CONFIG_KASAN_MODULE_TEST)

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#endif

/*
 * Exported functions for interfaces called from assembly or from generated
 * code. Declared here to avoid warnings about missing declarations.
 */

void __asan_register_globals(void *globals, ssize_t size);
void __asan_unregister_globals(void *globals, ssize_t size);
void __asan_handle_no_return(void);
void __asan_alloca_poison(void *, ssize_t size);
void __asan_allocas_unpoison(void *stack_top, ssize_t stack_bottom);

void __asan_load1(void *);
void __asan_store1(void *);
void __asan_load2(void *);
void __asan_store2(void *);
void __asan_load4(void *);
void __asan_store4(void *);
void __asan_load8(void *);
void __asan_store8(void *);
void __asan_load16(void *);
void __asan_store16(void *);
void __asan_loadN(void *, ssize_t size);
void __asan_storeN(void *, ssize_t size);

void __asan_load1_noabort(void *);
void __asan_store1_noabort(void *);
void __asan_load2_noabort(void *);
void __asan_store2_noabort(void *);
void __asan_load4_noabort(void *);
void __asan_store4_noabort(void *);
void __asan_load8_noabort(void *);
void __asan_store8_noabort(void *);
void __asan_load16_noabort(void *);
void __asan_store16_noabort(void *);
void __asan_loadN_noabort(void *, ssize_t size);
void __asan_storeN_noabort(void *, ssize_t size);

void __asan_report_load1_noabort(void *);
void __asan_report_store1_noabort(void *);
void __asan_report_load2_noabort(void *);
void __asan_report_store2_noabort(void *);
void __asan_report_load4_noabort(void *);
void __asan_report_store4_noabort(void *);
void __asan_report_load8_noabort(void *);
void __asan_report_store8_noabort(void *);
void __asan_report_load16_noabort(void *);
void __asan_report_store16_noabort(void *);
void __asan_report_load_n_noabort(void *, ssize_t size);
void __asan_report_store_n_noabort(void *, ssize_t size);

void __asan_set_shadow_00(const void *addr, ssize_t size);
void __asan_set_shadow_f1(const void *addr, ssize_t size);
void __asan_set_shadow_f2(const void *addr, ssize_t size);
void __asan_set_shadow_f3(const void *addr, ssize_t size);
void __asan_set_shadow_f5(const void *addr, ssize_t size);
void __asan_set_shadow_f8(const void *addr, ssize_t size);

void *__asan_memset(void *addr, int c, ssize_t len);
void *__asan_memmove(void *dest, const void *src, ssize_t len);
void *__asan_memcpy(void *dest, const void *src, ssize_t len);

void __hwasan_load1_noabort(void *);
void __hwasan_store1_noabort(void *);
void __hwasan_load2_noabort(void *);
void __hwasan_store2_noabort(void *);
void __hwasan_load4_noabort(void *);
void __hwasan_store4_noabort(void *);
void __hwasan_load8_noabort(void *);
void __hwasan_store8_noabort(void *);
void __hwasan_load16_noabort(void *);
void __hwasan_store16_noabort(void *);
void __hwasan_loadN_noabort(void *, ssize_t size);
void __hwasan_storeN_noabort(void *, ssize_t size);

void __hwasan_tag_memory(void *, u8 tag, ssize_t size);

void *__hwasan_memset(void *addr, int c, ssize_t len);
void *__hwasan_memmove(void *dest, const void *src, ssize_t len);
void *__hwasan_memcpy(void *dest, const void *src, ssize_t len);

void kasan_tag_mismatch(void *addr, unsigned long access_info,
			unsigned long ret_ip);

#endif /* __MM_KASAN_KASAN_H */