/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 ARM Ltd.
 */
#ifndef __ASM_MTE_KASAN_H
#define __ASM_MTE_KASAN_H

#include <asm/compiler.h>
#include <asm/mte-def.h>

#ifndef __ASSEMBLY__

#include <linux/types.h>

#ifdef CONFIG_ARM64_MTE

/*
 * These functions are meant to be only used from KASAN runtime through
 * the arch_*() interface defined in asm/memory.h.
 * These functions don't include system_supports_mte() checks,
 * as KASAN only calls them when MTE is supported and enabled.
 */

static inline u8 mte_get_ptr_tag(void *ptr)
{
	/* Note: The format of KASAN tags is 0xF<x> */
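	/* e.g. tag bits [59:56] == 0x3 give the KASAN tag 0xF3. */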
	u8 tag = 0xF0 | (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);

	return tag;
}

/* Get allocation tag for the address. */
static inline u8 mte_get_mem_tag(void *addr)
{
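	/*
	 * LDG loads the allocation tag of the 16-byte granule containing
	 * addr into the tag field (bits 59:56) of the register itself;
	 * the address bits are preserved.
	 */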
	asm(__MTE_PREAMBLE "ldg %0, [%0]"
	    : "+r" (addr));

	return mte_get_ptr_tag(addr);
}

/* Generate a random tag. */
static inline u8 mte_get_random_tag(void)
{
	void *addr;

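	/*
	 * IRG inserts a random allocation tag into the register; tag values
	 * listed in GCR_EL1.Exclude are never generated.
	 */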
	asm(__MTE_PREAMBLE "irg %0, %0"
	    : "=r" (addr));

	return mte_get_ptr_tag(addr);
}

static inline u64 __stg_post(u64 p)
{
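	/*
	 * STG writes the tag of p to the granule at [p], then post-increments
	 * p by MTE_GRANULE_SIZE (16 bytes).
	 */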
	asm volatile(__MTE_PREAMBLE "stg %0, [%0], #16"
		     : "+r"(p)
		     :
		     : "memory");
	return p;
}

static inline u64 __stzg_post(u64 p)
{
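	/* STZG additionally zeroes the 16-byte granule while tagging it. */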
	asm volatile(__MTE_PREAMBLE "stzg %0, [%0], #16"
		     : "+r"(p)
		     :
		     : "memory");
	return p;
}

static inline void __dc_gva(u64 p)
{
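	/* DC GVA tags an entire DCZID_EL0-sized block in one operation. */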
	asm volatile(__MTE_PREAMBLE "dc gva, %0" : : "r"(p) : "memory");
}

static inline void __dc_gzva(u64 p)
{
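	/* DC GZVA zeroes the block as well as tagging it. */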
	asm volatile(__MTE_PREAMBLE "dc gzva, %0" : : "r"(p) : "memory");
}

/*
 * Assign allocation tags for a region of memory based on the pointer tag.
 * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned, and the
 * size must be MTE_GRANULE_SIZE aligned as well.
 */
static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
					 bool init)
{
	u64 curr, mask, dczid, dczid_bs, dczid_dzp, end1, end2, end3;

	/* Read DC G(Z)VA block size from the system register. */
	dczid = read_cpuid(DCZID_EL0);
	dczid_bs = 4ul << (dczid & 0xf);
	dczid_dzp = (dczid >> 4) & 1;
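	/*
	 * e.g. DCZID_EL0.BS == 4 gives 4 << 4 == 64-byte blocks; DZP (bit 4)
	 * set means DC (G)ZVA is prohibited and only STG/STZG may be used.
	 */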

	curr = (u64)__tag_set(addr, tag);
	mask = dczid_bs - 1;
	/* STG/STZG up to the end of the first block. */
	end1 = curr | mask;
	end3 = curr + size;
	/* DC GVA / GZVA in [end1, end2) */
	end2 = end3 & ~mask;
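	/*
	 * e.g. with 64-byte blocks, curr == 0x1010 and size == 0x100:
	 * end1 == 0x103f, end2 == 0x1100, end3 == 0x1110, so STG/STZG
	 * covers [0x1010, 0x1040), DC covers [0x1040, 0x1100) and the
	 * tail loop covers [0x1100, 0x1110).
	 */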

	/*
	 * The following code uses STG on the first DC GVA block even if the
	 * start address is aligned - it appears to be faster than an alignment
	 * check + conditional branch. Also, if the range size is at least 2 DC
	 * GVA blocks, the first two loops can use post-condition to save one
	 * branch each.
	 */
#define SET_MEMTAG_RANGE(stg_post, dc_gva)				\
	do {								\
		if (!dczid_dzp && size >= 2 * dczid_bs) {		\
			do {						\
				curr = stg_post(curr);			\
			} while (curr < end1);				\
									\
			do {						\
				dc_gva(curr);				\
				curr += dczid_bs;			\
			} while (curr < end2);				\
		}							\
									\
		while (curr < end3)					\
			curr = stg_post(curr);				\
	} while (0)

	if (init)
		SET_MEMTAG_RANGE(__stzg_post, __dc_gzva);
	else
		SET_MEMTAG_RANGE(__stg_post, __dc_gva);
#undef SET_MEMTAG_RANGE
}
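
/*
 * Sketch of how the arch_*() interface in asm/memory.h maps onto these
 * helpers (illustrative; see asm/memory.h for the authoritative wiring):
 *
 *	#define arch_get_mem_tag(addr)	mte_get_mem_tag(addr)
 *	#define arch_get_random_tag()	mte_get_random_tag()
 */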

void mte_enable_kernel_sync(void);
void mte_enable_kernel_async(void);
void mte_enable_kernel_asymm(void);

#else /* CONFIG_ARM64_MTE */

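/*
 * Stubs for !CONFIG_ARM64_MTE. 0xFF is the tag of an untagged kernel
 * pointer, so tag comparisons by callers degrade to no-ops.
 */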
static inline u8 mte_get_ptr_tag(void *ptr)
{
	return 0xFF;
}

static inline u8 mte_get_mem_tag(void *addr)
{
	return 0xFF;
}

static inline u8 mte_get_random_tag(void)
{
	return 0xFF;
}

static inline void mte_set_mem_tag_range(void *addr, size_t size,
					 u8 tag, bool init)
{
}

static inline void mte_enable_kernel_sync(void)
{
}

static inline void mte_enable_kernel_async(void)
{
}

static inline void mte_enable_kernel_asymm(void)
{
}

#endif /* CONFIG_ARM64_MTE */

#endif /* __ASSEMBLY__ */

#endif /* __ASM_MTE_KASAN_H */