// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 ARM Limited

#include <fcntl.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include <linux/auxvec.h>
#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#include <asm/hwcap.h>

#include "kselftest.h"
#include "mte_common_util.h"
#include "mte_def.h"

#define INIT_BUFFER_SIZE 256

struct mte_fault_cxt cur_mte_cxt;
static unsigned int mte_cur_mode;
static unsigned int mte_cur_pstate_tco;

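/*
 * Default SIGSEGV/SIGBUS handler for the MTE tests: it validates the
 * reported fault against the expected context in cur_mte_cxt and, for
 * precise (synchronous) tag check faults, skips the faulting instruction.
 */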
void mte_default_handler(int signum, siginfo_t *si, void *uc)
{
	unsigned long addr = (unsigned long)si->si_addr;

	if (signum == SIGSEGV) {
#ifdef DEBUG
		ksft_print_msg("INFO: SIGSEGV signal at pc=%lx, fault addr=%lx, si_code=%x\n",
			       ((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code);
#endif
		if (si->si_code == SEGV_MTEAERR) {
			if (cur_mte_cxt.trig_si_code == si->si_code)
				cur_mte_cxt.fault_valid = true;
			return;
		}
		/* Compare the context for precise error */
		else if (si->si_code == SEGV_MTESERR) {
			if (cur_mte_cxt.trig_si_code == si->si_code &&
			    ((cur_mte_cxt.trig_range >= 0 &&
			      addr >= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
			      addr <= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
			     (cur_mte_cxt.trig_range < 0 &&
			      addr <= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
			      addr >= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)))) {
				cur_mte_cxt.fault_valid = true;
				/* Adjust the pc by 4 to skip the faulting instruction */
				((ucontext_t *)uc)->uc_mcontext.pc += 4;
			} else {
				ksft_print_msg("Invalid MTE synchronous exception caught!\n");
				exit(1);
			}
		} else {
			ksft_print_msg("Unknown SIGSEGV exception caught!\n");
			exit(1);
		}
	} else if (signum == SIGBUS) {
		ksft_print_msg("INFO: SIGBUS signal at pc=%lx, fault addr=%lx, si_code=%x\n",
			       ((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code);
		if ((cur_mte_cxt.trig_range >= 0 &&
		     addr >= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
		     addr <= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
		    (cur_mte_cxt.trig_range < 0 &&
		     addr <= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
		     addr >= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range))) {
			cur_mte_cxt.fault_valid = true;
			/* Adjust the pc by 4 to skip the faulting instruction */
			((ucontext_t *)uc)->uc_mcontext.pc += 4;
		}
	}
}

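/* Install 'handler' as the SA_SIGINFO handler for the given signal. */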
void mte_register_signal(int signal, void (*handler)(int, siginfo_t *, void *))
{
	struct sigaction sa;

	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(signal, &sa, NULL);
}

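/*
 * Yield the CPU after triggering a tag fault so that a pending asynchronous
 * MTE fault gets reported before the test checks cur_mte_cxt.
 */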
void mte_wait_after_trig(void)
{
	sched_yield();
}

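/*
 * Insert a random allocation tag into 'ptr' and tag 'size' bytes of memory
 * with it; 'ptr' must be MTE-granule aligned. Returns the tagged pointer.
 */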
void *mte_insert_tags(void *ptr, size_t size)
{
	void *tag_ptr;
	int align_size;

	if (!ptr || (unsigned long)(ptr) & MT_ALIGN_GRANULE) {
		ksft_print_msg("FAIL: Addr=%p: invalid\n", ptr);
		return NULL;
	}
	align_size = MT_ALIGN_UP(size);
	tag_ptr = mte_insert_random_tag(ptr);
	mte_set_tag_address_range(tag_ptr, align_size);
	return tag_ptr;
}

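/* Clear the allocation tags covering 'size' bytes starting at 'ptr'. */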
void mte_clear_tags(void *ptr, size_t size)
{
	if (!ptr || (unsigned long)(ptr) & MT_ALIGN_GRANULE) {
		ksft_print_msg("FAIL: Addr=%p: invalid\n", ptr);
		return;
	}
	size = MT_ALIGN_UP(size);
	ptr = (void *)MT_CLEAR_TAG((unsigned long)ptr);
	mte_clear_tag_address_range(ptr, size);
}

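/*
 * Common allocator: returns 'size' usable bytes, optionally surrounded by
 * 'range_before'/'range_after' extra bytes, backed by malloc(), anonymous
 * mmap() or a file mapping (fd >= 0). For the mmap-based types, PROT_MTE is
 * applied either at mmap() time (USE_MMAP) or via mprotect() (USE_MPROTECT),
 * and the buffer is tagged when 'tags' is set.
 */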
static void *__mte_allocate_memory_range(size_t size, int mem_type, int mapping,
					 size_t range_before, size_t range_after,
					 bool tags, int fd)
{
	void *ptr;
	int prot_flag, map_flag;
	size_t entire_size = size + range_before + range_after;

	if (mem_type != USE_MALLOC && mem_type != USE_MMAP &&
	    mem_type != USE_MPROTECT) {
		ksft_print_msg("FAIL: Invalid allocate request\n");
		return NULL;
	}
	if (mem_type == USE_MALLOC)
		return malloc(entire_size) + range_before;

	prot_flag = PROT_READ | PROT_WRITE;
	if (mem_type == USE_MMAP)
		prot_flag |= PROT_MTE;

	map_flag = mapping;
	if (fd == -1)
		map_flag = MAP_ANONYMOUS | map_flag;
	if (!(mapping & MAP_SHARED))
		map_flag |= MAP_PRIVATE;
	ptr = mmap(NULL, entire_size, prot_flag, map_flag, fd, 0);
	if (ptr == MAP_FAILED) {
		ksft_print_msg("FAIL: mmap allocation\n");
		return NULL;
	}
	if (mem_type == USE_MPROTECT) {
		if (mprotect(ptr, entire_size, prot_flag | PROT_MTE)) {
			munmap(ptr, entire_size);
			ksft_print_msg("FAIL: mprotect PROT_MTE property\n");
			return NULL;
		}
	}
	if (tags)
		ptr = mte_insert_tags(ptr + range_before, size);
	return ptr;
}

void *mte_allocate_memory_tag_range(size_t size, int mem_type, int mapping,
				    size_t range_before, size_t range_after)
{
	return __mte_allocate_memory_range(size, mem_type, mapping, range_before,
					   range_after, true, -1);
}

void *mte_allocate_memory(size_t size, int mem_type, int mapping, bool tags)
{
	return __mte_allocate_memory_range(size, mem_type, mapping, 0, 0, tags, -1);
}

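/*
 * Grow the file behind 'fd' to at least 'size' bytes (written in
 * INIT_BUFFER_SIZE chunks) and map it with the requested MTE protection.
 */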
void *mte_allocate_file_memory(size_t size, int mem_type, int mapping, bool tags, int fd)
{
	int index;
	char buffer[INIT_BUFFER_SIZE];

	if (mem_type != USE_MPROTECT && mem_type != USE_MMAP) {
		ksft_print_msg("FAIL: Invalid mmap file request\n");
		return NULL;
	}
	/* Initialize the file for mappable size */
	lseek(fd, 0, SEEK_SET);
	for (index = INIT_BUFFER_SIZE; index < size; index += INIT_BUFFER_SIZE)
		write(fd, buffer, INIT_BUFFER_SIZE);
	index -= INIT_BUFFER_SIZE;
	write(fd, buffer, size - index);
	return __mte_allocate_memory_range(size, mem_type, mapping, 0, 0, tags, fd);
}

void *mte_allocate_file_memory_tag_range(size_t size, int mem_type, int mapping,
					 size_t range_before, size_t range_after, int fd)
{
	int index;
	char buffer[INIT_BUFFER_SIZE];
	int map_size = size + range_before + range_after;

	if (mem_type != USE_MPROTECT && mem_type != USE_MMAP) {
		ksft_print_msg("FAIL: Invalid mmap file request\n");
		return NULL;
	}
	/* Initialize the file for mappable size */
	lseek(fd, 0, SEEK_SET);
	for (index = INIT_BUFFER_SIZE; index < map_size; index += INIT_BUFFER_SIZE)
		write(fd, buffer, INIT_BUFFER_SIZE);
	index -= INIT_BUFFER_SIZE;
	write(fd, buffer, map_size - index);
	return __mte_allocate_memory_range(size, mem_type, mapping, range_before,
					   range_after, true, fd);
}

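/*
 * Common free routine: clears the tags when requested and releases the whole
 * region, including any extra ranges around the buffer.
 */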
static void __mte_free_memory_range(void *ptr, size_t size, int mem_type,
				    size_t range_before, size_t range_after, bool tags)
{
	switch (mem_type) {
	case USE_MALLOC:
		free(ptr - range_before);
		break;
	case USE_MMAP:
	case USE_MPROTECT:
		if (tags)
			mte_clear_tags(ptr, size);
		munmap(ptr - range_before, size + range_before + range_after);
		break;
	default:
		ksft_print_msg("FAIL: Invalid free request\n");
		break;
	}
}

void mte_free_memory_tag_range(void *ptr, size_t size, int mem_type,
			       size_t range_before, size_t range_after)
{
	__mte_free_memory_range(ptr, size, mem_type, range_before, range_after, true);
}

void mte_free_memory(void *ptr, size_t size, int mem_type, bool tags)
{
	__mte_free_memory_range(ptr, size, mem_type, 0, 0, tags);
}

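/*
 * Record the fault that the next test step is expected to trigger: the
 * target address range and the si_code matching the given MTE mode.
 */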
void mte_initialize_current_context(int mode, uintptr_t ptr, ssize_t range)
{
	cur_mte_cxt.fault_valid = false;
	cur_mte_cxt.trig_addr = ptr;
	cur_mte_cxt.trig_range = range;
	if (mode == MTE_SYNC_ERR)
		cur_mte_cxt.trig_si_code = SEGV_MTESERR;
	else if (mode == MTE_ASYNC_ERR)
		cur_mte_cxt.trig_si_code = SEGV_MTEAERR;
	else
		cur_mte_cxt.trig_si_code = 0;
}

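/*
 * Enable the tagged address ABI and configure the MTE tag check fault mode
 * (sync/async/none) and tag inclusion mask via PR_SET_TAGGED_ADDR_CTRL.
 */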
int mte_switch_mode(int mte_option, unsigned long incl_mask)
{
	unsigned long en = 0;

	if (!(mte_option == MTE_SYNC_ERR || mte_option == MTE_ASYNC_ERR ||
	      mte_option == MTE_NONE_ERR) || incl_mask > MTE_ALLOW_NON_ZERO_TAG) {
		ksft_print_msg("FAIL: Invalid mte config option\n");
		return -EINVAL;
	}
	en = PR_TAGGED_ADDR_ENABLE;
	if (mte_option == MTE_SYNC_ERR)
		en |= PR_MTE_TCF_SYNC;
	else if (mte_option == MTE_ASYNC_ERR)
		en |= PR_MTE_TCF_ASYNC;
	else if (mte_option == MTE_NONE_ERR)
		en |= PR_MTE_TCF_NONE;

	en |= (incl_mask << PR_MTE_TAG_SHIFT);
	/* Enable address tagging ABI, mte error reporting mode and tag inclusion mask. */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, en, 0, 0, 0) != 0) {
		ksft_print_msg("FAIL: prctl PR_SET_TAGGED_ADDR_CTRL for mte mode\n");
		return -EINVAL;
	}
	return 0;
}

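/*
 * Check for MTE support (HWCAP2_MTE), remember the current tag check fault
 * mode and PSTATE.TCO state so they can be restored later, and disable TCO.
 */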
int mte_default_setup(void)
{
	unsigned long hwcaps2 = getauxval(AT_HWCAP2);
	unsigned long en = 0;
	int ret;

	if (!(hwcaps2 & HWCAP2_MTE)) {
		ksft_print_msg("SKIP: MTE features unavailable\n");
		return KSFT_SKIP;
	}
	/* Get current mte mode */
	ret = prctl(PR_GET_TAGGED_ADDR_CTRL, en, 0, 0, 0);
	if (ret < 0) {
		ksft_print_msg("FAIL: prctl PR_GET_TAGGED_ADDR_CTRL with error = %d\n", ret);
		return KSFT_FAIL;
	}
	if (ret & PR_MTE_TCF_SYNC)
		mte_cur_mode = MTE_SYNC_ERR;
	else if (ret & PR_MTE_TCF_ASYNC)
		mte_cur_mode = MTE_ASYNC_ERR;
	else if (ret & PR_MTE_TCF_NONE)
		mte_cur_mode = MTE_NONE_ERR;

	mte_cur_pstate_tco = mte_get_pstate_tco();
	/* Disable PSTATE.TCO */
	mte_disable_pstate_tco();
	return 0;
}

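/* Restore the MTE mode and PSTATE.TCO state saved by mte_default_setup(). */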
void mte_restore_setup(void)
{
	mte_switch_mode(mte_cur_mode, MTE_ALLOW_NON_ZERO_TAG);
	if (mte_cur_pstate_tco == MT_PSTATE_TCO_EN)
		mte_enable_pstate_tco();
	else if (mte_cur_pstate_tco == MT_PSTATE_TCO_DIS)
		mte_disable_pstate_tco();
}

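/*
 * Create an unlinked temporary file on tmpfs (/dev/shm) and return its fd,
 * or 0 on failure.
 */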
int create_temp_file(void)
{
	int fd;
	char filename[] = "/dev/shm/tmp_XXXXXX";

	/* Create a file in the tmpfs filesystem */
	fd = mkstemp(&filename[0]);
	if (fd == -1) {
		ksft_print_msg("FAIL: Unable to open temporary file\n");
		return 0;
	}
	unlink(&filename[0]);
	return fd;
}