// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 ARM Limited

#define _GNU_SOURCE

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#include "kselftest.h"
#include "mte_common_util.h"
#include "mte_def.h"

#define OVERFLOW_RANGE MT_GRANULE_SIZE

static int sizes[] = {
	1, 555, 1033, MT_GRANULE_SIZE - 1, MT_GRANULE_SIZE,
	/* page size - 1 */ 0, /* page size */ 0, /* page size + 1 */ 0
};

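/* Tag combinations for the source/destination buffers in the block tests */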
enum mte_block_test_alloc {
	UNTAGGED_TAGGED,
	TAGGED_UNTAGGED,
	TAGGED_TAGGED,
	BLOCK_ALLOC_MAX,
};

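/*
 * Fill a tagged buffer byte by byte within its bounds and verify that no
 * tag check fault is raised and that the written values read back intact.
 */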
static int check_buffer_by_byte(int mem_type, int mode)
{
	char *ptr;
	int i, j, item;
	bool err;

	mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
	item = sizeof(sizes)/sizeof(int);

	for (i = 0; i < item; i++) {
		ptr = (char *)mte_allocate_memory(sizes[i], mem_type, 0, true);
		if (check_allocated_memory(ptr, sizes[i], mem_type, true) != KSFT_PASS)
			return KSFT_FAIL;
		mte_initialize_current_context(mode, (uintptr_t)ptr, sizes[i]);
		/* Set some value in tagged memory */
		for (j = 0; j < sizes[i]; j++)
			ptr[j] = '1';
		mte_wait_after_trig();
		err = cur_mte_cxt.fault_valid;
		/* Check whether the buffer is filled. */
		for (j = 0; j < sizes[i] && !err; j++) {
			if (ptr[j] != '1')
				err = true;
		}
		mte_free_memory((void *)ptr, sizes[i], mem_type, true);

		if (err)
			break;
	}
	if (!err)
		return KSFT_PASS;
	else
		return KSFT_FAIL;
}

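/*
 * Write backwards from the end of a tagged buffer into the untagged
 * underflow area below it and verify the fault behaviour expected for the
 * given tag check mode.
 */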
static int check_buffer_underflow_by_byte(int mem_type, int mode,
					  int underflow_range)
{
	char *ptr;
	int i, j, item, last_index;
	bool err;
	char *und_ptr = NULL;

	mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
	item = sizeof(sizes)/sizeof(int);
	for (i = 0; i < item; i++) {
		ptr = (char *)mte_allocate_memory_tag_range(sizes[i], mem_type, 0,
							    underflow_range, 0);
		if (check_allocated_memory_range(ptr, sizes[i], mem_type,
						 underflow_range, 0) != KSFT_PASS)
			return KSFT_FAIL;

		mte_initialize_current_context(mode, (uintptr_t)ptr, -underflow_range);
		last_index = 0;
		/* Set some value in tagged memory and make the buffer underflow */
		for (j = sizes[i] - 1; (j >= -underflow_range) &&
				       !cur_mte_cxt.fault_valid; j--) {
			ptr[j] = '1';
			last_index = j;
		}
		mte_wait_after_trig();
		err = false;
		/* Check whether the buffer is filled */
		for (j = 0; j < sizes[i]; j++) {
			if (ptr[j] != '1') {
				err = true;
				ksft_print_msg("Buffer is not filled at index:%d of ptr:0x%lx\n",
						j, (unsigned long)ptr);
				break;
			}
		}
		if (err)
			goto check_buffer_underflow_by_byte_err;

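		/*
		 * Expected behaviour depends on the tag check mode: no fault
		 * and a fully written underflow area for MTE_NONE_ERR, a
		 * deferred (imprecise) fault for MTE_ASYNC_ERR and an
		 * immediate fault on the first out-of-bounds byte for
		 * MTE_SYNC_ERR.
		 */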
		switch (mode) {
		case MTE_NONE_ERR:
			if (cur_mte_cxt.fault_valid || last_index != -underflow_range) {
				err = true;
				break;
			}
			/* There was no fault, so the underflow area should be filled */
			und_ptr = (char *) MT_CLEAR_TAG((size_t) ptr - underflow_range);
			for (j = 0; j < underflow_range; j++) {
				if (und_ptr[j] != '1') {
					err = true;
					break;
				}
			}
			break;
		case MTE_ASYNC_ERR:
			/* An imprecise fault should occur, otherwise return an error */
			if (!cur_mte_cxt.fault_valid) {
				err = true;
				break;
			}
			/*
			 * The imprecise fault is checked after the write to the buffer,
			 * so the underflow area before the fault should be filled.
			 */
			und_ptr = (char *) MT_CLEAR_TAG((size_t) ptr);
			for (j = last_index; j < 0; j++) {
				if (und_ptr[j] != '1') {
					err = true;
					break;
				}
			}
			break;
		case MTE_SYNC_ERR:
			/* A precise fault should occur, otherwise return an error */
			if (!cur_mte_cxt.fault_valid || (last_index != (-1))) {
				err = true;
				break;
			}
			/* Underflow area should not be filled */
			und_ptr = (char *) MT_CLEAR_TAG((size_t) ptr);
			if (und_ptr[-1] == '1')
				err = true;
			break;
		default:
			err = true;
			break;
		}
check_buffer_underflow_by_byte_err:
		mte_free_memory_tag_range((void *)ptr, sizes[i], mem_type, underflow_range, 0);
		if (err)
			break;
	}
	return (err ? KSFT_FAIL : KSFT_PASS);
}

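/*
 * Write forwards past the end of a tagged buffer into the untagged overflow
 * area above it and verify the fault behaviour expected for the given
 * tag check mode.
 */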
static int check_buffer_overflow_by_byte(int mem_type, int mode,
					 int overflow_range)
{
	char *ptr;
	int i, j, item, last_index;
	bool err;
	size_t tagged_size, overflow_size;
	char *over_ptr = NULL;

	mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
	item = sizeof(sizes)/sizeof(int);
	for (i = 0; i < item; i++) {
		ptr = (char *)mte_allocate_memory_tag_range(sizes[i], mem_type, 0,
							    0, overflow_range);
		if (check_allocated_memory_range(ptr, sizes[i], mem_type,
						 0, overflow_range) != KSFT_PASS)
			return KSFT_FAIL;

		tagged_size = MT_ALIGN_UP(sizes[i]);

		mte_initialize_current_context(mode, (uintptr_t)ptr, sizes[i] + overflow_range);

		/* Set some value in tagged memory and make the buffer overflow */
		for (j = 0, last_index = 0; (j < (sizes[i] + overflow_range)) &&
					    !cur_mte_cxt.fault_valid; j++) {
			ptr[j] = '1';
			last_index = j;
		}
		mte_wait_after_trig();
		err = false;
		/* Check whether the buffer is filled */
		for (j = 0; j < sizes[i]; j++) {
			if (ptr[j] != '1') {
				err = true;
				ksft_print_msg("Buffer is not filled at index:%d of ptr:0x%lx\n",
						j, (unsigned long)ptr);
				break;
			}
		}
		if (err)
			goto check_buffer_overflow_by_byte_err;

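		/*
		 * Tags are assigned per granule, so the buffer is effectively
		 * tagged up to MT_ALIGN_UP(sizes[i]); only the overflow_size
		 * bytes beyond that boundary carry a mismatching tag.
		 */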
		overflow_size = overflow_range - (tagged_size - sizes[i]);

		switch (mode) {
		case MTE_NONE_ERR:
			if (cur_mte_cxt.fault_valid ||
			    (last_index != (sizes[i] + overflow_range - 1))) {
				err = true;
				break;
			}
			/* There was no fault, so the overflow area should be filled */
			over_ptr = (char *) MT_CLEAR_TAG((size_t) ptr + tagged_size);
			for (j = 0; j < overflow_size; j++) {
				if (over_ptr[j] != '1') {
					err = true;
					break;
				}
			}
			break;
		case MTE_ASYNC_ERR:
			/* An imprecise fault should occur, otherwise return an error */
			if (!cur_mte_cxt.fault_valid) {
				err = true;
				break;
			}
			/*
			 * The imprecise fault is checked after the write to the buffer,
			 * so the overflow area should be filled before the fault.
			 */
			over_ptr = (char *) MT_CLEAR_TAG((size_t) ptr);
			for (j = tagged_size; j < last_index; j++) {
				if (over_ptr[j] != '1') {
					err = true;
					break;
				}
			}
			break;
		case MTE_SYNC_ERR:
			/* A precise fault should occur, otherwise return an error */
			if (!cur_mte_cxt.fault_valid || (last_index != tagged_size)) {
				err = true;
				break;
			}
			/* Overflow area should not be filled */
			over_ptr = (char *) MT_CLEAR_TAG((size_t) ptr + tagged_size);
			for (j = 0; j < overflow_size; j++) {
				if (over_ptr[j] == '1')
					err = true;
			}
			break;
		default:
			err = true;
			break;
		}
check_buffer_overflow_by_byte_err:
		mte_free_memory_tag_range((void *)ptr, sizes[i], mem_type, 0, overflow_range);
		if (err)
			break;
	}
	return (err ? KSFT_FAIL : KSFT_PASS);
}

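/*
 * memset() a source buffer and memcpy() it to a destination buffer for each
 * combination of tagged and untagged allocations, then verify that the copy
 * completed without a tag check fault and that the data arrived intact.
 */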
static int check_buffer_by_block_iterate(int mem_type, int mode, size_t size)
{
	char *src, *dst;
	int j, result = KSFT_PASS;
	enum mte_block_test_alloc alloc_type = UNTAGGED_TAGGED;

	for (alloc_type = UNTAGGED_TAGGED; alloc_type < (int) BLOCK_ALLOC_MAX; alloc_type++) {
		switch (alloc_type) {
		case UNTAGGED_TAGGED:
			src = (char *)mte_allocate_memory(size, mem_type, 0, false);
			if (check_allocated_memory(src, size, mem_type, false) != KSFT_PASS)
				return KSFT_FAIL;

			dst = (char *)mte_allocate_memory(size, mem_type, 0, true);
			if (check_allocated_memory(dst, size, mem_type, true) != KSFT_PASS) {
				mte_free_memory((void *)src, size, mem_type, false);
				return KSFT_FAIL;
			}

			break;
		case TAGGED_UNTAGGED:
			dst = (char *)mte_allocate_memory(size, mem_type, 0, false);
			if (check_allocated_memory(dst, size, mem_type, false) != KSFT_PASS)
				return KSFT_FAIL;

			src = (char *)mte_allocate_memory(size, mem_type, 0, true);
			if (check_allocated_memory(src, size, mem_type, true) != KSFT_PASS) {
				mte_free_memory((void *)dst, size, mem_type, false);
				return KSFT_FAIL;
			}
			break;
		case TAGGED_TAGGED:
			src = (char *)mte_allocate_memory(size, mem_type, 0, true);
			if (check_allocated_memory(src, size, mem_type, true) != KSFT_PASS)
				return KSFT_FAIL;

			dst = (char *)mte_allocate_memory(size, mem_type, 0, true);
			if (check_allocated_memory(dst, size, mem_type, true) != KSFT_PASS) {
				mte_free_memory((void *)src, size, mem_type, true);
				return KSFT_FAIL;
			}
			break;
		default:
			return KSFT_FAIL;
		}

		cur_mte_cxt.fault_valid = false;
		result = KSFT_PASS;
		mte_initialize_current_context(mode, (uintptr_t)dst, size);
		/* Set some value in memory and copy. */
		memset((void *)src, (int)'1', size);
		memcpy((void *)dst, (void *)src, size);
		mte_wait_after_trig();
		if (cur_mte_cxt.fault_valid) {
			result = KSFT_FAIL;
			goto check_buffer_by_block_err;
		}
		/* Check whether the copy succeeded and the buffer is filled. */
		for (j = 0; j < size; j++) {
			if (src[j] != dst[j] || src[j] != '1') {
				result = KSFT_FAIL;
				break;
			}
		}
check_buffer_by_block_err:
		mte_free_memory((void *)src, size, mem_type,
				MT_FETCH_TAG((uintptr_t)src) ? true : false);
		mte_free_memory((void *)dst, size, mem_type,
				MT_FETCH_TAG((uintptr_t)dst) ? true : false);
		if (result != KSFT_PASS)
			return result;
	}
	return result;
}

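/* Run the block copy test above for every size in sizes[] */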
static int check_buffer_by_block(int mem_type, int mode)
{
	int i, item, result = KSFT_PASS;

	mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
	item = sizeof(sizes)/sizeof(int);
	cur_mte_cxt.fault_valid = false;
	for (i = 0; i < item; i++) {
		result = check_buffer_by_block_iterate(mem_type, mode, sizes[i]);
		if (result != KSFT_PASS)
			break;
	}
	return result;
}

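/* Check that every granule in ptr[0..size) carries the expected tag */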
static int compare_memory_tags(char *ptr, size_t size, int tag)
{
	int i, new_tag;

	for (i = 0; i < size; i += MT_GRANULE_SIZE) {
		new_tag = MT_FETCH_TAG((uintptr_t)(mte_get_tag_address(ptr + i)));
		if (tag != new_tag) {
			ksft_print_msg("FAIL: mte tag mismatch\n");
			return KSFT_FAIL;
		}
	}
	return KSFT_PASS;
}

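/*
 * Verify that both anonymous and file-backed mappings come up with all
 * memory tags initialised to zero.
 */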
static int check_memory_initial_tags(int mem_type, int mode, int mapping)
{
	char *ptr;
	int run, fd;
	int total = sizeof(sizes)/sizeof(int);

	mte_switch_mode(mode, MTE_ALLOW_NON_ZERO_TAG);
	for (run = 0; run < total; run++) {
		/* Check initial tags for anonymous mmap */
		ptr = (char *)mte_allocate_memory(sizes[run], mem_type, mapping, false);
		if (check_allocated_memory(ptr, sizes[run], mem_type, false) != KSFT_PASS)
			return KSFT_FAIL;
		if (compare_memory_tags(ptr, sizes[run], 0) != KSFT_PASS) {
			mte_free_memory((void *)ptr, sizes[run], mem_type, false);
			return KSFT_FAIL;
		}
		mte_free_memory((void *)ptr, sizes[run], mem_type, false);

		/* Check initial tags for file mmap */
		fd = create_temp_file();
		if (fd == -1)
			return KSFT_FAIL;
		ptr = (char *)mte_allocate_file_memory(sizes[run], mem_type, mapping, false, fd);
		if (check_allocated_memory(ptr, sizes[run], mem_type, false) != KSFT_PASS) {
			close(fd);
			return KSFT_FAIL;
		}
		if (compare_memory_tags(ptr, sizes[run], 0) != KSFT_PASS) {
			mte_free_memory((void *)ptr, sizes[run], mem_type, false);
			close(fd);
			return KSFT_FAIL;
		}
		mte_free_memory((void *)ptr, sizes[run], mem_type, false);
		close(fd);
	}
	return KSFT_PASS;
}

int main(int argc, char *argv[])
{
	int err;
	size_t page_size = getpagesize();
	int item = sizeof(sizes)/sizeof(int);

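	/* Fill in the page-size-dependent entries of sizes[] at run time */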
	sizes[item - 3] = page_size - 1;
	sizes[item - 2] = page_size;
	sizes[item - 1] = page_size + 1;

	err = mte_default_setup();
	if (err)
		return err;

	/* Register SIGSEGV handler */
	mte_register_signal(SIGSEGV, mte_default_handler);

	/* Set test plan */
	ksft_set_plan(20);

	/* Buffer by byte tests */
	evaluate_test(check_buffer_by_byte(USE_MMAP, MTE_SYNC_ERR),
	"Check buffer correctness by byte with sync err mode and mmap memory\n");
	evaluate_test(check_buffer_by_byte(USE_MMAP, MTE_ASYNC_ERR),
	"Check buffer correctness by byte with async err mode and mmap memory\n");
	evaluate_test(check_buffer_by_byte(USE_MPROTECT, MTE_SYNC_ERR),
	"Check buffer correctness by byte with sync err mode and mmap/mprotect memory\n");
	evaluate_test(check_buffer_by_byte(USE_MPROTECT, MTE_ASYNC_ERR),
	"Check buffer correctness by byte with async err mode and mmap/mprotect memory\n");

	/* Check buffer underflow with an underflow size of MT_GRANULE_SIZE (16 bytes) */
	evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_SYNC_ERR, MT_GRANULE_SIZE),
	"Check buffer write underflow by byte with sync mode and mmap memory\n");
	evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_ASYNC_ERR, MT_GRANULE_SIZE),
	"Check buffer write underflow by byte with async mode and mmap memory\n");
	evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_NONE_ERR, MT_GRANULE_SIZE),
	"Check buffer write underflow by byte with tag check fault ignore and mmap memory\n");

	/* Check buffer underflow with an underflow size of one page */
	evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_SYNC_ERR, page_size),
	"Check buffer write underflow by byte with sync mode and mmap memory\n");
	evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_ASYNC_ERR, page_size),
	"Check buffer write underflow by byte with async mode and mmap memory\n");
	evaluate_test(check_buffer_underflow_by_byte(USE_MMAP, MTE_NONE_ERR, page_size),
	"Check buffer write underflow by byte with tag check fault ignore and mmap memory\n");

	/* Check buffer overflow with an overflow size of MT_GRANULE_SIZE (16 bytes) */
	evaluate_test(check_buffer_overflow_by_byte(USE_MMAP, MTE_SYNC_ERR, MT_GRANULE_SIZE),
	"Check buffer write overflow by byte with sync mode and mmap memory\n");
	evaluate_test(check_buffer_overflow_by_byte(USE_MMAP, MTE_ASYNC_ERR, MT_GRANULE_SIZE),
	"Check buffer write overflow by byte with async mode and mmap memory\n");
	evaluate_test(check_buffer_overflow_by_byte(USE_MMAP, MTE_NONE_ERR, MT_GRANULE_SIZE),
	"Check buffer write overflow by byte with tag fault ignore mode and mmap memory\n");

	/* Buffer by block tests */
	evaluate_test(check_buffer_by_block(USE_MMAP, MTE_SYNC_ERR),
	"Check buffer write correctness by block with sync mode and mmap memory\n");
	evaluate_test(check_buffer_by_block(USE_MMAP, MTE_ASYNC_ERR),
	"Check buffer write correctness by block with async mode and mmap memory\n");
	evaluate_test(check_buffer_by_block(USE_MMAP, MTE_NONE_ERR),
	"Check buffer write correctness by block with tag fault ignore and mmap memory\n");

	/* Initial tags are supposed to be 0 */
	evaluate_test(check_memory_initial_tags(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
	"Check initial tags with private mapping, sync error mode and mmap memory\n");
	evaluate_test(check_memory_initial_tags(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE),
	"Check initial tags with private mapping, sync error mode and mmap/mprotect memory\n");
	evaluate_test(check_memory_initial_tags(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED),
	"Check initial tags with shared mapping, sync error mode and mmap memory\n");
	evaluate_test(check_memory_initial_tags(USE_MPROTECT, MTE_SYNC_ERR, MAP_SHARED),
	"Check initial tags with shared mapping, sync error mode and mmap/mprotect memory\n");

	mte_restore_setup();
	ksft_print_cnts();
	return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
}