1 /*
2  * mpx-mini-test.c: routines to test Intel MPX (Memory Protection eXtensions)
3  *
4  * Written by:
5  * "Ren, Qiaowei" <qiaowei.ren@intel.com>
6  * "Wei, Gang" <gang.wei@intel.com>
7  * "Hansen, Dave" <dave.hansen@intel.com>
8  *
9  * This program is free software; you can redistribute it and/or modify it
10  * under the terms and conditions of the GNU General Public License,
11  * version 2.
12  */
13 
14 /*
15  * 2014-12-05: Dave Hansen: fixed all of the compiler warnings, and made sure
16  *	       it works on 32-bit.
17  */
18 
19 int inspect_every_this_many_mallocs = 100;
20 int zap_all_every_this_many_mallocs = 1000;
21 
22 #define _GNU_SOURCE
23 #define _LARGEFILE64_SOURCE
24 
25 #include <string.h>
26 #include <stdio.h>
27 #include <stdint.h>
28 #include <stdbool.h>
29 #include <signal.h>
30 #include <assert.h>
31 #include <stdlib.h>
32 #include <ucontext.h>
33 #include <sys/mman.h>
34 #include <sys/types.h>
35 #include <sys/stat.h>
36 #include <fcntl.h>
37 #include <unistd.h>
38 
39 #include "mpx-hw.h"
40 #include "mpx-debug.h"
41 #include "mpx-mm.h"
42 
43 #ifndef __always_inline
44 #define __always_inline inline __attribute__((always_inline))
45 #endif
46 
47 #ifndef TEST_DURATION_SECS
48 #define TEST_DURATION_SECS 3
49 #endif
50 
51 void write_int_to(char *prefix, char *file, int int_to_write)
52 {
53 	char buf[100];
54 	int fd = open(file, O_RDWR);
55 	int len;
56 	int ret;
57 
58 	assert(fd >= 0);
59 	len = snprintf(buf, sizeof(buf), "%s%d", prefix, int_to_write);
60 	assert(len >= 0);
61 	assert(len < sizeof(buf));
62 	ret = write(fd, buf, len);
63 	assert(ret == len);
64 	ret = close(fd);
65 	assert(!ret);
66 }
67 
68 void write_pid_to(char *prefix, char *file)
69 {
70 	write_int_to(prefix, file, getpid());
71 }
72 
73 void trace_me(void)
74 {
75 /* tracing events dir */
76 #define TED "/sys/kernel/debug/tracing/events/"
77 /*
78 	write_pid_to("common_pid=", TED "signal/filter");
79 	write_pid_to("common_pid=", TED "exceptions/filter");
80 	write_int_to("", TED "signal/enable", 1);
81 	write_int_to("", TED "exceptions/enable", 1);
82 */
83 	write_pid_to("", "/sys/kernel/debug/tracing/set_ftrace_pid");
84 	write_int_to("", "/sys/kernel/debug/tracing/trace", 0);
85 }
86 
87 #define test_failed() __test_failed(__FILE__, __LINE__)
88 static void __test_failed(char *f, int l)
89 {
90 	fprintf(stderr, "abort @ %s::%d\n", f, l);
91 	abort();
92 }
93 
94 /* Error Printf */
95 #define eprintf(args...)	fprintf(stderr, args)
96 
97 #ifdef __i386__
98 
99 /* i386 directory size is 4MB */
100 #define REG_IP_IDX	REG_EIP
101 #define REX_PREFIX
102 
103 #define XSAVE_OFFSET_IN_FPMEM	sizeof(struct _libc_fpstate)
104 
105 /*
106  * __cpuid() is from the Linux Kernel:
107  */
108 static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
109 		unsigned int *ecx, unsigned int *edx)
110 {
111 	/* ecx is often an input as well as an output. */
112 	asm volatile(
113 		"push %%ebx;"
114 		"cpuid;"
115 		"mov %%ebx, %1;"
116 		"pop %%ebx"
117 		: "=a" (*eax),
118 		  "=g" (*ebx),
119 		  "=c" (*ecx),
120 		  "=d" (*edx)
121 		: "0" (*eax), "2" (*ecx));
122 }
123 
124 #else /* __i386__ */
125 
126 #define REG_IP_IDX	REG_RIP
127 #define REX_PREFIX "0x48, "
128 
129 #define XSAVE_OFFSET_IN_FPMEM	0
130 
131 /*
132  * __cpuid() is from the Linux Kernel:
133  */
134 static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
135 		unsigned int *ecx, unsigned int *edx)
136 {
137 	/* ecx is often an input as well as an output. */
138 	asm volatile(
139 		"cpuid;"
140 		: "=a" (*eax),
141 		  "=b" (*ebx),
142 		  "=c" (*ecx),
143 		  "=d" (*edx)
144 		: "0" (*eax), "2" (*ecx));
145 }
146 
147 #endif /* !__i386__ */
148 
149 struct xsave_hdr_struct {
150 	uint64_t xstate_bv;
151 	uint64_t reserved1[2];
152 	uint64_t reserved2[5];
153 } __attribute__((packed));
154 
155 struct bndregs_struct {
156 	uint64_t bndregs[8];
157 } __attribute__((packed));
158 
159 struct bndcsr_struct {
160 	uint64_t cfg_reg_u;
161 	uint64_t status_reg;
162 } __attribute__((packed));
163 
164 struct xsave_struct {
165 	uint8_t fpu_sse[512];
166 	struct xsave_hdr_struct xsave_hdr;
167 	uint8_t ymm[256];
168 	uint8_t lwp[128];
169 	struct bndregs_struct bndregs;
170 	struct bndcsr_struct bndcsr;
171 } __attribute__((packed));
172 
173 uint8_t __attribute__((__aligned__(64))) buffer[4096];
174 struct xsave_struct *xsave_buf = (struct xsave_struct *)buffer;
175 
176 uint8_t __attribute__((__aligned__(64))) test_buffer[4096];
177 struct xsave_struct *xsave_test_buf = (struct xsave_struct *)test_buffer;
178 
179 uint64_t num_bnd_chk;
180 
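/*
 * The XRSTOR/XSAVE helpers below emit the instructions as raw ".byte"
 * sequences (with REX_PREFIX selecting the 64-bit form) so the test
 * still builds with assemblers that do not know the XSAVE mnemonics.
 */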
181 static __always_inline void xrstor_state(struct xsave_struct *fx, uint64_t mask)
182 {
183 	uint32_t lmask = mask;
184 	uint32_t hmask = mask >> 32;
185 
186 	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
187 		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
188 		     :   "memory");
189 }
190 
191 static __always_inline void xsave_state_1(void *_fx, uint64_t mask)
192 {
193 	uint32_t lmask = mask;
194 	uint32_t hmask = mask >> 32;
195 	unsigned char *fx = _fx;
196 
197 	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x27\n\t"
198 		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
199 		     :   "memory");
200 }
201 
202 static inline uint64_t xgetbv(uint32_t index)
203 {
204 	uint32_t eax, edx;
205 
206 	asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
207 		     : "=a" (eax), "=d" (edx)
208 		     : "c" (index));
209 	return eax + ((uint64_t)edx << 32);
210 }
211 
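/*
 * Copy the XSAVE image out of the signal frame and return BNDSTATUS
 * (bndcsr.status_reg) so the handler can tell why the #BR fired.
 */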
212 static uint64_t read_mpx_status_sig(ucontext_t *uctxt)
213 {
214 	memset(buffer, 0, sizeof(buffer));
215 	memcpy(buffer,
216 		(uint8_t *)uctxt->uc_mcontext.fpregs + XSAVE_OFFSET_IN_FPMEM,
217 		sizeof(struct xsave_struct));
218 
219 	return xsave_buf->bndcsr.status_reg;
220 }
221 
222 #include <pthread.h>
223 
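/*
 * Tiny x86 instruction-length decoder for MPX instructions: skip any
 * legacy/REX prefixes, the 0x0f escape, the opcode byte and the
 * ModRM/SIB/displacement bytes, and return the address of the next
 * instruction so the signal handler can resume past the bounds check.
 */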
224 static uint8_t *get_next_inst_ip(uint8_t *addr)
225 {
226 	uint8_t *ip = addr;
227 	uint8_t sib;
228 	uint8_t rm;
229 	uint8_t mod;
230 	uint8_t base;
231 	uint8_t modrm;
232 
233 	/* determine the prefix. */
234 	switch(*ip) {
235 	case 0xf2:
236 	case 0xf3:
237 	case 0x66:
238 		ip++;
239 		break;
240 	}
241 
242 	/* look for rex prefix */
243 	if ((*ip & 0x40) == 0x40)
244 		ip++;
245 
246 	/* Make sure we have an MPX instruction. */
247 	if (*ip++ != 0x0f)
248 		return addr;
249 
250 	/* Skip the op code byte. */
251 	ip++;
252 
253 	/* Get the modrm byte. */
254 	modrm = *ip++;
255 
256 	/* Break it down into parts. */
257 	rm = modrm & 7;
258 	mod = (modrm >> 6);
259 
260 	/* Init the parts of the address mode. */
261 	base = 8;
262 
263 	/* Is it a mem mode? */
264 	if (mod != 3) {
265 		/* look for scaled indexed addressing */
266 		if (rm == 4) {
267 			/* SIB addressing */
268 			sib = *ip++;
269 			base = sib & 7;
270 			switch (mod) {
271 			case 0:
272 				if (base == 5)
273 					ip += 4;
274 				break;
275 
276 			case 1:
277 				ip++;
278 				break;
279 
280 			case 2:
281 				ip += 4;
282 				break;
283 			}
284 
285 		} else {
286 			/* MODRM addressing */
287 			switch (mod) {
288 			case 0:
289 				/* DISP32 addressing, no base */
290 				if (rm == 5)
291 					ip += 4;
292 				break;
293 
294 			case 1:
295 				ip++;
296 				break;
297 
298 			case 2:
299 				ip += 4;
300 				break;
301 			}
302 		}
303 	}
304 	return ip;
305 }
306 
307 #ifdef si_lower
308 static inline void *__si_bounds_lower(siginfo_t *si)
309 {
310 	return si->si_lower;
311 }
312 
313 static inline void *__si_bounds_upper(siginfo_t *si)
314 {
315 	return si->si_upper;
316 }
317 #else
318 static inline void **__si_bounds_hack(siginfo_t *si)
319 {
320 	void *sigfault = &si->_sifields._sigfault;
321 	void *end_sigfault = sigfault + sizeof(si->_sifields._sigfault);
322 	void **__si_lower = end_sigfault;
323 
324 	return __si_lower;
325 }
326 
327 static inline void *__si_bounds_lower(siginfo_t *si)
328 {
329 	return *__si_bounds_hack(si);
330 }
331 
332 static inline void *__si_bounds_upper(siginfo_t *si)
333 {
334 	return (*__si_bounds_hack(si)) + sizeof(void *);
335 }
336 #endif
337 
338 static int br_count;
339 static int expected_bnd_index = -1;
340 uint64_t shadow_plb[NR_MPX_BOUNDS_REGISTERS][2]; /* shadow MPX bound registers */
341 unsigned long shadow_map[NR_MPX_BOUNDS_REGISTERS];
342 
343 /*
344  * The kernel is supposed to provide some information about the bounds
345  * exception in the siginfo.  It should match what we have in the bounds
346  * registers that we are checking against.  Just check against the shadow copy
347  * since it is easily available, and we also check that *it* matches the real
348  * registers.
349  */
350 void check_siginfo_vs_shadow(siginfo_t *si)
351 {
352 	int siginfo_ok = 1;
353 	void *shadow_lower = (void *)(unsigned long)shadow_plb[expected_bnd_index][0];
354 	void *shadow_upper = (void *)(unsigned long)shadow_plb[expected_bnd_index][1];
355 
356 	if ((expected_bnd_index < 0) ||
357 	    (expected_bnd_index >= NR_MPX_BOUNDS_REGISTERS)) {
358 		fprintf(stderr, "ERROR: invalid expected_bnd_index: %d\n",
359 			expected_bnd_index);
360 		exit(6);
361 	}
362 	if (__si_bounds_lower(si) != shadow_lower)
363 		siginfo_ok = 0;
364 	if (__si_bounds_upper(si) != shadow_upper)
365 		siginfo_ok = 0;
366 
367 	if (!siginfo_ok) {
368 		fprintf(stderr, "ERROR: siginfo bounds do not match "
369 			"shadow bounds for register %d\n", expected_bnd_index);
370 		exit(7);
371 	}
372 }
373 
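/*
 * SIGSEGV handler: trap 5 is #BR (a bounds violation).  Check the
 * siginfo against the shadow bounds, count the event, and advance the
 * saved IP past the faulting MPX instruction.  Any other trap (e.g. a
 * real page fault, trap 14) fails the test.
 */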
374 void handler(int signum, siginfo_t *si, void *vucontext)
375 {
376 	int i;
377 	ucontext_t *uctxt = vucontext;
378 	int trapno;
379 	unsigned long ip;
380 
381 	dprintf1("entered signal handler\n");
382 
383 	trapno = uctxt->uc_mcontext.gregs[REG_TRAPNO];
384 	ip = uctxt->uc_mcontext.gregs[REG_IP_IDX];
385 
386 	if (trapno == 5) {
387 		typeof(si->si_addr) *si_addr_ptr = &si->si_addr;
388 		uint64_t status = read_mpx_status_sig(uctxt);
389 		uint64_t br_reason =  status & 0x3;
390 
391 		br_count++;
392 		dprintf1("#BR 0x%jx (total seen: %d)\n", status, br_count);
393 
394 #define __SI_FAULT      (3 << 16)
395 #define SEGV_BNDERR     (__SI_FAULT|3)  /* failed address bound checks */
396 
397 		dprintf2("Saw a #BR! status 0x%jx at %016lx br_reason: %jx\n",
398 				status, ip, br_reason);
399 		dprintf2("si_signo: %d\n", si->si_signo);
400 		dprintf2("  signum: %d\n", signum);
401 		dprintf2("info->si_code == SEGV_BNDERR: %d\n",
402 				(si->si_code == SEGV_BNDERR));
403 		dprintf2("info->si_code: %d\n", si->si_code);
404 		dprintf2("info->si_lower: %p\n", __si_bounds_lower(si));
405 		dprintf2("info->si_upper: %p\n", __si_bounds_upper(si));
406 
407 		check_siginfo_vs_shadow(si);
408 
409 		for (i = 0; i < 8; i++)
410 			dprintf3("[%d]: %p\n", i, si_addr_ptr[i]);
411 		switch (br_reason) {
412 		case 0: /* traditional BR */
413 			fprintf(stderr,
414 				"Undefined status with bound exception:%jx\n",
415 				 status);
416 			exit(5);
417 		case 1: /* #BR MPX bounds exception */
418 			/* these are normal and we expect to see them */
419 			dprintf1("bounds exception (normal): status 0x%jx at %p si_addr: %p\n",
420 				status, (void *)ip, si->si_addr);
421 			num_bnd_chk++;
422 			uctxt->uc_mcontext.gregs[REG_IP_IDX] =
423 				(greg_t)get_next_inst_ip((uint8_t *)ip);
424 			break;
425 		case 2:
426 		fprintf(stderr, "#BR status == 2, missing bounds table, "
427 					"kernel should have handled!!\n");
428 			exit(4);
429 			break;
430 		default:
431 			fprintf(stderr, "bound check error: status 0x%jx at %p\n",
432 				status, (void *)ip);
433 			num_bnd_chk++;
434 			uctxt->uc_mcontext.gregs[REG_IP_IDX] =
435 				(greg_t)get_next_inst_ip((uint8_t *)ip);
436 			fprintf(stderr, "bound check error: si_addr %p\n", si->si_addr);
437 			exit(3);
438 		}
439 	} else if (trapno == 14) {
440 		eprintf("ERROR: In signal handler, page fault, trapno = %d, ip = %016lx\n",
441 			trapno, ip);
442 		eprintf("si_addr %p\n", si->si_addr);
443 		eprintf("REG_ERR: %lx\n", (unsigned long)uctxt->uc_mcontext.gregs[REG_ERR]);
444 		test_failed();
445 	} else {
446 		eprintf("unexpected trap %d! at 0x%lx\n", trapno, ip);
447 		eprintf("si_addr %p\n", si->si_addr);
448 		eprintf("REG_ERR: %lx\n", (unsigned long)uctxt->uc_mcontext.gregs[REG_ERR]);
449 		test_failed();
450 	}
451 }
452 
453 static inline void cpuid_count(unsigned int op, int count,
454 			       unsigned int *eax, unsigned int *ebx,
455 			       unsigned int *ecx, unsigned int *edx)
456 {
457 	*eax = op;
458 	*ecx = count;
459 	__cpuid(eax, ebx, ecx, edx);
460 }
461 
462 #define XSTATE_CPUID	    0x0000000d
463 
464 /*
465  * List of XSAVE features Linux knows about:
466  */
467 enum xfeature_bit {
468 	XSTATE_BIT_FP,
469 	XSTATE_BIT_SSE,
470 	XSTATE_BIT_YMM,
471 	XSTATE_BIT_BNDREGS,
472 	XSTATE_BIT_BNDCSR,
473 	XSTATE_BIT_OPMASK,
474 	XSTATE_BIT_ZMM_Hi256,
475 	XSTATE_BIT_Hi16_ZMM,
476 
477 	XFEATURES_NR_MAX,
478 };
479 
480 #define XSTATE_FP	       (1 << XSTATE_BIT_FP)
481 #define XSTATE_SSE	      (1 << XSTATE_BIT_SSE)
482 #define XSTATE_YMM	      (1 << XSTATE_BIT_YMM)
483 #define XSTATE_BNDREGS	  (1 << XSTATE_BIT_BNDREGS)
484 #define XSTATE_BNDCSR	   (1 << XSTATE_BIT_BNDCSR)
485 #define XSTATE_OPMASK	   (1 << XSTATE_BIT_OPMASK)
486 #define XSTATE_ZMM_Hi256	(1 << XSTATE_BIT_ZMM_Hi256)
487 #define XSTATE_Hi16_ZMM	 (1 << XSTATE_BIT_Hi16_ZMM)
488 
489 #define MPX_XSTATES		(XSTATE_BNDREGS | XSTATE_BNDCSR) /* 0x18 */
490 
491 bool one_bit(unsigned int x, int bit)
492 {
493 	return !!(x & (1<<bit));
494 }
495 
496 void print_state_component(int state_bit_nr, char *name)
497 {
498 	unsigned int eax, ebx, ecx, edx;
499 	unsigned int state_component_size;
500 	unsigned int state_component_supervisor;
501 	unsigned int state_component_user;
502 	unsigned int state_component_aligned;
503 
504 	/* See SDM Section 13.2 */
505 	cpuid_count(XSTATE_CPUID, state_bit_nr, &eax, &ebx, &ecx, &edx);
506 	assert(eax || ebx || ecx);
507 	state_component_size = eax;
508 	state_component_supervisor = ((!ebx) && one_bit(ecx, 0));
509 	state_component_user = !one_bit(ecx, 0);
510 	state_component_aligned = one_bit(ecx, 1);
511 	printf("%8s: size: %d user: %d supervisor: %d aligned: %d\n",
512 		name,
513 		state_component_size,	    state_component_user,
514 		state_component_supervisor, state_component_aligned);
515 
516 }
517 
518 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx) */
519 #define XSAVE_FEATURE_BIT       (26)  /* XSAVE/XRSTOR/XSETBV/XGETBV */
520 #define OSXSAVE_FEATURE_BIT     (27) /* XSAVE enabled in the OS */
521 
522 bool check_mpx_support(void)
523 {
524 	unsigned int eax, ebx, ecx, edx;
525 
526 	cpuid_count(1, 0, &eax, &ebx, &ecx, &edx);
527 
528 	/* We can't do much without XSAVE, so just exit if it is missing */
529 	if (!one_bit(ecx, XSAVE_FEATURE_BIT)) {
530 		fprintf(stderr, "processor lacks XSAVE, can not run MPX tests\n");
531 		exit(0);
532 	}
533 
534 	if (!one_bit(ecx, OSXSAVE_FEATURE_BIT)) {
535 		fprintf(stderr, "processor lacks OSXSAVE, can not run MPX tests\n");
536 		exit(0);
537 	}
538 
539 	/* CPUs not supporting the XSTATE CPUID leaf do not support MPX */
540 	/* Is this redundant with the feature bit checks? */
541 	cpuid_count(0, 0, &eax, &ebx, &ecx, &edx);
542 	if (eax < XSTATE_CPUID) {
543 		fprintf(stderr, "processor lacks XSTATE CPUID leaf,"
544 				" can not run MPX tests\n");
545 		exit(0);
546 	}
547 
548 	printf("XSAVE is supported by HW & OS\n");
549 
550 	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
551 
552 	printf("XSAVE processor supported state mask: 0x%x\n", eax);
553 	printf("XSAVE OS supported state mask: 0x%jx\n", xgetbv(0));
554 
555 	/* Make sure the processor supports the MPX states */
556 	if ((eax & MPX_XSTATES) != MPX_XSTATES) {
557 		fprintf(stderr, "processor lacks MPX XSTATE(s), can not run MPX tests\n");
558 		exit(0);
559 	}
560 
561 	/* Make sure the OS has enabled the MPX states in XCR0 */
562 	if ((xgetbv(0) & MPX_XSTATES) != MPX_XSTATES) {
563 		fprintf(stderr, "MPX XSTATE(s) not enabled in XCR0, "
564 				"can not run MPX tests\n");
565 		exit(0);
566 	}
567 
568 	print_state_component(XSTATE_BIT_BNDREGS, "BNDREGS");
569 	print_state_component(XSTATE_BIT_BNDCSR,  "BNDCSR");
570 
571 	return true;
572 }
573 
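/*
 * Enable MPX from user space by hand-building an XSAVE image: point
 * BNDCFGU at the bounds directory with the enable bit (bit 0) set,
 * load it with XRSTOR, then XSAVE it back to confirm the write stuck.
 */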
574 void enable_mpx(void *l1base)
575 {
576 	/* enable point lookup */
577 	memset(buffer, 0, sizeof(buffer));
578 	xrstor_state(xsave_buf, 0x18);
579 
580 	xsave_buf->xsave_hdr.xstate_bv = 0x10;
581 	xsave_buf->bndcsr.cfg_reg_u = (unsigned long)l1base | 1;
582 	xsave_buf->bndcsr.status_reg = 0;
583 
584 	dprintf2("before xrstor\n");
585 	dprintf2("xsave bndcsr: status %jx, configu %jx\n",
586 	       xsave_buf->bndcsr.status_reg, xsave_buf->bndcsr.cfg_reg_u);
587 	xrstor_state(xsave_buf, 0x18);
588 	dprintf2("after xrstor\n");
589 
590 	xsave_state_1(xsave_buf, 0x18);
591 
592 	dprintf1("xsave bndcsr: status %jx, configu %jx\n",
593 	       xsave_buf->bndcsr.status_reg, xsave_buf->bndcsr.cfg_reg_u);
594 }
595 
596 #include <sys/prctl.h>
597 
598 struct mpx_bounds_dir *bounds_dir_ptr;
599 
600 unsigned long __bd_incore(const char *func, int line)
601 {
602 	unsigned long ret = nr_incore(bounds_dir_ptr, MPX_BOUNDS_DIR_SIZE_BYTES);
603 	return ret;
604 }
605 #define bd_incore() __bd_incore(__func__, __LINE__)
606 
607 void check_clear(void *ptr, unsigned long sz)
608 {
609 	unsigned long *i;
610 
611 	for (i = ptr; (void *)i < ptr + sz; i++) {
612 		if (*i) {
613 			dprintf1("%p is NOT clear at %p\n", ptr, i);
614 			assert(0);
615 		}
616 	}
617 	dprintf1("%p is clear for %lx\n", ptr, sz);
618 }
619 
620 void check_clear_bd(void)
621 {
622 	check_clear(bounds_dir_ptr, 2UL << 30);
623 }
624 
625 #define USE_MALLOC_FOR_BOUNDS_DIR 1
626 bool process_specific_init(void)
627 {
628 	unsigned long size;
629 	unsigned long *dir;
630 	/* Guarantee we have the space to align it, add padding: */
631 	unsigned long pad = getpagesize();
632 
633 	size = 2UL << 30; /* 2GB */
634 	if (sizeof(unsigned long) == 4)
635 		size = 4UL << 20; /* 4MB */
636 	dprintf1("trying to allocate %ld MB bounds directory\n", (size >> 20));
637 
638 	if (USE_MALLOC_FOR_BOUNDS_DIR) {
639 		unsigned long _dir;
640 
641 		dir = malloc(size + pad);
642 		assert(dir);
643 		_dir = (unsigned long)dir;
644 		_dir += 0xfffUL;
645 		_dir &= ~0xfffUL;
646 		dir = (void *)_dir;
647 	} else {
648 		/*
649 		 * This makes debugging easier because the address
650 		 * calculations are simpler:
651 		 */
652 		dir = mmap((void *)0x200000000000, size + pad,
653 				PROT_READ|PROT_WRITE,
654 				MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
655 		if (dir == (void *)-1) {
656 			perror("unable to allocate bounds directory");
657 			abort();
658 		}
659 		check_clear(dir, size);
660 	}
661 	bounds_dir_ptr = (void *)dir;
662 	madvise(bounds_dir_ptr, size, MADV_NOHUGEPAGE);
663 	bd_incore();
664 	dprintf1("bounds directory: 0x%p -> 0x%p\n", bounds_dir_ptr,
665 			(char *)bounds_dir_ptr + size);
666 	check_clear(dir, size);
667 	enable_mpx(dir);
668 	check_clear(dir, size);
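	/* prctl 43 is PR_MPX_ENABLE_MANAGEMENT: have the kernel manage our bounds tables */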
669 	if (prctl(43, 0, 0, 0, 0)) {
670 		printf("no MPX support\n");
671 		abort();
672 		return false;
673 	}
674 	return true;
675 }
676 
677 bool process_specific_finish(void)
678 {
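	/* prctl 44 is PR_MPX_DISABLE_MANAGEMENT: stop kernel management of bounds tables */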
679 	if (prctl(44)) {
680 		printf("no MPX support\n");
681 		return false;
682 	}
683 	return true;
684 }
685 
686 void setup_handler()
687 {
688 	int r, rs;
689 	struct sigaction newact;
690 	struct sigaction oldact;
691 
692 	/* #BR is mapped to sigsegv */
693 	int signum  = SIGSEGV;
694 
695 	newact.sa_handler = 0;   /* void(*)(int)*/
696 	newact.sa_sigaction = handler; /* void (*)(int, siginfo_t*, void *) */
697 
698 	/*sigset_t - signals to block while in the handler */
699 	/* get the old signal mask. */
700 	rs = sigprocmask(SIG_SETMASK, 0, &newact.sa_mask);
701 	assert(rs == 0);
702 
703 	/* call sa_sigaction, not sa_handler*/
704 	newact.sa_flags = SA_SIGINFO;
705 
706 	newact.sa_restorer = 0;  /* void(*)(), obsolete */
707 	r = sigaction(signum, &newact, &oldact);
708 	assert(r == 0);
709 }
710 
711 void mpx_prepare(void)
712 {
713 	dprintf2("%s()\n", __func__);
714 	setup_handler();
715 	process_specific_init();
716 }
717 
718 void mpx_cleanup(void)
719 {
720 	printf("%s(): %jd BRs. bye...\n", __func__, num_bnd_chk);
721 	process_specific_finish();
722 }
723 
724 /*-------------- the following is the test case ---------------*/
725 #include <stdint.h>
726 #include <stdbool.h>
727 #include <stdlib.h>
728 #include <stdio.h>
729 #include <time.h>
730 
731 uint64_t num_lower_brs;
732 uint64_t num_upper_brs;
733 
734 #define MPX_CONFIG_OFFSET 1024
735 #define MPX_BOUNDS_OFFSET 960
736 #define MPX_HEADER_OFFSET 512
737 #define MAX_ADDR_TESTED (1<<28)
738 #define TEST_ROUNDS 100
739 
740 /*
741       0F 1A /r BNDLDX-Load
742       0F 1B /r BNDSTX-Store Extended Bounds Using Address Translation
743    66 0F 1A /r BNDMOV bnd1, bnd2/m128
744    66 0F 1B /r BNDMOV bnd1/m128, bnd2
745    F2 0F 1A /r BNDCU bnd, r/m64
746    F2 0F 1B /r BNDCN bnd, r/m64
747    F3 0F 1A /r BNDCL bnd, r/m64
748    F3 0F 1B /r BNDMK bnd, m64
749 */
750 
751 static __always_inline void xsave_state(void *_fx, uint64_t mask)
752 {
753 	uint32_t lmask = mask;
754 	uint32_t hmask = mask >> 32;
755 	unsigned char *fx = _fx;
756 
757 	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x27\n\t"
758 		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
759 		     :   "memory");
760 }
761 
762 static __always_inline void mpx_clear_bnd0(void)
763 {
764 	long size = 0;
765 	void *ptr = NULL;
766 	/* F3 0F 1B /r BNDMK bnd, m64			*/
767 	/* f3 0f 1b 04 11    bndmk  (%rcx,%rdx,1),%bnd0	*/
768 	asm volatile(".byte 0xf3,0x0f,0x1b,0x04,0x11\n\t"
769 		     : : "c" (ptr), "d" (size-1)
770 		     :   "memory");
771 }
772 
773 static __always_inline void mpx_make_bound_helper(unsigned long ptr,
774 		unsigned long size)
775 {
776 	/* F3 0F 1B /r		BNDMK bnd, m64			*/
777 	/* f3 0f 1b 04 11       bndmk  (%rcx,%rdx,1),%bnd0	*/
778 	asm volatile(".byte 0xf3,0x0f,0x1b,0x04,0x11\n\t"
779 		     : : "c" (ptr), "d" (size-1)
780 		     :   "memory");
781 }
782 
783 static __always_inline void mpx_check_lowerbound_helper(unsigned long ptr)
784 {
785 	/* F3 0F 1A /r	BNDCL bnd, r/m64		*/
786 	/* f3 0f 1a 01	bndcl  (%rcx),%bnd0		*/
787 	asm volatile(".byte 0xf3,0x0f,0x1a,0x01\n\t"
788 		     : : "c" (ptr)
789 		     :   "memory");
790 }
791 
792 static __always_inline void mpx_check_upperbound_helper(unsigned long ptr)
793 {
794 	/* F2 0F 1A /r	BNDCU bnd, r/m64	*/
795 	/* f2 0f 1a 01	bndcu  (%rcx),%bnd0	*/
796 	asm volatile(".byte 0xf2,0x0f,0x1a,0x01\n\t"
797 		     : : "c" (ptr)
798 		     :   "memory");
799 }
800 
801 static __always_inline void mpx_movbndreg_helper()
802 {
803 	/* 66 0F 1B /r	BNDMOV bnd1/m128, bnd2	*/
804 	/* 66 0f 1b c2	bndmov %bnd0,%bnd2	*/
805 
806 	asm volatile(".byte 0x66,0x0f,0x1b,0xc2\n\t");
807 }
808 
809 static __always_inline void mpx_movbnd2mem_helper(uint8_t *mem)
810 {
811 	/* 66 0F 1B /r	BNDMOV bnd1/m128, bnd2	*/
812 	/* 66 0f 1b 01	bndmov %bnd0,(%rcx)	*/
813 	asm volatile(".byte 0x66,0x0f,0x1b,0x01\n\t"
814 		     : : "c" (mem)
815 		     :   "memory");
816 }
817 
818 static __always_inline void mpx_movbnd_from_mem_helper(uint8_t *mem)
819 {
820 	/* 66 0F 1A /r	BNDMOV bnd1, bnd2/m128	*/
821 	/* 66 0f 1a 01	bndmov (%rcx),%bnd0	*/
822 	asm volatile(".byte 0x66,0x0f,0x1a,0x01\n\t"
823 		     : : "c" (mem)
824 		     :   "memory");
825 }
826 
827 static __always_inline void mpx_store_dsc_helper(unsigned long ptr_addr,
828 		unsigned long ptr_val)
829 {
830 	/* 0F 1B /r	BNDSTX-Store Extended Bounds Using Address Translation	*/
831 	/* 0f 1b 04 11	bndstx %bnd0,(%rcx,%rdx,1)				*/
832 	asm volatile(".byte 0x0f,0x1b,0x04,0x11\n\t"
833 		     : : "c" (ptr_addr), "d" (ptr_val)
834 		     :   "memory");
835 }
836 
837 static __always_inline void mpx_load_dsc_helper(unsigned long ptr_addr,
838 		unsigned long ptr_val)
839 {
840 	/* 0F 1A /r	BNDLDX-Load			*/
841 	/* 0f 1a 04 11	bndldx (%rcx,%rdx,1),%bnd0	*/
842 	asm volatile(".byte 0x0f,0x1a,0x04,0x11\n\t"
843 		     : : "c" (ptr_addr), "d" (ptr_val)
844 		     :   "memory");
845 }
846 
847 void __print_context(void *__print_xsave_buffer, int line)
848 {
849 	uint64_t *bounds = (uint64_t *)(__print_xsave_buffer + MPX_BOUNDS_OFFSET);
850 	uint64_t *cfg    = (uint64_t *)(__print_xsave_buffer + MPX_CONFIG_OFFSET);
851 
852 	int i;
853 	eprintf("%s()::%d\n", "print_context", line);
854 	for (i = 0; i < 4; i++) {
855 		eprintf("bound[%d]: 0x%016lx 0x%016lx(0x%016lx)\n", i,
856 		       (unsigned long)bounds[i*2],
857 		       ~(unsigned long)bounds[i*2+1],
858 			(unsigned long)bounds[i*2+1]);
859 	}
860 
861 	eprintf("cpcfg: %jx  cpstatus: %jx\n", cfg[0], cfg[1]);
862 }
863 #define print_context(x) __print_context(x, __LINE__)
864 #ifdef DEBUG
865 #define dprint_context(x) print_context(x)
866 #else
867 #define dprint_context(x) do{}while(0)
868 #endif
869 
870 void init()
871 {
872 	int i;
873 
874 	srand((unsigned int)time(NULL));
875 
876 	for (i = 0; i < 4; i++) {
877 		shadow_plb[i][0] = 0;
878 		shadow_plb[i][1] = ~(unsigned long)0;
879 	}
880 }
881 
882 long int __mpx_random(int line)
883 {
884 #ifdef NOT_SO_RANDOM
885 	static long fake = 722122311;
886 	fake += 563792075;
887 	return fake;
888 #else
889 	return random();
890 #endif
891 }
892 #define mpx_random() __mpx_random(__LINE__)
893 
894 uint8_t *get_random_addr()
895 {
896 	uint8_t *addr = (uint8_t *)(unsigned long)(rand() % MAX_ADDR_TESTED);
897 	return (addr - (unsigned long)addr % sizeof(uint8_t *));
898 }
899 
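/*
 * Compare the four hardware bound registers captured in the XSAVE
 * buffer against the shadow copies.  The hardware keeps the upper
 * bound in one's-complement form, hence the ~ below.
 */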
900 static inline bool compare_context(void *__xsave_buffer)
901 {
902 	uint64_t *bounds = (uint64_t *)(__xsave_buffer + MPX_BOUNDS_OFFSET);
903 
904 	int i;
905 	for (i = 0; i < 4; i++) {
906 		dprintf3("shadow[%d]{%016lx/%016lx}\nbounds[%d]{%016lx/%016lx}\n",
907 		       i, (unsigned long)shadow_plb[i][0], (unsigned long)shadow_plb[i][1],
908 		       i, (unsigned long)bounds[i*2],     ~(unsigned long)bounds[i*2+1]);
909 		if ((shadow_plb[i][0] != bounds[i*2]) ||
910 		    (shadow_plb[i][1] != ~(unsigned long)bounds[i*2+1])) {
911 			eprintf("ERROR comparing shadow to real bound register %d\n", i);
912 			eprintf("shadow{0x%016lx/0x%016lx}\nbounds{0x%016lx/0x%016lx}\n",
913 			       (unsigned long)shadow_plb[i][0], (unsigned long)shadow_plb[i][1],
914 			       (unsigned long)bounds[i*2], (unsigned long)bounds[i*2+1]);
915 			return false;
916 		}
917 	}
918 
919 	return true;
920 }
921 
922 void mkbnd_shadow(uint8_t *ptr, int index, long offset)
923 {
924 	uint64_t *lower = (uint64_t *)&(shadow_plb[index][0]);
925 	uint64_t *upper = (uint64_t *)&(shadow_plb[index][1]);
926 	*lower = (unsigned long)ptr;
927 	*upper = (unsigned long)ptr + offset - 1;
928 }
929 
930 void check_lowerbound_shadow(uint8_t *ptr, int index)
931 {
932 	uint64_t *lower = (uint64_t *)&(shadow_plb[index][0]);
933 	if (*lower > (uint64_t)(unsigned long)ptr)
934 		num_lower_brs++;
935 	else
936 		dprintf1("LowerBoundChk passed:%p\n", ptr);
937 }
938 
939 void check_upperbound_shadow(uint8_t *ptr, int index)
940 {
941 	uint64_t upper = *(uint64_t *)&(shadow_plb[index][1]);
942 	if (upper < (uint64_t)(unsigned long)ptr)
943 		num_upper_brs++;
944 	else
945 		dprintf1("UpperBoundChk passed:%p\n", ptr);
946 }
947 
948 __always_inline void movbndreg_shadow(int src, int dest)
949 {
950 	shadow_plb[dest][0] = shadow_plb[src][0];
951 	shadow_plb[dest][1] = shadow_plb[src][1];
952 }
953 
954 __always_inline void movbnd2mem_shadow(int src, unsigned long *dest)
955 {
956 	unsigned long *lower = (unsigned long *)&(shadow_plb[src][0]);
957 	unsigned long *upper = (unsigned long *)&(shadow_plb[src][1]);
958 	*dest = *lower;
959 	*(dest+1) = *upper;
960 }
961 
962 __always_inline void movbnd_from_mem_shadow(unsigned long *src, int dest)
963 {
964 	unsigned long *lower = (unsigned long *)&(shadow_plb[dest][0]);
965 	unsigned long *upper = (unsigned long *)&(shadow_plb[dest][1]);
966 	*lower = *src;
967 	*upper = *(src+1);
968 }
969 
970 __always_inline void stdsc_shadow(int index, uint8_t *ptr, uint8_t *ptr_val)
971 {
972 	shadow_map[0] = (unsigned long)shadow_plb[index][0];
973 	shadow_map[1] = (unsigned long)shadow_plb[index][1];
974 	shadow_map[2] = (unsigned long)ptr_val;
975 	dprintf3("%s(%d, %p, %p) set shadow map[2]: %p\n", __func__,
976 			index, ptr, ptr_val, ptr_val);
977 	/*ptr ignored */
978 }
979 
980 void lddsc_shadow(int index, uint8_t *ptr, uint8_t *ptr_val)
981 {
982 	uint64_t lower = shadow_map[0];
983 	uint64_t upper = shadow_map[1];
984 	uint8_t *value = (uint8_t *)shadow_map[2];
985 
986 	if (value != ptr_val) {
987 		dprintf2("%s(%d, %p, %p) init shadow bounds[%d] "
988 			 "because %p != %p\n", __func__, index, ptr,
989 			 ptr_val, index, value, ptr_val);
990 		shadow_plb[index][0] = 0;
991 		shadow_plb[index][1] = ~(unsigned long)0;
992 	} else {
993 		shadow_plb[index][0] = lower;
994 		shadow_plb[index][1] = upper;
995 	}
996 	/* ptr ignored */
997 }
998 
999 static __always_inline void mpx_test_helper0(uint8_t *buf, uint8_t *ptr)
1000 {
1001 	mpx_make_bound_helper((unsigned long)ptr, 0x1800);
1002 }
1003 
1004 static __always_inline void mpx_test_helper0_shadow(uint8_t *buf, uint8_t *ptr)
1005 {
1006 	mkbnd_shadow(ptr, 0, 0x1800);
1007 }
1008 
1009 static __always_inline void mpx_test_helper1(uint8_t *buf, uint8_t *ptr)
1010 {
1011 	/* these are hard-coded to check bnd0 */
1012 	expected_bnd_index = 0;
1013 	mpx_check_lowerbound_helper((unsigned long)(ptr-1));
1014 	mpx_check_upperbound_helper((unsigned long)(ptr+0x1800));
1015 	/* reset this since we do not expect any more bounds exceptions */
1016 	expected_bnd_index = -1;
1017 }
1018 
1019 static __always_inline void mpx_test_helper1_shadow(uint8_t *buf, uint8_t *ptr)
1020 {
1021 	check_lowerbound_shadow(ptr-1, 0);
1022 	check_upperbound_shadow(ptr+0x1800, 0);
1023 }
1024 
1025 static __always_inline void mpx_test_helper2(uint8_t *buf, uint8_t *ptr)
1026 {
1027 	mpx_make_bound_helper((unsigned long)ptr, 0x1800);
1028 	mpx_movbndreg_helper();
1029 	mpx_movbnd2mem_helper(buf);
1030 	mpx_make_bound_helper((unsigned long)(ptr+0x12), 0x1800);
1031 }
1032 
1033 static __always_inline void mpx_test_helper2_shadow(uint8_t *buf, uint8_t *ptr)
1034 {
1035 	mkbnd_shadow(ptr, 0, 0x1800);
1036 	movbndreg_shadow(0, 2);
1037 	movbnd2mem_shadow(0, (unsigned long *)buf);
1038 	mkbnd_shadow(ptr+0x12, 0, 0x1800);
1039 }
1040 
1041 static __always_inline void mpx_test_helper3(uint8_t *buf, uint8_t *ptr)
1042 {
1043 	mpx_movbnd_from_mem_helper(buf);
1044 }
1045 
1046 static __always_inline void mpx_test_helper3_shadow(uint8_t *buf, uint8_t *ptr)
1047 {
1048 	movbnd_from_mem_shadow((unsigned long *)buf, 0);
1049 }
1050 
1051 static __always_inline void mpx_test_helper4(uint8_t *buf, uint8_t *ptr)
1052 {
1053 	mpx_store_dsc_helper((unsigned long)buf, (unsigned long)ptr);
1054 	mpx_make_bound_helper((unsigned long)(ptr+0x12), 0x1800);
1055 }
1056 
1057 static __always_inline void mpx_test_helper4_shadow(uint8_t *buf, uint8_t *ptr)
1058 {
1059 	stdsc_shadow(0, buf, ptr);
1060 	mkbnd_shadow(ptr+0x12, 0, 0x1800);
1061 }
1062 
1063 static __always_inline void mpx_test_helper5(uint8_t *buf, uint8_t *ptr)
1064 {
1065 	mpx_load_dsc_helper((unsigned long)buf, (unsigned long)ptr);
1066 }
1067 
1068 static __always_inline void mpx_test_helper5_shadow(uint8_t *buf, uint8_t *ptr)
1069 {
1070 	lddsc_shadow(0, buf, ptr);
1071 }
1072 
1073 #define NR_MPX_TEST_FUNCTIONS 6
1074 
1075 /*
1076  * For compatibility reasons, MPX will clear the bounds registers
1077  * when you make function calls (among other things).  We have to
1078  * preserve the registers in between calls to the "helpers" since
1079  * they build on each other.
1080  *
1081  * Be very careful not to make any function calls inside the
1082  * helpers, or anywhere else between the xrstor and xsave.
1083  */
1084 #define run_helper(helper_nr, buf, buf_shadow, ptr)	do {	\
1085 	xrstor_state(xsave_test_buf, flags);			\
1086 	mpx_test_helper##helper_nr(buf, ptr);			\
1087 	xsave_state(xsave_test_buf, flags);			\
1088 	mpx_test_helper##helper_nr##_shadow(buf_shadow, ptr);	\
1089 } while (0)
1090 
1091 static void run_helpers(int nr, uint8_t *buf, uint8_t *buf_shadow, uint8_t *ptr)
1092 {
1093 	uint64_t flags = 0x18;
1094 
1095 	dprint_context(xsave_test_buf);
1096 	switch (nr) {
1097 	case 0:
1098 		run_helper(0, buf, buf_shadow, ptr);
1099 		break;
1100 	case 1:
1101 		run_helper(1, buf, buf_shadow, ptr);
1102 		break;
1103 	case 2:
1104 		run_helper(2, buf, buf_shadow, ptr);
1105 		break;
1106 	case 3:
1107 		run_helper(3, buf, buf_shadow, ptr);
1108 		break;
1109 	case 4:
1110 		run_helper(4, buf, buf_shadow, ptr);
1111 		break;
1112 	case 5:
1113 		run_helper(5, buf, buf_shadow, ptr);
1114 		break;
1115 	default:
1116 		test_failed();
1117 		break;
1118 	}
1119 	dprint_context(xsave_test_buf);
1120 }
1121 
1122 unsigned long buf_shadow[1024]; /* used to check load / store descriptors */
1123 extern long inspect_me(struct mpx_bounds_dir *bounds_dir);
1124 
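/*
 * Scatter BNDSTX stores across 'buf' so the kernel has to populate
 * bounds tables covering it; returns the number of entries written.
 */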
1125 long cover_buf_with_bt_entries(void *buf, long buf_len)
1126 {
1127 	int i;
1128 	long nr_to_fill;
1129 	int ratio = 1000;
1130 	unsigned long buf_len_in_ptrs;
1131 
1132 	/* Fill about 1/1000 of the pointer slots with bt entries */
1133 	nr_to_fill = buf_len / (sizeof(unsigned long) * ratio);
1134 
1135 	if (!nr_to_fill)
1136 		dprintf3("%s() nr_to_fill: %ld\n", __func__, nr_to_fill);
1137 
1138 	/* Align the buffer to pointer size */
1139 	while (((unsigned long)buf) % sizeof(void *)) {
1140 		buf++;
1141 		buf_len--;
1142 	}
1143 	/* We are storing pointers, so work in pointer-sized units */
1144 	buf_len_in_ptrs = buf_len / sizeof(void *);
1145 
1146 	for (i = 0; i < nr_to_fill; i++) {
1147 		long index = (mpx_random() % buf_len_in_ptrs);
1148 		void *ptr = buf + index * sizeof(unsigned long);
1149 		unsigned long ptr_addr = (unsigned long)ptr;
1150 
1151 		/* ptr and size can be anything */
1152 		mpx_make_bound_helper((unsigned long)ptr, 8);
1153 
1154 		/*
1155 		 * Take bnd0 and put it into the bounds tables.  "buf + index" is
1156 		 * an address inside the buffer where we are pretending that we
1157 		 * are going to put a pointer.  We do not, though, because we
1158 		 * will never load entries from the table, so it doesn't matter.
1159 		 */
1160 		mpx_store_dsc_helper(ptr_addr, (unsigned long)ptr);
1161 		dprintf4("storing bound table entry for %lx (buf start @ %p)\n",
1162 				ptr_addr, buf);
1163 	}
1164 	return nr_to_fill;
1165 }
1166 
1167 unsigned long align_down(unsigned long alignme, unsigned long align_to)
1168 {
1169 	return alignme & ~(align_to-1);
1170 }
1171 
1172 unsigned long align_up(unsigned long alignme, unsigned long align_to)
1173 {
1174 	return (alignme + align_to - 1) & ~(align_to-1);
1175 }
1176 
1177 /*
1178  * Using 1MB alignment guarantees that no allocation
1179  * will overlap with another's bounds tables.
1180  *
1181  * We have to cook our own allocator here.  malloc() can
1182  * mix other allocation with ours which means that even
1183  * if we free all of our allocations, there might still
1184  * be bounds tables for the *areas* since there is other
1185  * valid memory there.
1186  *
1187  * We also can't use malloc() because a free() of an area
1188  * might not free it back to the kernel.  We want it
1189  * completely unmapped, and malloc() does not guarantee
1190  * that.
1191  */
1192 #ifdef __i386__
1193 long alignment = 4096;
1194 long sz_alignment = 4096;
1195 #else
1196 long alignment = 1 * MB;
1197 long sz_alignment = 1 * MB;
1198 #endif
1199 void *mpx_mini_alloc(unsigned long sz)
1200 {
1201 	unsigned long long tries = 0;
1202 	static void *last;
1203 	void *ptr;
1204 	void *try_at;
1205 
1206 	sz = align_up(sz, sz_alignment);
1207 
1208 	try_at = last + alignment;
1209 	while (1) {
1210 		ptr = mmap(try_at, sz, PROT_READ|PROT_WRITE,
1211 				MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
1212 		if (ptr == (void *)-1)
1213 			return NULL;
1214 		if (ptr == try_at)
1215 			break;
1216 
1217 		munmap(ptr, sz);
1218 		try_at += alignment;
1219 #ifdef __i386__
1220 		/*
1221 		 * This isn't quite correct for 32-bit binaries
1222 		 * on 64-bit kernels since they can use the
1223 		 * entire 32-bit address space, but it's close
1224 		 * enough.
1225 		 */
1226 		if (try_at > (void *)0xC0000000)
1227 #else
1228 		if (try_at > (void *)0x0000800000000000)
1229 #endif
1230 			try_at = (void *)0x0;
1231 		if (!(++tries % 10000))
1232 			dprintf1("stuck in %s(), tries: %lld\n", __func__, tries);
1233 		continue;
1234 	}
1235 	last = ptr;
1236 	dprintf3("mpx_mini_alloc(0x%lx) returning: %p\n", sz, ptr);
1237 	return ptr;
1238 }
1239 void mpx_mini_free(void *ptr, long sz)
1240 {
1241 	dprintf2("%s() ptr: %p\n", __func__, ptr);
1242 	if ((unsigned long)ptr > 0x100000000000) {
1243 		dprintf1("uh oh !!!!!!!!!!!!!!! pointer too high: %p\n", ptr);
1244 		test_failed();
1245 	}
1246 	sz = align_up(sz, sz_alignment);
1247 	dprintf3("%s() ptr: %p before munmap\n", __func__, ptr);
1248 	munmap(ptr, sz);
1249 	dprintf3("%s() ptr: %p DONE\n", __func__, ptr);
1250 }
1251 
1252 #define NR_MALLOCS 100
1253 struct one_malloc {
1254 	char *ptr;
1255 	int nr_filled_btes;
1256 	unsigned long size;
1257 };
1258 struct one_malloc mallocs[NR_MALLOCS];
1259 
1260 void free_one_malloc(int index)
1261 {
1262 	unsigned long free_ptr;
1263 	unsigned long mask;
1264 
1265 	if (!mallocs[index].ptr)
1266 		return;
1267 
1268 	mpx_mini_free(mallocs[index].ptr, mallocs[index].size);
1269 	dprintf4("freed[%d]:  %p\n", index, mallocs[index].ptr);
1270 
1271 	free_ptr = (unsigned long)mallocs[index].ptr;
1272 	mask = alignment-1;
1273 	dprintf4("lowerbits: %lx / %lx mask: %lx\n", free_ptr,
1274 			(free_ptr & mask), mask);
1275 	assert((free_ptr & mask) == 0);
1276 
1277 	mallocs[index].ptr = NULL;
1278 }
1279 
1280 #ifdef __i386__
1281 #define MPX_BOUNDS_TABLE_COVERS 4096
1282 #else
1283 #define MPX_BOUNDS_TABLE_COVERS (1 * MB)
1284 #endif
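/*
 * Free every tracked allocation and, when the allocations were aligned
 * to whole bounds tables, verify via inspect_me() that the kernel has
 * freed all of the corresponding bounds-table pages.
 */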
1285 void zap_everything(void)
1286 {
1287 	long after_zap;
1288 	long before_zap;
1289 	int i;
1290 
1291 	before_zap = inspect_me(bounds_dir_ptr);
1292 	dprintf1("zapping everything start: %ld\n", before_zap);
1293 	for (i = 0; i < NR_MALLOCS; i++)
1294 		free_one_malloc(i);
1295 
1296 	after_zap = inspect_me(bounds_dir_ptr);
1297 	dprintf1("zapping everything done: %ld\n", after_zap);
1298 	/*
1299 	 * We only guarantee to empty the thing out if our allocations are
1300 	 * exactly aligned on the boundaries of a bounds table.
1301 	 */
1302 	if ((alignment >= MPX_BOUNDS_TABLE_COVERS) &&
1303 	    (sz_alignment >= MPX_BOUNDS_TABLE_COVERS)) {
1304 		if (after_zap != 0)
1305 			test_failed();
1306 
1307 		assert(after_zap == 0);
1308 	}
1309 }
1310 
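/*
 * One step of the malloc/free torture test: possibly free a random
 * earlier allocation (occasionally zapping everything), then allocate
 * a fresh region and cover it with bounds-table entries.
 */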
1311 void do_one_malloc(void)
1312 {
1313 	static int malloc_counter;
1314 	long sz;
1315 	int rand_index = (mpx_random() % NR_MALLOCS);
1316 	void *ptr = mallocs[rand_index].ptr;
1317 
1318 	dprintf3("%s() enter\n", __func__);
1319 
1320 	if (ptr) {
1321 		dprintf3("freeing one malloc at index: %d\n", rand_index);
1322 		free_one_malloc(rand_index);
1323 		if (mpx_random() % (NR_MALLOCS*3) == 3) {
1324 			int i;
1325 			dprintf3("zapping some more\n");
1326 			for (i = rand_index; i < NR_MALLOCS; i++)
1327 				free_one_malloc(i);
1328 		}
1329 		if ((mpx_random() % zap_all_every_this_many_mallocs) == 4)
1330 			zap_everything();
1331 	}
1332 
1333 	/* 1->~1M */
1334 	sz = (1 + mpx_random() % 1000) * 1000;
1335 	ptr = mpx_mini_alloc(sz);
1336 	if (!ptr) {
1337 		/*
1338 		 * If we are failing allocations, just assume we
1339 		 * are out of memory and zap everything.
1340 		 */
1341 		dprintf3("zapping everything because out of memory\n");
1342 		zap_everything();
1343 		goto out;
1344 	}
1345 
1346 	dprintf3("malloc: %p size: 0x%lx\n", ptr, sz);
1347 	mallocs[rand_index].nr_filled_btes = cover_buf_with_bt_entries(ptr, sz);
1348 	mallocs[rand_index].ptr = ptr;
1349 	mallocs[rand_index].size = sz;
1350 out:
1351 	if ((++malloc_counter) % inspect_every_this_many_mallocs == 0)
1352 		inspect_me(bounds_dir_ptr);
1353 }
1354 
1355 void run_timed_test(void (*test_func)(void))
1356 {
1357 	int done = 0;
1358 	long iteration = 0;
1359 	static time_t last_print;
1360 	time_t now;
1361 	time_t start;
1362 
1363 	time(&start);
1364 	while (!done) {
1365 		time(&now);
1366 		if ((now - start) > TEST_DURATION_SECS)
1367 			done = 1;
1368 
1369 		test_func();
1370 		iteration++;
1371 
1372 		if ((now - last_print > 1) || done) {
1373 			printf("iteration %ld complete, OK so far\n", iteration);
1374 			last_print = now;
1375 		}
1376 	}
1377 }
1378 
1379 void check_bounds_table_frees(void)
1380 {
1381 	printf("executing unmaptest\n");
1382 	inspect_me(bounds_dir_ptr);
1383 	run_timed_test(&do_one_malloc);
1384 	printf("done with malloc() fun\n");
1385 }
1386 
1387 void insn_test_failed(int test_nr, int test_round, void *buf,
1388 		void *buf_shadow, void *ptr)
1389 {
1390 	print_context(xsave_test_buf);
1391 	eprintf("ERROR: test %d round %d failed\n", test_nr, test_round);
1392 	while (test_nr == 5) {
1393 		struct mpx_bt_entry *bte;
1394 		struct mpx_bounds_dir *bd = (void *)bounds_dir_ptr;
1395 		struct mpx_bd_entry *bde = mpx_vaddr_to_bd_entry(buf, bd);
1396 
1397 		printf("  bd: %p\n", bd);
1398 		printf("&bde: %p\n", bde);
1399 		printf("*bde: %lx\n", *(unsigned long *)bde);
1400 		if (!bd_entry_valid(bde))
1401 			break;
1402 
1403 		bte = mpx_vaddr_to_bt_entry(buf, bd);
1404 		printf(" te: %p\n", bte);
1405 		printf("bte[0]: %lx\n", bte->contents[0]);
1406 		printf("bte[1]: %lx\n", bte->contents[1]);
1407 		printf("bte[2]: %lx\n", bte->contents[2]);
1408 		printf("bte[3]: %lx\n", bte->contents[3]);
1409 		break;
1410 	}
1411 	test_failed();
1412 }
1413 
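/*
 * Run each of the MPX instruction helpers TEST_ROUNDS times at random
 * addresses, keeping the shadow bounds in sync, and fail if the real
 * bound registers ever diverge from the shadow or the #BR counts
 * don't add up.
 */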
1414 void check_mpx_insns_and_tables(void)
1415 {
1416 	int successes = 0;
1417 	int failures  = 0;
1418 	int buf_size = (1024*1024);
1419 	unsigned long *buf = malloc(buf_size);
1420 	const int total_nr_tests = NR_MPX_TEST_FUNCTIONS * TEST_ROUNDS;
1421 	int i, j;
1422 
1423 	memset(buf, 0, buf_size);
1424 	memset(buf_shadow, 0, sizeof(buf_shadow));
1425 
1426 	for (i = 0; i < TEST_ROUNDS; i++) {
1427 		uint8_t *ptr = get_random_addr() + 8;
1428 
1429 		for (j = 0; j < NR_MPX_TEST_FUNCTIONS; j++) {
1430 			if (0 && j != 5) {
1431 				successes++;
1432 				continue;
1433 			}
1434 			dprintf2("starting test %d round %d\n", j, i);
1435 			dprint_context(xsave_test_buf);
1436 			/*
1437 			 * test5 loads an address from the bounds tables.
1438 			 * The load will only complete if 'ptr' matches
1439 			 * the load and the store, so with random addrs,
1440 			 * the odds of this are very small.  Make it
1441 			 * higher by only moving 'ptr' 1/10 times.
1442 			 */
1443 			if (random() % 10 <= 0)
1444 				ptr = get_random_addr() + 8;
1445 			dprintf3("random ptr{%p}\n", ptr);
1446 			dprint_context(xsave_test_buf);
1447 			run_helpers(j, (void *)buf, (void *)buf_shadow, ptr);
1448 			dprint_context(xsave_test_buf);
1449 			if (!compare_context(xsave_test_buf)) {
1450 				insn_test_failed(j, i, buf, buf_shadow, ptr);
1451 				failures++;
1452 				goto exit;
1453 			}
1454 			successes++;
1455 			dprint_context(xsave_test_buf);
1456 			dprintf2("finished test %d round %d\n", j, i);
1457 			dprintf3("\n");
1458 			dprint_context(xsave_test_buf);
1459 		}
1460 	}
1461 
1462 exit:
1463 	dprintf2("\nabout to free:\n");
1464 	free(buf);
1465 	dprintf1("successes: %d\n", successes);
1466 	dprintf1(" failures: %d\n", failures);
1467 	dprintf1("    tests: %d\n", total_nr_tests);
1468 	dprintf1(" expected: %jd #BRs\n", num_upper_brs + num_lower_brs);
1469 	dprintf1("      saw: %d #BRs\n", br_count);
1470 	if (failures) {
1471 		eprintf("ERROR: non-zero number of failures\n");
1472 		exit(20);
1473 	}
1474 	if (successes != total_nr_tests) {
1475 		eprintf("ERROR: succeeded on fewer tests than expected (%d != %d)\n",
1476 				successes, total_nr_tests);
1477 		exit(21);
1478 	}
1479 	if (num_upper_brs + num_lower_brs != br_count) {
1480 		eprintf("ERROR: unexpected number of #BRs: %jd %jd %d\n",
1481 				num_upper_brs, num_lower_brs, br_count);
1482 		eprintf("successes: %d\n", successes);
1483 		eprintf(" failures: %d\n", failures);
1484 		eprintf("    tests: %d\n", total_nr_tests);
1485 		eprintf(" expected: %jd #BRs\n", num_upper_brs + num_lower_brs);
1486 		eprintf("      saw: %d #BRs\n", br_count);
1487 		exit(22);
1488 	}
1489 }
1490 
1491 /*
1492  * This is supposed to SIGSEGV nicely once the kernel
1493  * can no longer allocate vaddr space.
1494  */
1495 void exhaust_vaddr_space(void)
1496 {
1497 	unsigned long ptr;
1498 	/* Try to make sure there is no room for a bounds table anywhere */
1499 	unsigned long skip = MPX_BOUNDS_TABLE_SIZE_BYTES - PAGE_SIZE;
1500 #ifdef __i386__
1501 	unsigned long max_vaddr = 0xf7788000UL;
1502 #else
1503 	unsigned long max_vaddr = 0x800000000000UL;
1504 #endif
1505 
1506 	dprintf1("%s() start\n", __func__);
1507 	/* do not start at 0, we aren't allowed to map there */
1508 	for (ptr = PAGE_SIZE; ptr < max_vaddr; ptr += skip) {
1509 		void *ptr_ret;
1510 		int ret = madvise((void *)ptr, PAGE_SIZE, MADV_NORMAL);
1511 
1512 		if (!ret) {
1513 			dprintf1("madvise() %lx ret: %d\n", ptr, ret);
1514 			continue;
1515 		}
1516 		ptr_ret = mmap((void *)ptr, PAGE_SIZE, PROT_READ|PROT_WRITE,
1517 				MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
1518 		if (ptr_ret != (void *)ptr) {
1519 			perror("mmap");
1520 			dprintf1("mmap(%lx) ret: %p\n", ptr, ptr_ret);
1521 			break;
1522 		}
1523 		if (!(ptr & 0xffffff))
1524 			dprintf1("mmap(%lx) ret: %p\n", ptr, ptr_ret);
1525 	}
1526 	for (ptr = PAGE_SIZE; ptr < max_vaddr; ptr += skip) {
1527 		dprintf2("covering 0x%lx with bounds table entries\n", ptr);
1528 		cover_buf_with_bt_entries((void *)ptr, PAGE_SIZE);
1529 	}
1530 	dprintf1("%s() end\n", __func__);
1531 	printf("done with vaddr space fun\n");
1532 }
1533 
1534 void mpx_table_test(void)
1535 {
1536 	printf("starting mpx bounds table test\n");
1537 	run_timed_test(check_mpx_insns_and_tables);
1538 	printf("done with mpx bounds table test\n");
1539 }
1540 
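/*
 * Arguments select the tests: "unmaptest" exercises bounds-table
 * freeing, "tabletest" checks instruction/table consistency, and
 * "vaddrexhaust" tries to exhaust the address space.  With no
 * arguments, unmaptest and tabletest are run.
 */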
1541 int main(int argc, char **argv)
1542 {
1543 	int unmaptest = 0;
1544 	int vaddrexhaust = 0;
1545 	int tabletest = 0;
1546 	int i;
1547 
1548 	check_mpx_support();
1549 	mpx_prepare();
1550 	srandom(11179);
1551 
1552 	bd_incore();
1553 	init();
1554 	bd_incore();
1555 
1556 	trace_me();
1557 
1558 	xsave_state((void *)xsave_test_buf, 0x1f);
1559 	if (!compare_context(xsave_test_buf))
1560 		printf("Init failed\n");
1561 
1562 	for (i = 1; i < argc; i++) {
1563 		if (!strcmp(argv[i], "unmaptest"))
1564 			unmaptest = 1;
1565 		if (!strcmp(argv[i], "vaddrexhaust"))
1566 			vaddrexhaust = 1;
1567 		if (!strcmp(argv[i], "tabletest"))
1568 			tabletest = 1;
1569 	}
1570 	if (!(unmaptest || vaddrexhaust || tabletest)) {
1571 		unmaptest = 1;
1572 		/* vaddrexhaust = 1; */
1573 		tabletest = 1;
1574 	}
1575 	if (unmaptest)
1576 		check_bounds_table_frees();
1577 	if (tabletest)
1578 		mpx_table_test();
1579 	if (vaddrexhaust)
1580 		exhaust_vaddr_space();
1581 	printf("%s completed successfully\n", argv[0]);
1582 	exit(0);
1583 }
1584 
1585 #include "mpx-dig.c"
1586