/*
 * mpx.c - Memory Protection eXtensions
 *
 * Copyright (c) 2014, Intel Corporation.
 * Qiaowei Ren <qiaowei.ren@intel.com>
 * Dave Hansen <dave.hansen@intel.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sched/sysctl.h>

#include <asm/insn.h>
#include <asm/mman.h>
#include <asm/mmu_context.h>
#include <asm/mpx.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/mpx.h>

static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
{
	if (is_64bit_mm(mm))
		return MPX_BD_SIZE_BYTES_64;
	else
		return MPX_BD_SIZE_BYTES_32;
}

static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
{
	if (is_64bit_mm(mm))
		return MPX_BT_SIZE_BYTES_64;
	else
		return MPX_BT_SIZE_BYTES_32;
}

/*
 * This is really a simplified "vm_mmap". It only handles MPX
 * bounds tables (the bounds directory is user-allocated).
 */
static unsigned long mpx_mmap(unsigned long len)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, populate;

	/* Only bounds tables can be allocated here */
	if (len != mpx_bt_size_bytes(mm))
		return -EINVAL;

	down_write(&mm->mmap_sem);
	addr = do_mmap(NULL, 0, len, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, VM_MPX, 0, &populate);
	up_write(&mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

	return addr;
}

enum reg_type {
	REG_TYPE_RM = 0,
	REG_TYPE_INDEX,
	REG_TYPE_BASE,
};

static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
			  enum reg_type type)
{
	int regno = 0;

	static const int regoff[] = {
		offsetof(struct pt_regs, ax),
		offsetof(struct pt_regs, cx),
		offsetof(struct pt_regs, dx),
		offsetof(struct pt_regs, bx),
		offsetof(struct pt_regs, sp),
		offsetof(struct pt_regs, bp),
		offsetof(struct pt_regs, si),
		offsetof(struct pt_regs, di),
#ifdef CONFIG_X86_64
		offsetof(struct pt_regs, r8),
		offsetof(struct pt_regs, r9),
		offsetof(struct pt_regs, r10),
		offsetof(struct pt_regs, r11),
		offsetof(struct pt_regs, r12),
		offsetof(struct pt_regs, r13),
		offsetof(struct pt_regs, r14),
		offsetof(struct pt_regs, r15),
#endif
	};
	int nr_registers = ARRAY_SIZE(regoff);
	/*
	 * Don't let a 32-bit instruction be decoded as
	 * reading a 64-bit-only register.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64)
		nr_registers -= 8;

	switch (type) {
	case REG_TYPE_RM:
		regno = X86_MODRM_RM(insn->modrm.value);
		if (X86_REX_B(insn->rex_prefix.value))
			regno += 8;
		break;

	case REG_TYPE_INDEX:
		regno = X86_SIB_INDEX(insn->sib.value);
		if (X86_REX_X(insn->rex_prefix.value))
			regno += 8;
		break;

	case REG_TYPE_BASE:
		regno = X86_SIB_BASE(insn->sib.value);
		if (X86_REX_B(insn->rex_prefix.value))
			regno += 8;
		break;

	default:
		pr_err("invalid register type");
		BUG();
		break;
	}

	if (regno >= nr_registers) {
		WARN_ONCE(1, "decoded an instruction with an invalid register");
		return -EINVAL;
	}
	return regoff[regno];
}
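
/*
 * Worked decode example (illustrative values, not taken from the
 * kernel sources): for a 64-bit instruction with REX.B set and
 * ModRM.rm == 0b001, REG_TYPE_RM yields regno = 1 + 8 = 9, i.e.
 * offsetof(struct pt_regs, r9).  Without the REX prefix the same
 * ModRM.rm value selects 'cx'.
 */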

/*
 * Return the address referenced by the instruction.
 * For rm==3, return the content of the rm reg.
 * For rm!=3, calculate the address using SIB and displacement.
 */
static void __user *mpx_get_addr_ref(struct insn *insn, struct pt_regs *regs)
{
	unsigned long addr, base, indx;
	int addr_offset, base_offset, indx_offset;
	insn_byte_t sib;

	insn_get_modrm(insn);
	insn_get_sib(insn);
	sib = insn->sib.value;

	if (X86_MODRM_MOD(insn->modrm.value) == 3) {
		addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
		if (addr_offset < 0)
			goto out_err;
		addr = regs_get_register(regs, addr_offset);
	} else {
		if (insn->sib.nbytes) {
			base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE);
			if (base_offset < 0)
				goto out_err;

			indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX);
			if (indx_offset < 0)
				goto out_err;

			base = regs_get_register(regs, base_offset);
			indx = regs_get_register(regs, indx_offset);
			addr = base + indx * (1 << X86_SIB_SCALE(sib));
		} else {
			addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
			if (addr_offset < 0)
				goto out_err;
			addr = regs_get_register(regs, addr_offset);
		}
		addr += insn->displacement.value;
	}
	return (void __user *)addr;
out_err:
	return (void __user *)-1;
}
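
/*
 * Effective-address sketch (illustrative numbers, not from the
 * source): with a SIB byte present, the code above computes
 *
 *	addr = base + index * (1 << scale) + displacement
 *
 * e.g. base = 0x1000, index = 4, scale = 3 (i.e. x8) and disp = 0x10
 * give addr = 0x1000 + 4 * 8 + 0x10 = 0x1030.
 */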

static int mpx_insn_decode(struct insn *insn,
			   struct pt_regs *regs)
{
	unsigned char buf[MAX_INSN_SIZE];
	int x86_64 = !test_thread_flag(TIF_IA32);
	int not_copied;
	int nr_copied;

	not_copied = copy_from_user(buf, (void __user *)regs->ip, sizeof(buf));
	nr_copied = sizeof(buf) - not_copied;
	/*
	 * The decoder _should_ fail nicely if we pass it a short buffer.
	 * But, let's not depend on that implementation detail.  If we
	 * did not get anything, just error out now.
	 */
	if (!nr_copied)
		return -EFAULT;
	insn_init(insn, buf, nr_copied, x86_64);
	insn_get_length(insn);
	/*
	 * copy_from_user() tries to get as many bytes as we could see in
	 * the largest possible instruction.  If the instruction we are
	 * after is shorter than that _and_ we attempt to copy from
	 * something unreadable, we might get a short read.  This is OK
	 * as long as the read did not stop in the middle of the
	 * instruction.  Check to see if we got a partial instruction.
	 */
	if (nr_copied < insn->length)
		return -EFAULT;

	insn_get_opcode(insn);
	/*
	 * We only _really_ need to decode bndcl/bndcn/bndcu
	 * Error out on anything else.
	 */
	if (insn->opcode.bytes[0] != 0x0f)
		goto bad_opcode;
	if ((insn->opcode.bytes[1] != 0x1a) &&
	    (insn->opcode.bytes[1] != 0x1b))
		goto bad_opcode;

	return 0;
bad_opcode:
	return -EINVAL;
}
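
/*
 * For reference (per the Intel SDM encodings as best I recall them,
 * not something this file defines): bndcl is f3 0f 1a, bndcu is
 * f2 0f 1a and bndcn is f2 0f 1b.  The f2/f3 prefixes are decoded
 * separately by the insn machinery, which is why only the 0f 1a/1b
 * escape/opcode bytes are checked above.
 */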

/*
 * If a bounds overflow occurs then a #BR is generated. This
 * function decodes the MPX instruction to get the violation
 * address and stores it in the extended struct siginfo.
 *
 * Note that this is not a super precise way of doing this.
 * Userspace could have, by the time we get here, written
 * anything it wants in to the instructions.  We cannot
 * trust anything about it.  They might not be valid
 * instructions or might encode invalid registers, etc...
 *
 * The caller is expected to kfree() the returned siginfo_t.
 */
siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
{
	const struct mpx_bndreg_state *bndregs;
	const struct mpx_bndreg *bndreg;
	siginfo_t *info = NULL;
	struct insn insn;
	uint8_t bndregno;
	int err;

	err = mpx_insn_decode(&insn, regs);
	if (err)
		goto err_out;

	/*
	 * We know at this point that we are only dealing with
	 * MPX instructions.
	 */
	insn_get_modrm(&insn);
	bndregno = X86_MODRM_REG(insn.modrm.value);
	if (bndregno > 3) {
		err = -EINVAL;
		goto err_out;
	}
	/* get bndregs field from current task's xsave area */
	bndregs = get_xsave_field_ptr(XFEATURE_MASK_BNDREGS);
	if (!bndregs) {
		err = -EINVAL;
		goto err_out;
	}
	/* now go select the individual register in the set of 4 */
	bndreg = &bndregs->bndreg[bndregno];

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto err_out;
	}
	/*
	 * The registers are always 64-bit, but the upper 32
	 * bits are ignored in 32-bit mode.  Also, note that the
	 * upper bounds are architecturally represented in 1's
	 * complement form.
	 *
	 * The 'unsigned long' cast is because the compiler
	 * complains when casting from integers to different-size
	 * pointers.
	 */
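	/*
	 * Worked example (illustrative values): a bound register
	 * holding lower_bound = 0x1000 and upper_bound = ~0x1fff
	 * yields si_lower = 0x1000 and si_upper = 0x1fff after the
	 * complement below.
	 */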
	info->si_lower = (void __user *)(unsigned long)bndreg->lower_bound;
	info->si_upper = (void __user *)(unsigned long)~bndreg->upper_bound;
	info->si_addr_lsb = 0;
	info->si_signo = SIGSEGV;
	info->si_errno = 0;
	info->si_code = SEGV_BNDERR;
	info->si_addr = mpx_get_addr_ref(&insn, regs);
	/*
	 * We were not able to extract an address from the instruction,
	 * probably because there was something invalid in it.
	 */
	if (info->si_addr == (void __user *)-1) {
		err = -EINVAL;
		goto err_out;
	}
	trace_mpx_bounds_register_exception(info->si_addr, bndreg);
	return info;
err_out:
	/* info might be NULL, but kfree() handles that */
	kfree(info);
	return ERR_PTR(err);
}

static __user void *mpx_get_bounds_dir(void)
{
	const struct mpx_bndcsr *bndcsr;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * The bounds directory pointer is stored in a register
	 * only accessible if we first do an xsave.
	 */
	bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
	if (!bndcsr)
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Make sure the register looks valid by checking the
	 * enable bit.
	 */
	if (!(bndcsr->bndcfgu & MPX_BNDCFG_ENABLE_FLAG))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Lastly, mask off the low bits used for configuration
	 * flags, and return the address of the bounds directory.
	 */
	return (void __user *)(unsigned long)
		(bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK);
}
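
/*
 * BNDCFGU layout sketch (hedged; this matches my reading of the
 * architecture docs rather than anything defined in this file):
 * bit 0 is the enable flag, bit 1 the bounds-preserve flag, and the
 * high bits hold the 4KB-aligned bounds directory base -- hence the
 * ENABLE_FLAG test and the ADDR_MASK above.
 */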

int mpx_enable_management(void)
{
	void __user *bd_base = MPX_INVALID_BOUNDS_DIR;
	struct mm_struct *mm = current->mm;
	int ret = 0;

	/*
	 * The runtime in userspace is responsible for allocating the
	 * bounds directory. It then saves the base of the bounds
	 * directory into the XSAVE/XRSTOR save area and enables MPX
	 * through the XRSTOR instruction.
	 *
	 * The copy_xregs_to_kernel() beneath get_xsave_field_ptr() is
	 * expected to be relatively expensive. Storing the bounds
	 * directory here means that we do not have to do xsave in the
	 * unmap path; we can just use mm->bd_addr instead.
	 */
	bd_base = mpx_get_bounds_dir();
	down_write(&mm->mmap_sem);
	mm->bd_addr = bd_base;
	if (mm->bd_addr == MPX_INVALID_BOUNDS_DIR)
		ret = -ENXIO;

	up_write(&mm->mmap_sem);
	return ret;
}
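
/*
 * Userspace flow sketch (illustrative; mpx_enable_management() is
 * reached via the PR_MPX_ENABLE_MANAGEMENT prctl(2) request, but the
 * runtime details shown here are an assumption, not part of this
 * file):
 *
 *	bd = mmap(NULL, bd_size, PROT_READ | PROT_WRITE,
 *		  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	... store 'bd' plus the enable bit into BNDCFGU via xrstor ...
 *	prctl(PR_MPX_ENABLE_MANAGEMENT, 0, 0, 0, 0);
 */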

int mpx_disable_management(void)
{
	struct mm_struct *mm = current->mm;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return -ENXIO;

	down_write(&mm->mmap_sem);
	mm->bd_addr = MPX_INVALID_BOUNDS_DIR;
	up_write(&mm->mmap_sem);
	return 0;
}

static int mpx_cmpxchg_bd_entry(struct mm_struct *mm,
		unsigned long *curval,
		unsigned long __user *addr,
		unsigned long old_val, unsigned long new_val)
{
	int ret;
	/*
	 * user_atomic_cmpxchg_inatomic() actually uses sizeof()
	 * the pointer that we pass to it to figure out how much
	 * data to cmpxchg.  We have to be careful here not to
	 * pass a pointer to a 64-bit data type when we only want
	 * a 32-bit copy.
	 */
	if (is_64bit_mm(mm)) {
		ret = user_atomic_cmpxchg_inatomic(curval,
				addr, old_val, new_val);
	} else {
		u32 uninitialized_var(curval_32);
		u32 old_val_32 = old_val;
		u32 new_val_32 = new_val;
		u32 __user *addr_32 = (u32 __user *)addr;

		ret = user_atomic_cmpxchg_inatomic(&curval_32,
				addr_32, old_val_32, new_val_32);
		*curval = curval_32;
	}
	return ret;
}

/*
 * With 32-bit mode, a bounds directory is 4MB, and the size of each
 * bounds table is 16KB. With 64-bit mode, a bounds directory is 2GB,
 * and the size of each bounds table is 4MB.
 */
static int allocate_bt(struct mm_struct *mm, long __user *bd_entry)
{
	unsigned long expected_old_val = 0;
	unsigned long actual_old_val = 0;
	unsigned long bt_addr;
	unsigned long bd_new_entry;
	int ret = 0;

	/*
	 * Carve the virtual space out of userspace for the new
	 * bounds table:
	 */
	bt_addr = mpx_mmap(mpx_bt_size_bytes(mm));
	if (IS_ERR((void *)bt_addr))
		return PTR_ERR((void *)bt_addr);
	/*
	 * Set the valid flag (kinda like _PAGE_PRESENT in a pte)
	 */
	bd_new_entry = bt_addr | MPX_BD_ENTRY_VALID_FLAG;

	/*
	 * Go poke the address of the new bounds table in to the
	 * bounds directory entry out in userspace memory.  Note:
	 * we may race with another CPU instantiating the same table.
	 * In that case the cmpxchg will see an unexpected
	 * 'actual_old_val'.
	 *
	 * This can fault, but that's OK because we do not hold
	 * mmap_sem at this point, unlike some of the other parts
	 * of the MPX code that have to pagefault_disable().
	 */
	ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val, bd_entry,
				   expected_old_val, bd_new_entry);
	if (ret)
		goto out_unmap;

	/*
	 * The user_atomic_cmpxchg_inatomic() will only return nonzero
	 * for faults, *not* if the cmpxchg itself fails.  Now we must
	 * verify that the cmpxchg itself completed successfully.
	 */
	/*
	 * We expected an empty 'expected_old_val', but instead found
	 * an apparently valid entry.  Assume we raced with another
	 * thread to instantiate this table and declare success.
	 */
	if (actual_old_val & MPX_BD_ENTRY_VALID_FLAG) {
		ret = 0;
		goto out_unmap;
	}
	/*
	 * We found a non-empty bd_entry but it did not have the
	 * VALID_FLAG set.  Return an error which will result in
	 * a SEGV since this probably means that somebody scribbled
	 * some invalid data in to a bounds directory entry.
	 */
	if (expected_old_val != actual_old_val) {
		ret = -EINVAL;
		goto out_unmap;
	}
	trace_mpx_new_bounds_table(bt_addr);
	return 0;
out_unmap:
	vm_munmap(bt_addr, mpx_bt_size_bytes(mm));
	return ret;
}

/*
 * When a BNDSTX instruction attempts to save bounds to a bounds
 * table, it will first attempt to look up the table in the
 * first-level bounds directory.  If it does not find a table in
 * the directory, a #BR is generated and we get here in order to
 * allocate a new table.
 *
 * With 32-bit mode, the size of the BD is 4MB, and the size of each
 * bounds table is 16KB. With 64-bit mode, the size of the BD is 2GB,
 * and the size of each bounds table is 4MB.
 */
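/*
 * The arithmetic behind those sizes (illustrative, assuming the usual
 * MPX geometry): on 64-bit the directory is 2^28 entries * 8 bytes =
 * 2GB and each table is 2^17 entries * 32 bytes = 4MB; on 32-bit the
 * directory is 2^20 entries * 4 bytes = 4MB and each table is
 * 2^10 entries * 16 bytes = 16KB.
 */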
static int do_mpx_bt_fault(void)
{
	unsigned long bd_entry, bd_base;
	const struct mpx_bndcsr *bndcsr;
	struct mm_struct *mm = current->mm;

	bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
	if (!bndcsr)
		return -EINVAL;
	/*
	 * Mask off the preserve and enable bits
	 */
	bd_base = bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK;
	/*
	 * The hardware provides the address of the missing or invalid
	 * entry via BNDSTATUS, so we don't have to go look it up.
	 */
	bd_entry = bndcsr->bndstatus & MPX_BNDSTA_ADDR_MASK;
	/*
	 * Make sure the directory entry is within where we think
	 * the directory is.
	 */
	if ((bd_entry < bd_base) ||
	    (bd_entry >= bd_base + mpx_bd_size_bytes(mm)))
		return -EINVAL;

	return allocate_bt(mm, (long __user *)bd_entry);
}

int mpx_handle_bd_fault(void)
{
	/*
	 * Userspace never asked us to manage the bounds tables,
	 * so refuse to help.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return -EINVAL;

	return do_mpx_bt_fault();
}

/*
 * A thin wrapper around get_user_pages().  Returns 0 if the
 * fault was resolved or -errno if not.
 */
static int mpx_resolve_fault(long __user *addr, int write)
{
	long gup_ret;
	int nr_pages = 1;

	gup_ret = get_user_pages((unsigned long)addr, nr_pages,
			write ? FOLL_WRITE : 0, NULL, NULL);
	/*
	 * get_user_pages() returns the number of pages it got.
	 * 0 means we failed to fault in and get anything,
	 * probably because 'addr' is bad.
	 */
	if (!gup_ret)
		return -EFAULT;
	/* Other error, return it */
	if (gup_ret < 0)
		return gup_ret;
	/* must have gup'd a page and gup_ret > 0, success */
	return 0;
}

static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
					     unsigned long bd_entry)
{
	unsigned long bt_addr = bd_entry;
	int align_to_bytes;
	/*
	 * Bit 0 in a bd_entry is always the valid bit.
	 */
	bt_addr &= ~MPX_BD_ENTRY_VALID_FLAG;
	/*
	 * Tables are naturally aligned at 8-byte boundaries
	 * on 64-bit and 4-byte boundaries on 32-bit.  The
	 * documentation makes it appear that the low bits
	 * are ignored by the hardware, so we do the same.
	 */
	if (is_64bit_mm(mm))
		align_to_bytes = 8;
	else
		align_to_bytes = 4;
	bt_addr &= ~(align_to_bytes-1);
	return bt_addr;
}

/*
 * We only want to do a 4-byte get_user() on 32-bit.  Otherwise,
 * we might run off the end of the bounds table if we are on
 * a 64-bit kernel and try to get 8 bytes.
 */
int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
		long __user *bd_entry_ptr)
{
	u32 bd_entry_32;
	int ret;

	if (is_64bit_mm(mm))
		return get_user(*bd_entry_ret, bd_entry_ptr);

	/*
	 * Note that get_user() uses the type of the *pointer* to
	 * establish the size of the get, not the destination.
	 */
	ret = get_user(bd_entry_32, (u32 __user *)bd_entry_ptr);
	*bd_entry_ret = bd_entry_32;
	return ret;
}

/*
 * Get the base of the bounds table pointed to by a specific
 * bounds directory entry.
 */
static int get_bt_addr(struct mm_struct *mm,
			long __user *bd_entry_ptr,
			unsigned long *bt_addr_result)
{
	int ret;
	int valid_bit;
	unsigned long bd_entry;
	unsigned long bt_addr;

	if (!access_ok(VERIFY_READ, (bd_entry_ptr), sizeof(*bd_entry_ptr)))
		return -EFAULT;

	while (1) {
		int need_write = 0;

		pagefault_disable();
		ret = get_user_bd_entry(mm, &bd_entry, bd_entry_ptr);
		pagefault_enable();
		if (!ret)
			break;
		if (ret == -EFAULT)
			ret = mpx_resolve_fault(bd_entry_ptr, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}

	valid_bit = bd_entry & MPX_BD_ENTRY_VALID_FLAG;
	bt_addr = mpx_bd_entry_to_bt_addr(mm, bd_entry);

	/*
	 * When the kernel is managing bounds tables, a bounds directory
	 * entry will either have a valid address (plus the valid bit)
	 * *OR* be completely empty. If we see a !valid entry *and* some
	 * data in the address field, we know something is wrong. This
	 * -EINVAL return will cause a SIGSEGV.
	 */
	if (!valid_bit && bt_addr)
		return -EINVAL;
	/*
	 * Do we have a completely zeroed bd entry?  That is OK.  It
	 * just means there was no bounds table for this memory.  Make
	 * sure to distinguish this from -EINVAL, which will cause
	 * a SEGV.
	 */
	if (!valid_bit)
		return -ENOENT;

	*bt_addr_result = bt_addr;
	return 0;
}

static inline int bt_entry_size_bytes(struct mm_struct *mm)
{
	if (is_64bit_mm(mm))
		return MPX_BT_ENTRY_BYTES_64;
	else
		return MPX_BT_ENTRY_BYTES_32;
}

/*
 * Take a virtual address and turn it into the offset in bytes
 * inside of the bounds table where the bounds table entry
 * controlling 'addr' can be found.
 */
static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm,
		unsigned long addr)
{
	unsigned long bt_table_nr_entries;
	unsigned long offset = addr;

	if (is_64bit_mm(mm)) {
		/* Bottom 3 bits are ignored on 64-bit */
		offset >>= 3;
		bt_table_nr_entries = MPX_BT_NR_ENTRIES_64;
	} else {
		/* Bottom 2 bits are ignored on 32-bit */
		offset >>= 2;
		bt_table_nr_entries = MPX_BT_NR_ENTRIES_32;
	}
	/*
	 * We know the size of the table in to which we are
	 * indexing, and we have eliminated all the low bits
	 * which are ignored for indexing.
	 *
	 * Mask out all the high bits which we do not need
	 * to index in to the table.  Note that the tables
	 * are always powers of two so this gives us a proper
	 * mask.
	 */
	offset &= (bt_table_nr_entries-1);
	/*
	 * We now have an entry offset in terms of *entries* in
	 * the table.  We need to scale it back up to bytes.
	 */
	offset *= bt_entry_size_bytes(mm);
	return offset;
}
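
/*
 * In other words (illustrative, assuming the 2^17-entry/32-byte
 * geometry implied by the constants): on 64-bit,
 *
 *	offset = ((addr >> 3) & (MPX_BT_NR_ENTRIES_64 - 1))
 *			* MPX_BT_ENTRY_BYTES_64;
 *
 * so bits 3..19 of the address select the entry and the result is a
 * byte offset within the 4MB table.
 */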

/*
 * How much virtual address space does a single bounds
 * directory entry cover?
 *
 * Note: we need a long long because 4GB doesn't fit into
 * a long on 32-bit.
 */
static inline unsigned long bd_entry_virt_space(struct mm_struct *mm)
{
	unsigned long long virt_space;
	unsigned long long GB = (1ULL << 30);

	/*
	 * This covers 32-bit emulation as well as 32-bit kernels
	 * running on 64-bit hardware.
	 */
	if (!is_64bit_mm(mm))
		return (4ULL * GB) / MPX_BD_NR_ENTRIES_32;

	/*
	 * 'x86_virt_bits' returns what the hardware is capable
	 * of, and returns the full >32-bit address space when
	 * running 32-bit kernels on 64-bit hardware.
	 */
	virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
	return virt_space / MPX_BD_NR_ENTRIES_64;
}
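
/*
 * Plugging in typical numbers (illustrative): with 48 virtual address
 * bits and MPX_BD_NR_ENTRIES_64 == 2^28, each 64-bit directory entry
 * covers 2^48 / 2^28 = 1MB of virtual space; on 32-bit it is
 * 4GB / 2^20 = 4KB per entry.
 */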

/*
 * Free the backing physical pages of bounds table 'bt_addr'.
 * Assume start...end is within that bounds table.
 */
static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
		unsigned long bt_addr,
		unsigned long start_mapping, unsigned long end_mapping)
{
	struct vm_area_struct *vma;
	unsigned long addr, len;
	unsigned long start;
	unsigned long end;

	/*
	 * If we 'end' on a boundary, the offset will be 0 which
	 * is not what we want.  Back it up a byte to get the
	 * last bt entry.  Then once we have the entry itself,
	 * move 'end' back up by the table entry size.
	 */
	start = bt_addr + mpx_get_bt_entry_offset_bytes(mm, start_mapping);
	end   = bt_addr + mpx_get_bt_entry_offset_bytes(mm, end_mapping - 1);
	/*
	 * Move end back up by one entry.  Among other things
	 * this ensures that it remains page-aligned and does
	 * not screw up zap_page_range()
	 */
	end += bt_entry_size_bytes(mm);

	/*
	 * Find the first overlapping vma. If vma->vm_start > start, there
	 * will be a hole in the bounds table. This -EINVAL return will
	 * cause a SIGSEGV.
	 */
	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EINVAL;

	/*
	 * A NUMA policy on a VM_MPX VMA could cause this bounds table to
	 * be split. So we need to look across the entire 'start -> end'
	 * range of this bounds table, find all of the VM_MPX VMAs, and
	 * zap only those.
	 */
	addr = start;
	while (vma && vma->vm_start < end) {
		/*
		 * We followed a bounds directory entry down
		 * here.  If we find a non-MPX VMA, that's bad,
		 * so stop immediately and return an error.  This
		 * probably results in a SIGSEGV.
		 */
		if (!(vma->vm_flags & VM_MPX))
			return -EINVAL;

		len = min(vma->vm_end, end) - addr;
		zap_page_range(vma, addr, len, NULL);
		trace_mpx_unmap_zap(addr, addr+len);

		/* 'vma' may be NULL once we step past the last VMA */
		vma = vma->vm_next;
		if (vma)
			addr = vma->vm_start;
	}
	return 0;
}

static unsigned long mpx_get_bd_entry_offset(struct mm_struct *mm,
		unsigned long addr)
{
	/*
	 * There are several ways to derive the bd offsets.  We
	 * use the following approach here:
	 * 1. We know the size of the virtual address space
	 * 2. We know the number of entries in a bounds table
	 * 3. We know that each entry covers a fixed amount of
	 *    virtual address space.
	 * So, we can just divide the virtual address by the
	 * virtual space used by one entry to determine which
	 * entry "controls" the given virtual address.
	 */
	if (is_64bit_mm(mm)) {
		int bd_entry_size = 8; /* 64-bit pointer */
		/*
		 * Take the 64-bit addressing hole in to account.
		 */
		addr &= ((1UL << boot_cpu_data.x86_virt_bits) - 1);
		return (addr / bd_entry_virt_space(mm)) * bd_entry_size;
	} else {
		int bd_entry_size = 4; /* 32-bit pointer */
		/*
		 * 32-bit has no hole so this case needs no mask
		 */
		return (addr / bd_entry_virt_space(mm)) * bd_entry_size;
	}
	/*
	 * The two return calls above are exact copies.  If we
	 * pull out a single copy and put it in here, gcc won't
	 * realize that we're doing a power-of-2 divide and use
	 * shifts.  It uses a real divide.  If we put them up
	 * there, it manages to figure it out (gcc 4.8.3).
	 */
}
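
/*
 * Concretely (illustrative, using the per-entry coverage worked out
 * for bd_entry_virt_space() above): on 64-bit with 1MB of virtual
 * space per directory entry, offset = (addr / 0x100000) * 8.
 */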

static int unmap_entire_bt(struct mm_struct *mm,
		long __user *bd_entry, unsigned long bt_addr)
{
	unsigned long expected_old_val = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
	unsigned long uninitialized_var(actual_old_val);
	int ret;

	while (1) {
		int need_write = 1;
		unsigned long cleared_bd_entry = 0;

		pagefault_disable();
		ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val,
				bd_entry, expected_old_val, cleared_bd_entry);
		pagefault_enable();
		if (!ret)
			break;
		if (ret == -EFAULT)
			ret = mpx_resolve_fault(bd_entry, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}
	/*
	 * The cmpxchg was performed, check the results.
	 */
	if (actual_old_val != expected_old_val) {
		/*
		 * Someone else raced with us to unmap the table.
		 * That is OK, since we were both trying to do
		 * the same thing.  Declare success.
		 */
		if (!actual_old_val)
			return 0;
		/*
		 * Something messed with the bounds directory
		 * entry.  We hold mmap_sem for read or write
		 * here, so it could not be a _new_ bounds table
		 * that someone just allocated.  Something is
		 * wrong, so pass up the error and SIGSEGV.
		 */
		return -EINVAL;
	}
	/*
	 * Note, we are likely being called under do_munmap() already. To
	 * avoid recursion, do_munmap() will check whether it comes
	 * from one bounds table through VM_MPX flag.
	 */
	return do_munmap(mm, bt_addr, mpx_bt_size_bytes(mm));
}

static int try_unmap_single_bt(struct mm_struct *mm,
	       unsigned long start, unsigned long end)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	/*
	 * "bta" == Bounds Table Area: the area controlled by the
	 * bounds table that we are unmapping.
	 */
	unsigned long bta_start_vaddr = start & ~(bd_entry_virt_space(mm)-1);
	unsigned long bta_end_vaddr = bta_start_vaddr + bd_entry_virt_space(mm);
	unsigned long uninitialized_var(bt_addr);
	void __user *bde_vaddr;
	int ret;
	/*
	 * We already unlinked the VMAs from the mm's rbtree so 'start'
	 * is guaranteed to be in a hole. This gets us the first VMA
	 * before the hole in to 'prev' and the next VMA after the hole
	 * in to 'next'.
	 */
	next = find_vma_prev(mm, start, &prev);
	/*
	 * Do not count other MPX bounds table VMAs as neighbors.
	 * Although theoretically possible, we do not allow bounds
	 * tables for bounds tables so our heads do not explode.
	 * If we count them as neighbors here, we may end up with
	 * lots of tables even though we have no actual table
	 * entries in use.
	 */
	while (next && (next->vm_flags & VM_MPX))
		next = next->vm_next;
	while (prev && (prev->vm_flags & VM_MPX))
		prev = prev->vm_prev;
	/*
	 * We know 'start' and 'end' lie within an area controlled
	 * by a single bounds table.  See if there are any other
	 * VMAs controlled by that bounds table.  If there are not
	 * then we can "expand" the area we are unmapping to possibly
	 * cover the entire table.
	 */
	if ((!prev || prev->vm_end <= bta_start_vaddr) &&
	    (!next || next->vm_start >= bta_end_vaddr)) {
		/*
		 * No neighbor VMAs controlled by same bounds
		 * table.  Try to unmap the whole thing
		 */
		start = bta_start_vaddr;
		end = bta_end_vaddr;
	}

	bde_vaddr = mm->bd_addr + mpx_get_bd_entry_offset(mm, start);
	ret = get_bt_addr(mm, bde_vaddr, &bt_addr);
	/*
	 * No bounds table there, so nothing to unmap.
	 */
	if (ret == -ENOENT)
		return 0;
	if (ret)
		return ret;
	/*
	 * We are unmapping an entire table.  Either because the
	 * unmap that started this whole process was large enough
	 * to cover an entire table, or that the unmap was small
	 * but was the area covered by a bounds table.
	 */
	if ((start == bta_start_vaddr) &&
	    (end == bta_end_vaddr))
		return unmap_entire_bt(mm, bde_vaddr, bt_addr);
	return zap_bt_entries_mapping(mm, bt_addr, start, end);
}

static int mpx_unmap_tables(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	unsigned long one_unmap_start;
	trace_mpx_unmap_search(start, end);

	one_unmap_start = start;
	while (one_unmap_start < end) {
		int ret;
		unsigned long next_unmap_start = ALIGN(one_unmap_start+1,
						       bd_entry_virt_space(mm));
		unsigned long one_unmap_end = end;
		/*
		 * If the end is beyond the current bounds table,
		 * move it back so we only deal with a single one
		 * at a time.
		 */
		if (one_unmap_end > next_unmap_start)
			one_unmap_end = next_unmap_start;
		ret = try_unmap_single_bt(mm, one_unmap_start, one_unmap_end);
		if (ret)
			return ret;

		one_unmap_start = next_unmap_start;
	}
	return 0;
}

/*
 * Free unused bounds tables covered in a virtual address region being
 * munmap()ed. Assume end > start.
 *
 * This function will be called by do_munmap(). The VMAs covering
 * the virtual address region start...end have already been split if
 * necessary, and 'vma' is the first VMA in this range (start -> end).
 */
void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int ret;

	/*
	 * Refuse to do anything unless userspace has asked
	 * the kernel to help manage the bounds tables.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return;
	/*
	 * This will look across the entire 'start -> end' range,
	 * and find all of the non-VM_MPX VMAs.
	 *
	 * To avoid recursion, if a VM_MPX vma is found in the range
	 * (start->end), we will not continue follow-up work. This
	 * recursion represents having bounds tables for bounds tables,
	 * which should not occur normally. Being strict about it here
	 * helps ensure that we do not have an exploitable stack overflow.
	 */
	do {
		if (vma->vm_flags & VM_MPX)
			return;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);

	ret = mpx_unmap_tables(mm, start, end);
	if (ret)
		force_sig(SIGSEGV, current);
}