• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Based on arch/arm/kernel/ptrace.c
4  *
5  * By Ross Biro 1/23/92
6  * edited by Linus Torvalds
7  * ARM modifications Copyright (C) 2000 Russell King
8  * Copyright (C) 2012 ARM Ltd.
9  */
10 
11 #include <linux/audit.h>
12 #include <linux/compat.h>
13 #include <linux/kernel.h>
14 #include <linux/sched/signal.h>
15 #include <linux/sched/task_stack.h>
16 #include <linux/mm.h>
17 #include <linux/nospec.h>
18 #include <linux/smp.h>
19 #include <linux/ptrace.h>
20 #include <linux/user.h>
21 #include <linux/seccomp.h>
22 #include <linux/security.h>
23 #include <linux/init.h>
24 #include <linux/signal.h>
25 #include <linux/string.h>
26 #include <linux/uaccess.h>
27 #include <linux/perf_event.h>
28 #include <linux/hw_breakpoint.h>
29 #include <linux/regset.h>
30 #include <linux/elf.h>
31 #include <linux/rseq.h>
32 
33 #include <asm/compat.h>
34 #include <asm/cpufeature.h>
35 #include <asm/debug-monitors.h>
36 #include <asm/fpsimd.h>
37 #include <asm/mte.h>
38 #include <asm/pointer_auth.h>
39 #include <asm/stacktrace.h>
40 #include <asm/syscall.h>
41 #include <asm/traps.h>
42 #include <asm/system_misc.h>
43 
44 #define CREATE_TRACE_POINTS
45 #include <trace/events/syscalls.h>
46 
47 EXPORT_TRACEPOINT_SYMBOL_GPL(sys_exit);
48 
49 struct pt_regs_offset {
50 	const char *name;
51 	int offset;
52 };
53 
54 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
55 #define REG_OFFSET_END {.name = NULL, .offset = 0}
56 #define GPR_OFFSET_NAME(r) \
57 	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}
58 
59 static const struct pt_regs_offset regoffset_table[] = {
60 	GPR_OFFSET_NAME(0),
61 	GPR_OFFSET_NAME(1),
62 	GPR_OFFSET_NAME(2),
63 	GPR_OFFSET_NAME(3),
64 	GPR_OFFSET_NAME(4),
65 	GPR_OFFSET_NAME(5),
66 	GPR_OFFSET_NAME(6),
67 	GPR_OFFSET_NAME(7),
68 	GPR_OFFSET_NAME(8),
69 	GPR_OFFSET_NAME(9),
70 	GPR_OFFSET_NAME(10),
71 	GPR_OFFSET_NAME(11),
72 	GPR_OFFSET_NAME(12),
73 	GPR_OFFSET_NAME(13),
74 	GPR_OFFSET_NAME(14),
75 	GPR_OFFSET_NAME(15),
76 	GPR_OFFSET_NAME(16),
77 	GPR_OFFSET_NAME(17),
78 	GPR_OFFSET_NAME(18),
79 	GPR_OFFSET_NAME(19),
80 	GPR_OFFSET_NAME(20),
81 	GPR_OFFSET_NAME(21),
82 	GPR_OFFSET_NAME(22),
83 	GPR_OFFSET_NAME(23),
84 	GPR_OFFSET_NAME(24),
85 	GPR_OFFSET_NAME(25),
86 	GPR_OFFSET_NAME(26),
87 	GPR_OFFSET_NAME(27),
88 	GPR_OFFSET_NAME(28),
89 	GPR_OFFSET_NAME(29),
90 	GPR_OFFSET_NAME(30),
91 	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
92 	REG_OFFSET_NAME(sp),
93 	REG_OFFSET_NAME(pc),
94 	REG_OFFSET_NAME(pstate),
95 	REG_OFFSET_END,
96 };
97 
98 /**
99  * regs_query_register_offset() - query register offset from its name
100  * @name:	the name of a register
101  *
102  * regs_query_register_offset() returns the offset of a register in struct
103  * pt_regs from its name. If the name is invalid, this returns -EINVAL;
104  */
regs_query_register_offset(const char * name)105 int regs_query_register_offset(const char *name)
106 {
107 	const struct pt_regs_offset *roff;
108 
109 	for (roff = regoffset_table; roff->name != NULL; roff++)
110 		if (!strcmp(roff->name, name))
111 			return roff->offset;
112 	return -EINVAL;
113 }
114 
115 /**
116  * regs_within_kernel_stack() - check the address in the stack
117  * @regs:      pt_regs which contains kernel stack pointer.
118  * @addr:      address which is checked.
119  *
120  * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
121  * If @addr is within the kernel stack, it returns true. If not, returns false.
122  */
regs_within_kernel_stack(struct pt_regs * regs,unsigned long addr)123 static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
124 {
125 	return ((addr & ~(THREAD_SIZE - 1))  ==
126 		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
127 		on_irq_stack(addr, sizeof(unsigned long));
128 }
129 
130 /**
131  * regs_get_kernel_stack_nth() - get Nth entry of the stack
132  * @regs:	pt_regs which contains kernel stack pointer.
133  * @n:		stack entry number.
134  *
135  * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
136  * is specified by @regs. If the @n th entry is NOT in the kernel stack,
137  * this returns 0.
138  */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *slot;

	/* Index the n-th word above the saved kernel stack pointer. */
	slot = (unsigned long *)kernel_stack_pointer(regs) + n;
	if (!regs_within_kernel_stack(regs, (unsigned long)slot))
		return 0;

	return READ_ONCE_NOCHECK(*slot);
}
149 
150 /*
151  * TODO: does not yet catch signals sent when the child dies.
152  * in exit.c or in signal.c.
153  */
154 
155 /*
156  * Called by kernel/ptrace.c when detaching..
157  */
/* Arch hook called by kernel/ptrace.c when detaching from a tracee. */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * Ideally core code would clear single-step on detach, but
	 * PTRACE_DETACH has accumulated enough arch-specific warts that
	 * moving this risks regressions on other architectures.
	 */
	user_disable_single_step(child);
}
167 
168 #ifdef CONFIG_HAVE_HW_BREAKPOINT
169 /*
170  * Handle hitting a HW-breakpoint.
171  */
ptrace_hbptriggered(struct perf_event * bp,struct perf_sample_data * data,struct pt_regs * regs)172 static void ptrace_hbptriggered(struct perf_event *bp,
173 				struct perf_sample_data *data,
174 				struct pt_regs *regs)
175 {
176 	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
177 	const char *desc = "Hardware breakpoint trap (ptrace)";
178 
179 	if (is_compat_task()) {
180 		int si_errno = 0;
181 		int i;
182 
183 		for (i = 0; i < ARM_MAX_BRP; ++i) {
184 			if (current->thread.debug.hbp_break[i] == bp) {
185 				si_errno = (i << 1) + 1;
186 				break;
187 			}
188 		}
189 
190 		for (i = 0; i < ARM_MAX_WRP; ++i) {
191 			if (current->thread.debug.hbp_watch[i] == bp) {
192 				si_errno = -((i << 1) + 1);
193 				break;
194 			}
195 		}
196 		arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger,
197 						  desc);
198 		return;
199 	}
200 
201 	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc);
202 }
203 
204 /*
205  * Unregister breakpoints from this task and reset the pointers in
206  * the thread_struct.
207  */
flush_ptrace_hw_breakpoint(struct task_struct * tsk)208 void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
209 {
210 	int i;
211 	struct thread_struct *t = &tsk->thread;
212 
213 	for (i = 0; i < ARM_MAX_BRP; i++) {
214 		if (t->debug.hbp_break[i]) {
215 			unregister_hw_breakpoint(t->debug.hbp_break[i]);
216 			t->debug.hbp_break[i] = NULL;
217 		}
218 	}
219 
220 	for (i = 0; i < ARM_MAX_WRP; i++) {
221 		if (t->debug.hbp_watch[i]) {
222 			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
223 			t->debug.hbp_watch[i] = NULL;
224 		}
225 	}
226 }
227 
ptrace_hw_copy_thread(struct task_struct * tsk)228 void ptrace_hw_copy_thread(struct task_struct *tsk)
229 {
230 	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
231 }
232 
ptrace_hbp_get_event(unsigned int note_type,struct task_struct * tsk,unsigned long idx)233 static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
234 					       struct task_struct *tsk,
235 					       unsigned long idx)
236 {
237 	struct perf_event *bp = ERR_PTR(-EINVAL);
238 
239 	switch (note_type) {
240 	case NT_ARM_HW_BREAK:
241 		if (idx >= ARM_MAX_BRP)
242 			goto out;
243 		idx = array_index_nospec(idx, ARM_MAX_BRP);
244 		bp = tsk->thread.debug.hbp_break[idx];
245 		break;
246 	case NT_ARM_HW_WATCH:
247 		if (idx >= ARM_MAX_WRP)
248 			goto out;
249 		idx = array_index_nospec(idx, ARM_MAX_WRP);
250 		bp = tsk->thread.debug.hbp_watch[idx];
251 		break;
252 	}
253 
254 out:
255 	return bp;
256 }
257 
ptrace_hbp_set_event(unsigned int note_type,struct task_struct * tsk,unsigned long idx,struct perf_event * bp)258 static int ptrace_hbp_set_event(unsigned int note_type,
259 				struct task_struct *tsk,
260 				unsigned long idx,
261 				struct perf_event *bp)
262 {
263 	int err = -EINVAL;
264 
265 	switch (note_type) {
266 	case NT_ARM_HW_BREAK:
267 		if (idx >= ARM_MAX_BRP)
268 			goto out;
269 		idx = array_index_nospec(idx, ARM_MAX_BRP);
270 		tsk->thread.debug.hbp_break[idx] = bp;
271 		err = 0;
272 		break;
273 	case NT_ARM_HW_WATCH:
274 		if (idx >= ARM_MAX_WRP)
275 			goto out;
276 		idx = array_index_nospec(idx, ARM_MAX_WRP);
277 		tsk->thread.debug.hbp_watch[idx] = bp;
278 		err = 0;
279 		break;
280 	}
281 
282 out:
283 	return err;
284 }
285 
ptrace_hbp_create(unsigned int note_type,struct task_struct * tsk,unsigned long idx)286 static struct perf_event *ptrace_hbp_create(unsigned int note_type,
287 					    struct task_struct *tsk,
288 					    unsigned long idx)
289 {
290 	struct perf_event *bp;
291 	struct perf_event_attr attr;
292 	int err, type;
293 
294 	switch (note_type) {
295 	case NT_ARM_HW_BREAK:
296 		type = HW_BREAKPOINT_X;
297 		break;
298 	case NT_ARM_HW_WATCH:
299 		type = HW_BREAKPOINT_RW;
300 		break;
301 	default:
302 		return ERR_PTR(-EINVAL);
303 	}
304 
305 	ptrace_breakpoint_init(&attr);
306 
307 	/*
308 	 * Initialise fields to sane defaults
309 	 * (i.e. values that will pass validation).
310 	 */
311 	attr.bp_addr	= 0;
312 	attr.bp_len	= HW_BREAKPOINT_LEN_4;
313 	attr.bp_type	= type;
314 	attr.disabled	= 1;
315 
316 	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
317 	if (IS_ERR(bp))
318 		return bp;
319 
320 	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
321 	if (err)
322 		return ERR_PTR(err);
323 
324 	return bp;
325 }
326 
ptrace_hbp_fill_attr_ctrl(unsigned int note_type,struct arch_hw_breakpoint_ctrl ctrl,struct perf_event_attr * attr)327 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
328 				     struct arch_hw_breakpoint_ctrl ctrl,
329 				     struct perf_event_attr *attr)
330 {
331 	int err, len, type, offset, disabled = !ctrl.enabled;
332 
333 	attr->disabled = disabled;
334 	if (disabled)
335 		return 0;
336 
337 	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
338 	if (err)
339 		return err;
340 
341 	switch (note_type) {
342 	case NT_ARM_HW_BREAK:
343 		if ((type & HW_BREAKPOINT_X) != type)
344 			return -EINVAL;
345 		break;
346 	case NT_ARM_HW_WATCH:
347 		if ((type & HW_BREAKPOINT_RW) != type)
348 			return -EINVAL;
349 		break;
350 	default:
351 		return -EINVAL;
352 	}
353 
354 	attr->bp_len	= len;
355 	attr->bp_type	= type;
356 	attr->bp_addr	+= offset;
357 
358 	return 0;
359 }
360 
/*
 * Build the resource-info word exposed to userspace:
 * debug architecture version in bits [15:8], slot count in bits [7:0].
 */
static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	*info = ((u32)debug_monitors_arch() << 8) | num;
	return 0;
}
384 
/* Read back the encoded control register for slot @idx (0 if unused). */
static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	if (!bp)
		*ctrl = 0;
	else
		*ctrl = encode_ctrl_reg(counter_arch_bp(bp)->ctrl);

	return 0;
}
398 
/* Read back the address programmed into slot @idx (0 if unused). */
static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	if (!bp)
		*addr = 0;
	else
		*addr = counter_arch_bp(bp)->address;

	return 0;
}
412 
ptrace_hbp_get_initialised_bp(unsigned int note_type,struct task_struct * tsk,unsigned long idx)413 static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
414 							struct task_struct *tsk,
415 							unsigned long idx)
416 {
417 	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
418 
419 	if (!bp)
420 		bp = ptrace_hbp_create(note_type, tsk, idx);
421 
422 	return bp;
423 }
424 
/* Apply a user-supplied control word to slot @idx. */
static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	struct arch_hw_breakpoint_ctrl ctrl;
	struct perf_event_attr attr;
	struct perf_event *bp;
	int err;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	/* Rebuild the perf attr from the decoded control word. */
	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}
449 
/* Apply a user-supplied address to slot @idx. */
static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	struct perf_event_attr attr;
	struct perf_event *bp;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	attr.bp_addr = addr;
	return modify_user_hw_breakpoint(bp, &attr);
}
470 
471 #define PTRACE_HBP_ADDR_SZ	sizeof(u64)
472 #define PTRACE_HBP_CTRL_SZ	sizeof(u32)
473 #define PTRACE_HBP_PAD_SZ	sizeof(u32)
474 
/*
 * regset get() for NT_ARM_HW_BREAK / NT_ARM_HW_WATCH: emit the resource
 * info word, a pad word, then one (addr:u64, ctrl:u32, pad:u32) record
 * per slot until the caller's buffer is full. Layout must match
 * struct user_hwdebug_state exactly (userspace ABI).
 */
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));
	membuf_zero(&to, sizeof(u32));	/* pad after the info word */
	/* (address, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		membuf_store(&to, addr);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));	/* per-slot pad */
		idx++;
	}
	return 0;
}
506 
/*
 * regset set() for NT_ARM_HW_BREAK / NT_ARM_HW_WATCH: skip the read-only
 * resource-info word, then consume (addr, ctrl, pad) records, programming
 * each slot as it is parsed. A write may legitimately stop mid-record
 * after the address, but a partial address is rejected.
 */
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		/* Refuse a torn address: all 8 bytes or nothing. */
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		/* The write may end here, leaving ctrl untouched. */
		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		/* Discard the per-slot pad word. */
		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  offset, offset + PTRACE_HBP_PAD_SZ);
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
554 #endif	/* CONFIG_HAVE_HW_BREAKPOINT */
555 
gpr_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)556 static int gpr_get(struct task_struct *target,
557 		   const struct user_regset *regset,
558 		   struct membuf to)
559 {
560 	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
561 	return membuf_write(&to, uregs, sizeof(*uregs));
562 }
563 
/* regset set() for NT_PRSTATUS: validate and install a new register frame. */
static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
	int ret;

	/* Merge the user-supplied bytes over a copy of the current frame. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	/* Reject register values (e.g. PSTATE) the task may not hold. */
	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}
581 
fpr_active(struct task_struct * target,const struct user_regset * regset)582 static int fpr_active(struct task_struct *target, const struct user_regset *regset)
583 {
584 	if (!system_supports_fpsimd())
585 		return -ENODEV;
586 	return regset->n;
587 }
588 
589 /*
590  * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
591  */
__fpr_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)592 static int __fpr_get(struct task_struct *target,
593 		     const struct user_regset *regset,
594 		     struct membuf to)
595 {
596 	struct user_fpsimd_state *uregs;
597 
598 	fpsimd_sync_from_effective_state(target);
599 
600 	uregs = &target->thread.uw.fpsimd_state;
601 
602 	return membuf_write(&to, uregs, sizeof(*uregs));
603 }
604 
fpr_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)605 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
606 		   struct membuf to)
607 {
608 	if (!system_supports_fpsimd())
609 		return -EINVAL;
610 
611 	if (target == current)
612 		fpsimd_preserve_current_state();
613 
614 	return __fpr_get(target, regset, to);
615 }
616 
/*
 * Merge user-supplied FPSIMD data (starting at @start_pos within the
 * regset payload) over the task's current state and store the result.
 * The caller is responsible for any post-update flush/propagation.
 */
static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	fpsimd_sync_from_effective_state(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	/* ret is 0 here. */
	return ret;
}
643 
/* regset set() for NT_PRFPREG. */
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int err;

	if (!system_supports_fpsimd())
		return -EINVAL;

	err = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (err)
		return err;

	/* Propagate the new V regs and invalidate any cached CPU state. */
	fpsimd_sync_to_effective_state_zeropad(target);
	fpsimd_flush_task_state(target);

	return 0;
}
662 
tls_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)663 static int tls_get(struct task_struct *target, const struct user_regset *regset,
664 		   struct membuf to)
665 {
666 	int ret;
667 
668 	if (target == current)
669 		tls_preserve_current_state();
670 
671 	ret = membuf_store(&to, target->thread.uw.tp_value);
672 	if (system_supports_tpidr2())
673 		ret = membuf_store(&to, target->thread.tpidr2_el0);
674 	else
675 		ret = membuf_zero(&to, sizeof(u64));
676 
677 	return ret;
678 }
679 
/*
 * regset set() for NT_ARM_TLS: update TPIDR_EL0 and, where implemented,
 * TPIDR2_EL0 from a two-slot user buffer.
 */
static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls[2];

	/*
	 * Seed with current values so a short write only updates the
	 * slots it covers. NOTE(review): tls[1] stays uninitialized when
	 * TPIDR2 is unimplemented, but it is also never written back in
	 * that case, so the stale value is harmless.
	 */
	tls[0] = target->thread.uw.tp_value;
	if (system_supports_tpidr2())
		tls[1] = target->thread.tpidr2_el0;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls[0];
	if (system_supports_tpidr2())
		target->thread.tpidr2_el0 = tls[1];

	return ret;
}
701 
fpmr_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)702 static int fpmr_get(struct task_struct *target, const struct user_regset *regset,
703 		   struct membuf to)
704 {
705 	if (!system_supports_fpmr())
706 		return -EINVAL;
707 
708 	if (target == current)
709 		fpsimd_preserve_current_state();
710 
711 	return membuf_store(&to, target->thread.uw.fpmr);
712 }
713 
/* regset set() for NT_ARM_FPMR. */
static int fpmr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	unsigned long fpmr;
	int err;

	if (!system_supports_fpmr())
		return -EINVAL;

	/* Seed with the current value so a short write merges cleanly. */
	fpmr = target->thread.uw.fpmr;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpmr, 0, count);
	if (err)
		return err;

	target->thread.uw.fpmr = fpmr;
	fpsimd_flush_task_state(target);

	return 0;
}
736 
system_call_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)737 static int system_call_get(struct task_struct *target,
738 			   const struct user_regset *regset,
739 			   struct membuf to)
740 {
741 	return membuf_store(&to, task_pt_regs(target)->syscallno);
742 }
743 
/* regset set() for NT_ARM_SYSTEM_CALL: overwrite the saved syscall number. */
static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret == 0)
		task_pt_regs(target)->syscallno = syscallno;

	return ret;
}
759 
760 #ifdef CONFIG_ARM64_SVE
761 
sve_init_header_from_task(struct user_sve_header * header,struct task_struct * target,enum vec_type type)762 static void sve_init_header_from_task(struct user_sve_header *header,
763 				      struct task_struct *target,
764 				      enum vec_type type)
765 {
766 	unsigned int vq;
767 	bool active;
768 	enum vec_type task_type;
769 
770 	memset(header, 0, sizeof(*header));
771 
772 	/* Check if the requested registers are active for the task */
773 	if (thread_sm_enabled(&target->thread))
774 		task_type = ARM64_VEC_SME;
775 	else
776 		task_type = ARM64_VEC_SVE;
777 	active = (task_type == type);
778 
779 	if (active && target->thread.fp_type == FP_STATE_SVE)
780 		header->flags = SVE_PT_REGS_SVE;
781 	else
782 		header->flags = SVE_PT_REGS_FPSIMD;
783 
784 	switch (type) {
785 	case ARM64_VEC_SVE:
786 		if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
787 			header->flags |= SVE_PT_VL_INHERIT;
788 		break;
789 	case ARM64_VEC_SME:
790 		if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
791 			header->flags |= SVE_PT_VL_INHERIT;
792 		break;
793 	default:
794 		WARN_ON_ONCE(1);
795 		return;
796 	}
797 
798 	header->vl = task_get_vl(target, type);
799 	vq = sve_vq_from_vl(header->vl);
800 
801 	header->max_vl = vec_max_vl(type);
802 	if (active)
803 		header->size = SVE_PT_SIZE(vq, header->flags);
804 	else
805 		header->size = sizeof(header);
806 	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
807 				      SVE_PT_REGS_SVE);
808 }
809 
sve_size_from_header(struct user_sve_header const * header)810 static unsigned int sve_size_from_header(struct user_sve_header const *header)
811 {
812 	return ALIGN(header->size, SVE_VQ_BYTES);
813 }
814 
/*
 * regset get() backend shared by NT_ARM_SVE and NT_ARM_SSVE: emit the
 * user_sve_header followed by the register payload in either FPSIMD or
 * full SVE layout, as indicated by header.flags. The offsets written
 * here are userspace ABI (see SVE_PT_* in the uapi headers).
 */
static int sve_get_common(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to,
			  enum vec_type type)
{
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	/* Reading our own state: flush live registers to memory first. */
	if (target == current)
		fpsimd_preserve_current_state();

	/* Header */
	sve_init_header_from_task(&header, target, type);
	vq = sve_vq_from_vl(header.vl);

	membuf_write(&to, &header, sizeof(header));

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));

	/*
	 * When the requested vector type is not active, do not present data
	 * from the other mode to userspace.
	 */
	if (header.size == sizeof(header))
		return 0;

	switch ((header.flags & SVE_PT_REGS_MASK)) {
	case SVE_PT_REGS_FPSIMD:
		return __fpr_get(target, regset, to);

	case SVE_PT_REGS_SVE:
		/* Z regs, P regs and FFR straight out of sve_state. */
		start = SVE_PT_SVE_OFFSET;
		end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
		membuf_write(&to, target->thread.sve_state, end - start);

		/* Pad up to the FPSR offset. */
		start = end;
		end = SVE_PT_SVE_FPSR_OFFSET(vq);
		membuf_zero(&to, end - start);

		/*
		 * Copy fpsr, and fpcr which must follow contiguously in
		 * struct fpsimd_state:
		 */
		start = end;
		end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
		membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr,
			     end - start);

		/* Zero the alignment tail up to the declared size. */
		start = end;
		end = sve_size_from_header(&header);
		return membuf_zero(&to, end - start);

	default:
		BUILD_BUG();
	}
}
873 
sve_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)874 static int sve_get(struct task_struct *target,
875 		   const struct user_regset *regset,
876 		   struct membuf to)
877 {
878 	if (!system_supports_sve())
879 		return -EINVAL;
880 
881 	return sve_get_common(target, regset, to, ARM64_VEC_SVE);
882 }
883 
/*
 * regset set() backend shared by NT_ARM_SVE and NT_ARM_SSVE: parse the
 * user_sve_header, (re)configure the vector length and streaming mode,
 * then load the register payload in FPSIMD or full SVE layout.
 *
 * Fix: the "Always zero V regs" memset operated on *current* rather than
 * *target*. During PTRACE_SETREGSET the target is the stopped tracee, so
 * zeroing current's fpsimd_state corrupts the tracer's own FP state and
 * leaves the tracee's stale; every other statement here acts on @target.
 */
static int sve_set_common(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf,
			  enum vec_type type)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;
	bool fpsimd;

	fpsimd_flush_task_state(target);

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		return ret;

	/*
	 * Streaming SVE data is always stored and presented in SVE format.
	 * Require the user to provide SVE formatted data for consistency, and
	 * to avoid the risk that we configure the task into an invalid state.
	 */
	fpsimd = (header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD;
	if (fpsimd && type == ARM64_VEC_SME)
		return -EINVAL;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for us:
	 */
	ret = vec_set_vector_length(target, type, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		return ret;

	/* Allocate SME storage if necessary, preserving any existing ZA/ZT state */
	if (type == ARM64_VEC_SME) {
		sme_alloc(target, false);
		if (!target->thread.sme_state)
			return -ENOMEM;
	}

	/* Allocate SVE storage if necessary, zeroing any existing SVE state */
	if (!fpsimd) {
		sve_alloc(target, true);
		if (!target->thread.sve_state)
			return -ENOMEM;
	}

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(task_get_vl(target, type));

	/* Enter/exit streaming mode */
	if (system_supports_sme()) {
		switch (type) {
		case ARM64_VEC_SVE:
			target->thread.svcr &= ~SVCR_SM_MASK;
			set_tsk_thread_flag(target, TIF_SVE);
			break;
		case ARM64_VEC_SME:
			target->thread.svcr |= SVCR_SM_MASK;
			set_tsk_thread_flag(target, TIF_SME);
			break;
		default:
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
	}

	/*
	 * Always zero V regs, FPSR, and FPCR. This must act on @target:
	 * the tracee being written, not the calling tracer.
	 */
	memset(&target->thread.uw.fpsimd_state, 0,
	       sizeof(target->thread.uw.fpsimd_state));

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if (fpsimd) {
		clear_tsk_thread_flag(target, TIF_SVE);
		target->thread.fp_type = FP_STATE_FPSIMD;
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		return ret;
	}

	/* Otherwise: no registers or full SVE case. */

	target->thread.fp_type = FP_STATE_SVE;

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl))
		return -EIO;

	/* Z regs, P regs and FFR straight into sve_state. */
	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		return ret;

	/* Skip the padding between FFR and FPSR. */
	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, start, end);

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

	return ret;
}
1010 
/* regset set() for NT_ARM_SVE. */
static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SVE);
}
1022 
1023 #endif /* CONFIG_ARM64_SVE */
1024 
1025 #ifdef CONFIG_ARM64_SME
1026 
ssve_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1027 static int ssve_get(struct task_struct *target,
1028 		   const struct user_regset *regset,
1029 		   struct membuf to)
1030 {
1031 	if (!system_supports_sme())
1032 		return -EINVAL;
1033 
1034 	return sve_get_common(target, regset, to, ARM64_VEC_SME);
1035 }
1036 
/*
 * Set the NT_ARM_SSVE regset: streaming-mode SVE state, routed through
 * the shared sve_set_common() with the SME vector type.
 */
static int ssve_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	if (system_supports_sme())
		return sve_set_common(target, regset, pos, count, kbuf, ubuf,
				      ARM64_VEC_SME);

	return -EINVAL;
}
1048 
/*
 * Read the NT_ARM_ZA regset: a struct user_za_header, followed by the
 * raw ZA storage only when ZA is enabled for the task, then zero
 * padding up to a SVE_VQ_BYTES multiple.
 */
static int za_get(struct task_struct *target,
		  const struct user_regset *regset,
		  struct membuf to)
{
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	memset(&header, 0, sizeof(header));

	if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
		header.flags |= ZA_PT_VL_INHERIT;

	header.vl = task_get_sme_vl(target);
	vq = sve_vq_from_vl(header.vl);
	header.max_vl = sme_max_vl();
	header.max_size = ZA_PT_SIZE(vq);

	/* If ZA is not active there is only the header */
	if (thread_za_enabled(&target->thread))
		header.size = ZA_PT_SIZE(vq);
	else
		header.size = ZA_PT_ZA_OFFSET;

	membuf_write(&to, &header, sizeof(header));

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	end = ZA_PT_ZA_OFFSET;

	/* Make sure the saved register state is current before reading it */
	if (target == current)
		fpsimd_preserve_current_state();

	/* Any register data to include? */
	if (thread_za_enabled(&target->thread)) {
		start = end;
		end = ZA_PT_SIZE(vq);
		membuf_write(&to, target->thread.sme_state, end - start);
	}

	/* Zero any trailing padding */
	start = end;
	end = ALIGN(header.size, SVE_VQ_BYTES);
	return membuf_zero(&to, end - start);
}
1097 
/*
 * Set the NT_ARM_ZA regset: consume the user_za_header (which may
 * change the SME vector length), ensure backing storage exists, then
 * either disable ZA (no payload) or copy in the ZA data and enable it.
 * Most error paths go through "out" so the task's saved FP state is
 * always invalidated after the VL may have changed.
 */
static int za_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * All current ZA_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for
	 * us:
	 */
	ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl,
		((unsigned long)header.flags) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(task_get_sme_vl(target));

	/* Ensure there is some SVE storage for streaming mode */
	if (!target->thread.sve_state) {
		sve_alloc(target, false);
		if (!target->thread.sve_state) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Only flush the storage if PSTATE.ZA was not already set,
	 * otherwise preserve any existing data.
	 */
	sme_alloc(target, !thread_za_enabled(&target->thread));
	if (!target->thread.sme_state)
		return -ENOMEM;

	/* If there is no data then disable ZA */
	if (!count) {
		target->thread.svcr &= ~SVCR_ZA_MASK;
		goto out;
	}

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	start = ZA_PT_ZA_OFFSET;
	end = ZA_PT_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sme_state,
				 start, end);
	if (ret)
		goto out;

	/* Mark ZA as active and let userspace use it */
	set_tsk_thread_flag(target, TIF_SME);
	target->thread.svcr |= SVCR_ZA_MASK;

out:
	/* Discard any stale saved FP state so it is reloaded from memory */
	fpsimd_flush_task_state(target);
	return ret;
}
1182 
zt_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1183 static int zt_get(struct task_struct *target,
1184 		  const struct user_regset *regset,
1185 		  struct membuf to)
1186 {
1187 	if (!system_supports_sme2())
1188 		return -EINVAL;
1189 
1190 	/*
1191 	 * If PSTATE.ZA is not set then ZT will be zeroed when it is
1192 	 * enabled so report the current register value as zero.
1193 	 */
1194 	if (thread_za_enabled(&target->thread))
1195 		membuf_write(&to, thread_zt_state(&target->thread),
1196 			     ZT_SIG_REG_BYTES);
1197 	else
1198 		membuf_zero(&to, ZT_SIG_REG_BYTES);
1199 
1200 	return 0;
1201 }
1202 
/*
 * Set the NT_ARM_ZT regset (SME2 ZT0).  Ensures SVE and ZA backing
 * storage exist first, copies the value in, and on success enables ZA
 * (PSTATE.ZA) and TIF_SME; the saved FP state is invalidated either way.
 */
static int zt_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_sme2())
		return -EINVAL;

	/* Ensure SVE storage in case this is first use of SME */
	sve_alloc(target, false);
	if (!target->thread.sve_state)
		return -ENOMEM;

	/* Flush ZA storage only if ZA was off, preserving existing data */
	if (!thread_za_enabled(&target->thread)) {
		sme_alloc(target, true);
		if (!target->thread.sme_state)
			return -ENOMEM;
	}

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 thread_zt_state(&target->thread),
				 0, ZT_SIG_REG_BYTES);
	if (ret == 0) {
		/* ZT is only accessible with ZA enabled */
		target->thread.svcr |= SVCR_ZA_MASK;
		set_tsk_thread_flag(target, TIF_SME);
	}

	fpsimd_flush_task_state(target);

	return ret;
}
1236 
1237 #endif /* CONFIG_ARM64_SME */
1238 
1239 #ifdef CONFIG_ARM64_PTR_AUTH
pac_mask_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1240 static int pac_mask_get(struct task_struct *target,
1241 			const struct user_regset *regset,
1242 			struct membuf to)
1243 {
1244 	/*
1245 	 * The PAC bits can differ across data and instruction pointers
1246 	 * depending on TCR_EL1.TBID*, which we may make use of in future, so
1247 	 * we expose separate masks.
1248 	 */
1249 	unsigned long mask = ptrauth_user_pac_mask();
1250 	struct user_pac_mask uregs = {
1251 		.data_mask = mask,
1252 		.insn_mask = mask,
1253 	};
1254 
1255 	if (!system_supports_address_auth())
1256 		return -EINVAL;
1257 
1258 	return membuf_write(&to, &uregs, sizeof(uregs));
1259 }
1260 
/*
 * Read the NT_ARM_PAC_ENABLED_KEYS regset: the set of PAC keys
 * currently enabled for the task, or a negative error value.
 */
static int pac_enabled_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(keys))
		return keys;

	return membuf_write(&to, &keys, sizeof(keys));
}
1272 
/*
 * Set the NT_ARM_PAC_ENABLED_KEYS regset.  The user value is overlaid
 * on the current enabled-key set before being applied, so partial
 * writes preserve the untouched bits.
 */
static int pac_enabled_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	long keys;
	int err;

	keys = ptrauth_get_enabled_keys(target);
	if (IS_ERR_VALUE(keys))
		return keys;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &keys, 0,
				 sizeof(long));
	if (err)
		return err;

	return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK,
					keys);
}
1292 
1293 #ifdef CONFIG_CHECKPOINT_RESTORE
pac_key_to_user(const struct ptrauth_key * key)1294 static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
1295 {
1296 	return (__uint128_t)key->hi << 64 | key->lo;
1297 }
1298 
pac_key_from_user(__uint128_t ukey)1299 static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
1300 {
1301 	struct ptrauth_key key = {
1302 		.lo = (unsigned long)ukey,
1303 		.hi = (unsigned long)(ukey >> 64),
1304 	};
1305 
1306 	return key;
1307 }
1308 
/* Convert the task's four address-auth keys to the user-visible layout. */
static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apiakey = pac_key_to_user(&keys->apia);
	ukeys->apibkey = pac_key_to_user(&keys->apib);
	ukeys->apdakey = pac_key_to_user(&keys->apda);
	ukeys->apdbkey = pac_key_to_user(&keys->apdb);
}
1317 
/* Convert user-supplied address-auth keys back into the task's layout. */
static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_address_keys *ukeys)
{
	keys->apia = pac_key_from_user(ukeys->apiakey);
	keys->apib = pac_key_from_user(ukeys->apibkey);
	keys->apda = pac_key_from_user(ukeys->apdakey);
	keys->apdb = pac_key_from_user(ukeys->apdbkey);
}
1326 
pac_address_keys_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1327 static int pac_address_keys_get(struct task_struct *target,
1328 				const struct user_regset *regset,
1329 				struct membuf to)
1330 {
1331 	struct ptrauth_keys_user *keys = &target->thread.keys_user;
1332 	struct user_pac_address_keys user_keys;
1333 
1334 	if (!system_supports_address_auth())
1335 		return -EINVAL;
1336 
1337 	pac_address_keys_to_user(&user_keys, keys);
1338 
1339 	return membuf_write(&to, &user_keys, sizeof(user_keys));
1340 }
1341 
/*
 * Set the NT_ARM_PACA_KEYS regset.  The current keys are used as the
 * baseline so a partial write only changes the bytes supplied.
 */
static int pac_address_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct user_pac_address_keys ukeys;
	int err;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&ukeys, &target->thread.keys_user);
	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &ukeys, 0, -1);
	if (err)
		return err;
	pac_address_keys_from_user(&target->thread.keys_user, &ukeys);

	return 0;
}
1363 
/* Convert the task's generic-auth (APGA) key to the user-visible layout. */
static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apgakey = pac_key_to_user(&keys->apga);
}
1369 
/* Convert a user-supplied generic-auth (APGA) key back to task layout. */
static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_generic_keys *ukeys)
{
	keys->apga = pac_key_from_user(ukeys->apgakey);
}
1375 
pac_generic_keys_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1376 static int pac_generic_keys_get(struct task_struct *target,
1377 				const struct user_regset *regset,
1378 				struct membuf to)
1379 {
1380 	struct ptrauth_keys_user *keys = &target->thread.keys_user;
1381 	struct user_pac_generic_keys user_keys;
1382 
1383 	if (!system_supports_generic_auth())
1384 		return -EINVAL;
1385 
1386 	pac_generic_keys_to_user(&user_keys, keys);
1387 
1388 	return membuf_write(&to, &user_keys, sizeof(user_keys));
1389 }
1390 
/*
 * Set the NT_ARM_PACG_KEYS regset, overlaying the user data on the
 * current key so partial writes are well-defined.
 */
static int pac_generic_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct user_pac_generic_keys ukeys;
	int err;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&ukeys, &target->thread.keys_user);
	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &ukeys, 0, -1);
	if (err)
		return err;
	pac_generic_keys_from_user(&target->thread.keys_user, &ukeys);

	return 0;
}
1412 #endif /* CONFIG_CHECKPOINT_RESTORE */
1413 #endif /* CONFIG_ARM64_PTR_AUTH */
1414 
1415 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/*
 * Read the NT_ARM_TAGGED_ADDR_CTRL regset: the task's tagged address
 * ABI control word, as also reported by PR_GET_TAGGED_ADDR_CTRL.
 */
static int tagged_addr_ctrl_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long val = get_tagged_addr_ctrl(target);

	/* A negative value here indicates an internal error */
	if (WARN_ON_ONCE(IS_ERR_VALUE(val)))
		return val;

	return membuf_write(&to, &val, sizeof(val));
}
1427 
/*
 * Set the NT_ARM_TAGGED_ADDR_CTRL regset.  The current control word is
 * used as the baseline for partial writes; validation is delegated to
 * set_tagged_addr_ctrl().
 */
static int tagged_addr_ctrl_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	long val;
	int err;

	val = get_tagged_addr_ctrl(target);
	if (WARN_ON_ONCE(IS_ERR_VALUE(val)))
		return val;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &val, 0, -1);
	if (err)
		return err;

	return set_tagged_addr_ctrl(target, val);
}
1446 #endif
1447 
1448 #ifdef CONFIG_ARM64_POE
poe_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1449 static int poe_get(struct task_struct *target,
1450 		   const struct user_regset *regset,
1451 		   struct membuf to)
1452 {
1453 	if (!system_supports_poe())
1454 		return -EINVAL;
1455 
1456 	return membuf_write(&to, &target->thread.por_el0,
1457 			    sizeof(target->thread.por_el0));
1458 }
1459 
/*
 * Set the NT_ARM_POE regset, overlaying the user data on the current
 * POR_EL0 value so partial writes preserve the rest.
 */
static int poe_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	long por;
	int err;

	if (!system_supports_poe())
		return -EINVAL;

	por = target->thread.por_el0;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &por, 0, -1);
	if (err)
		return err;

	target->thread.por_el0 = por;

	return 0;
}
1481 #endif
1482 
/*
 * Indices into aarch64_regsets[] below.  These values are internal:
 * userspace identifies regsets by the NT_* note type, not by index.
 */
enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_FPMR,
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
#ifdef CONFIG_ARM64_SME
	REGSET_SSVE,
	REGSET_ZA,
	REGSET_ZT,
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	REGSET_PAC_MASK,
	REGSET_PAC_ENABLED_KEYS,
#ifdef CONFIG_CHECKPOINT_RESTORE
	REGSET_PACA_KEYS,
	REGSET_PACG_KEYS,
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	REGSET_TAGGED_ADDR_CTRL,
#endif
#ifdef CONFIG_ARM64_POE
	REGSET_POE
#endif
};
1516 
/*
 * Regsets exposed to native (AArch64) tasks.  Each entry names its ELF
 * note type and the get/set handlers defined above; config-gated
 * entries only exist when the corresponding feature is built in.
 */
static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32-bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.active = fpr_active,
		.regset_get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 2,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.regset_get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
	[REGSET_FPMR] = {
		.core_note_type = NT_ARM_FPMR,
		.n = 1,
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = fpmr_get,
		.set = fpmr_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(ARCH_SVE_VQ_MAX,
					      SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = sve_get,
		.set = sve_set,
	},
#endif
#ifdef CONFIG_ARM64_SME
	[REGSET_SSVE] = { /* Streaming mode SVE */
		.core_note_type = NT_ARM_SSVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SME_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = ssve_get,
		.set = ssve_set,
	},
	[REGSET_ZA] = { /* SME ZA */
		.core_note_type = NT_ARM_ZA,
		/*
		 * ZA is a single register but it's variably sized and
		 * the ptrace core requires that the size of any data
		 * be an exact multiple of the configured register
		 * size so report as though we had SVE_VQ_BYTES
		 * registers. These values aren't exposed to
		 * userspace.
		 */
		.n = DIV_ROUND_UP(ZA_PT_SIZE(SME_VQ_MAX), SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = za_get,
		.set = za_set,
	},
	[REGSET_ZT] = { /* SME ZT */
		.core_note_type = NT_ARM_ZT,
		.n = 1,
		.size = ZT_SIG_REG_BYTES,
		.align = sizeof(u64),
		.regset_get = zt_get,
		.set = zt_set,
	},
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	[REGSET_PAC_MASK] = {
		.core_note_type = NT_ARM_PAC_MASK,
		.n = sizeof(struct user_pac_mask) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = pac_mask_get,
		/* this cannot be set dynamically */
	},
	[REGSET_PAC_ENABLED_KEYS] = {
		.core_note_type = NT_ARM_PAC_ENABLED_KEYS,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = pac_enabled_keys_get,
		.set = pac_enabled_keys_set,
	},
#ifdef CONFIG_CHECKPOINT_RESTORE
	[REGSET_PACA_KEYS] = {
		.core_note_type = NT_ARM_PACA_KEYS,
		.n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_address_keys_get,
		.set = pac_address_keys_set,
	},
	[REGSET_PACG_KEYS] = {
		.core_note_type = NT_ARM_PACG_KEYS,
		.n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_generic_keys_get,
		.set = pac_generic_keys_set,
	},
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	[REGSET_TAGGED_ADDR_CTRL] = {
		.core_note_type = NT_ARM_TAGGED_ADDR_CTRL,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = tagged_addr_ctrl_get,
		.set = tagged_addr_ctrl_set,
	},
#endif
#ifdef CONFIG_ARM64_POE
	[REGSET_POE] = {
		.core_note_type = NT_ARM_POE,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = poe_get,
		.set = poe_set,
	},
#endif
};
1685 
/* Regset view presented to native AArch64 tracees. */
static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};
1690 
/* Indices into aarch32_regsets[] below. */
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};
1695 
compat_get_user_reg(struct task_struct * task,int idx)1696 static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx)
1697 {
1698 	struct pt_regs *regs = task_pt_regs(task);
1699 
1700 	switch (idx) {
1701 	case 15:
1702 		return regs->pc;
1703 	case 16:
1704 		return pstate_to_compat_psr(regs->pstate);
1705 	case 17:
1706 		return regs->orig_x0;
1707 	default:
1708 		return regs->regs[idx];
1709 	}
1710 }
1711 
/*
 * Read the compat NT_PRSTATUS regset: emit successive AArch32 virtual
 * registers until the caller's buffer is exhausted.
 */
static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	int idx;

	for (idx = 0; to.left; idx++)
		membuf_store(&to, compat_get_user_reg(target, idx));

	return 0;
}
1722 
/*
 * Set the compat NT_PRSTATUS regset.  The update is staged in a local
 * pt_regs copy and only committed if the result passes
 * valid_user_regs(), so a bad write cannot leave partially-applied
 * state.  Data arrives via exactly one of kbuf (kernel) or ubuf (user).
 */
static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		/* 15 = pc, 16 = cpsr, 17 = orig_r0; the rest map to regs[] */
		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			reg = compat_psr_to_pstate(reg);
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}

	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}
1784 
compat_vfp_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1785 static int compat_vfp_get(struct task_struct *target,
1786 			  const struct user_regset *regset,
1787 			  struct membuf to)
1788 {
1789 	struct user_fpsimd_state *uregs;
1790 	compat_ulong_t fpscr;
1791 
1792 	if (!system_supports_fpsimd())
1793 		return -EINVAL;
1794 
1795 	uregs = &target->thread.uw.fpsimd_state;
1796 
1797 	if (target == current)
1798 		fpsimd_preserve_current_state();
1799 
1800 	/*
1801 	 * The VFP registers are packed into the fpsimd_state, so they all sit
1802 	 * nicely together for us. We just need to create the fpscr separately.
1803 	 */
1804 	membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t));
1805 	fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
1806 		(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
1807 	return membuf_store(&to, fpscr);
1808 }
1809 
/*
 * Set the compat NT_ARM_VFP regset: copy in the packed V registers,
 * then, if the write extends that far, split the trailing fpscr word
 * back into the AArch64 fpsr/fpcr pair.  The saved FP state is
 * invalidated unconditionally so it is reloaded from memory.
 */
static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	/* The V registers occupy everything up to the final fpscr word */
	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}
1840 
compat_tls_get(struct task_struct * target,const struct user_regset * regset,struct membuf to)1841 static int compat_tls_get(struct task_struct *target,
1842 			  const struct user_regset *regset,
1843 			  struct membuf to)
1844 {
1845 	return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value);
1846 }
1847 
/* Set the compat NT_ARM_TLS regset: update the task's TLS pointer. */
static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	compat_ulong_t tls = target->thread.uw.tp_value;
	int err;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (!err)
		target->thread.uw.tp_value = tls;

	return err;
}
1863 
/* Regsets exposed through the "aarch32" view for compat tasks. */
static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.active = fpr_active,
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
};
1883 
/* Regset view presented for AArch32 (compat) core dumps. */
static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};
1888 
/*
 * Larger regset table used for PTRACE_GETREGSET on compat tasks; note
 * it reuses the REGSET_* indices of the native enum above.
 */
static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
};
1941 
/* Regset view used for compat PTRACE_GETREGSET/SETREGSET requests. */
static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};
1946 
1947 #ifdef CONFIG_COMPAT
/*
 * Handle compat PTRACE_PEEKUSR: magic offsets report text/data segment
 * addresses, offsets within the register file read a virtual register,
 * anything else in the user area reads as zero.
 */
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	/* Offsets must be word aligned */
	if (off & 3)
		return -EIO;

	switch (off) {
	case COMPAT_PT_TEXT_ADDR:
		tmp = tsk->mm->start_code;
		break;
	case COMPAT_PT_DATA_ADDR:
		tmp = tsk->mm->start_data;
		break;
	case COMPAT_PT_TEXT_END_ADDR:
		tmp = tsk->mm->end_code;
		break;
	default:
		if (off < sizeof(compat_elf_gregset_t))
			tmp = compat_get_user_reg(tsk, off >> 2);
		else if (off >= COMPAT_USER_SZ)
			return -EIO;
		else
			tmp = 0;
		break;
	}

	return put_user(tmp, ret);
}
1971 
/*
 * Handle compat PTRACE_POKEUSR.  Only writes into the register file
 * take effect (others within the user area are silently ignored), and
 * the update is staged and validated before being committed.
 */
static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	struct pt_regs newregs = *task_pt_regs(tsk);
	unsigned int idx = off / 4;

	/* Offsets must be word aligned and within the compat user area */
	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	/* Writes beyond the register file are accepted but ignored */
	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	switch (idx) {
	case 15:	/* AArch32 pc */
		newregs.pc = val;
		break;
	case 16:	/* AArch32 cpsr */
		newregs.pstate = compat_psr_to_pstate(val);
		break;
	case 17:	/* orig_r0 */
		newregs.orig_x0 = val;
		break;
	default:
		newregs.regs[idx] = val;
	}

	if (!valid_user_regs(&newregs.user_regs, tsk))
		return -EINVAL;

	*task_pt_regs(tsk) = newregs;
	return 0;
}
2004 
2005 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2006 
/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	/* Registers +/-1,2 -> slot 0; +/-3,4 -> slot 1; and so on. */
	return (abs(num) - 1) >> 1;
}
2018 
/*
 * Build the read-only "resource info" word (virtual register 0):
 * byte 3 = debug architecture, byte 2 = watchpoint length,
 * byte 1 = number of watchpoints, byte 0 = number of breakpoints.
 */
static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps = hw_breakpoint_slots(TYPE_INST);
	u8 num_wrps = hw_breakpoint_slots(TYPE_DATA);
	u8 debug_arch = debug_monitors_arch();
	u8 wp_len = 8;

	*kdata = ((u32)debug_arch << 24) | ((u32)wp_len << 16) |
		 ((u32)num_wrps << 8) | num_brps;
	return 0;
}
2040 
/*
 * Read one half of a breakpoint/watchpoint register pair. Odd virtual
 * register numbers select the address, even ones the control word.
 * On getter failure *kdata is left as 0.
 */
static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	int idx = compat_ptrace_hbp_num_to_idx(num);
	int err;

	if (num & 1) {
		u64 addr = 0;

		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;	/* compat view truncates to 32 bits */
	} else {
		u32 ctrl = 0;

		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}
2061 
/*
 * Write one half of a breakpoint/watchpoint register pair. Odd virtual
 * register numbers carry the address, even ones the control word.
 */
static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	int idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1)
		return ptrace_hbp_set_addr(note_type, tsk, idx, *kdata);

	return ptrace_hbp_set_ctrl(note_type, tsk, idx, *kdata);
}
2082 
/*
 * COMPAT_PTRACE_GETHBPREGS: read a virtual hw-debug register and copy
 * it out to userspace. Register 0 is resource info, negative numbers
 * are watchpoints, positive numbers are breakpoints.
 */
static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	u32 kdata;
	int ret;

	if (num == 0)
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	else if (num < 0)
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret ? ret : put_user(kdata, data);
}
2105 
/*
 * COMPAT_PTRACE_SETHBPREGS: write a virtual hw-debug register from a
 * userspace word. Register 0 (resource info) is read-only; writes to
 * it succeed without doing anything.
 */
static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	u32 kdata = 0;
	int ret;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	return compat_ptrace_hbp_set(num < 0 ? NT_ARM_HW_WATCH : NT_ARM_HW_BREAK,
				     tsk, num, &kdata);
}
2126 #endif	/* CONFIG_HAVE_HW_BREAKPOINT */
2127 
/*
 * Dispatcher for compat (AArch32) ptrace requests on a 32-bit tracee.
 * Requests not handled here fall through to the generic
 * compat_ptrace_request().
 */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child, &user_aarch32_view,
					  REGSET_COMPAT_GPR, 0,
					  sizeof(compat_elf_gregset_t), datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child, &user_aarch32_view,
					    REGSET_COMPAT_GPR, 0,
					    sizeof(compat_elf_gregset_t), datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child, &user_aarch32_view,
					  REGSET_COMPAT_VFP, 0,
					  VFP_STATE_SIZE, datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child, &user_aarch32_view,
					    REGSET_COMPAT_VFP, 0,
					    VFP_STATE_SIZE, datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
2205 #endif /* CONFIG_COMPAT */
2206 
/* Select the regset view appropriate for ptrace/coredump access to @task. */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 *
	 * Note: is_compat_task() tests *current* (the caller/tracer), while
	 * is_compat_thread() tests @task (the tracee) — the order matters.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;

	return &user_aarch64_view;
}
2222 
arch_ptrace(struct task_struct * child,long request,unsigned long addr,unsigned long data)2223 long arch_ptrace(struct task_struct *child, long request,
2224 		 unsigned long addr, unsigned long data)
2225 {
2226 	switch (request) {
2227 	case PTRACE_PEEKMTETAGS:
2228 	case PTRACE_POKEMTETAGS:
2229 		return mte_ptrace_copy_tags(child, request, addr, data);
2230 	}
2231 
2232 	return ptrace_request(child, request, addr, data);
2233 }
2234 
/*
 * Direction of a syscall stop. The value is written into a GPR
 * (ip/r12 for AArch32, x7 for AArch64) so the tracer can tell entry
 * stops from exit stops — see report_syscall().
 */
enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};
2239 
/* Deliver a syscall entry/exit stop to the tracer. */
static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * We have some ABI weirdness here in the way that we handle syscall
	 * exit stops because we indicate whether or not the stop has been
	 * signalled from syscall entry or syscall exit by clobbering a general
	 * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
	 * and restoring its old value after the stop. This means that:
	 *
	 * - Any writes by the tracer to this register during the stop are
	 *   ignored/discarded.
	 *
	 * - The actual value of the register is not available during the stop,
	 *   so the tracer cannot save it and restore it later.
	 *
	 * - Syscall stops behave differently to seccomp and pseudo-step traps
	 *   (the latter do not nobble any registers).
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_ENTER) {
		/* Non-zero means the tracer denied the syscall; drop it. */
		if (ptrace_report_syscall_entry(regs))
			forget_syscall(regs);
		regs->regs[regno] = saved_reg;
	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
		ptrace_report_syscall_exit(regs, 0);
		regs->regs[regno] = saved_reg;
	} else {
		/* Restore *before* reporting so the step trap sees real state. */
		regs->regs[regno] = saved_reg;

		/*
		 * Signal a pseudo-step exception since we are stepping but
		 * tracer modifications to the registers may have rewound the
		 * state machine.
		 */
		ptrace_report_syscall_exit(regs, 1);
	}
}
2283 
/*
 * Syscall-entry tracing hook: ptrace reporting, seccomp, tracepoint and
 * audit, in that order. Returns the syscall number to run, or
 * NO_SYSCALL to skip it (tracer emulation or seccomp denial).
 */
int syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long flags = read_thread_flags();

	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
		report_syscall(regs, PTRACE_SYSCALL_ENTER);
		/* PTRACE_SYSEMU: stop was delivered; don't run the syscall. */
		if (flags & _TIF_SYSCALL_EMU)
			return NO_SYSCALL;
	}

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing() == -1)
		return NO_SYSCALL;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	/* orig_x0 holds the original first argument across the syscall. */
	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}
2306 
/*
 * Syscall-exit tracing hook: audit, tracepoint, ptrace/single-step
 * reporting, then rseq fixup — in that order.
 */
void syscall_trace_exit(struct pt_regs *regs)
{
	unsigned long flags = read_thread_flags();

	audit_syscall_exit(regs);

	if (flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	/* Single-stepping also stops here (pseudo-step, see report_syscall). */
	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
		report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}
2321 
/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
 * not described in ARM DDI 0487D.a.
 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
 * be allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
/* RES0 mask when the saved state is AArch64. */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
/* RES0 mask when the saved state is AArch32 (SPSR_ELx layout). */
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
2337 
/*
 * Sanitise user-supplied register state for a 32-bit task. Returns 1 if
 * the state was acceptable as supplied (after clearing RES0 bits), or 0
 * after forcing pstate to a safe AArch32 EL0 value.
 */
static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	/* Pin the endianness bit if mixed-endian EL0 isn't supported. */
	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	/* Acceptable as-is: user mode, AArch32, A/I/F all unmasked. */
	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}
2369 
valid_native_regs(struct user_pt_regs * regs)2370 static int valid_native_regs(struct user_pt_regs *regs)
2371 {
2372 	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;
2373 
2374 	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
2375 	    (regs->pstate & PSR_D_BIT) == 0 &&
2376 	    (regs->pstate & PSR_A_BIT) == 0 &&
2377 	    (regs->pstate & PSR_I_BIT) == 0 &&
2378 	    (regs->pstate & PSR_F_BIT) == 0) {
2379 		return 1;
2380 	}
2381 
2382 	/* Force PSR to a valid 64-bit EL0t */
2383 	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;
2384 
2385 	return 0;
2386 }
2387 
/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
	user_regs_reset_single_step(regs, task);

	return is_compat_thread(task_thread_info(task)) ?
		valid_compat_regs(regs) : valid_native_regs(regs);
}
2402