// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/mte.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel
 * stack page(s). If @addr is within the kernel stack, it returns true.
 * If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, sizeof(unsigned long), NULL);
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
 * specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}

/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	const char *desc = "Hardware breakpoint trap (ptrace)";

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger,
						  desc);
		return;
	}
#endif
	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc);
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

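/*
 * Clear the per-task debug state when duplicating a thread, so that
 * hardware breakpoints are never inherited across fork()/clone().
 */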
void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

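/*
 * Look up the perf_event backing breakpoint (NT_ARM_HW_BREAK) or
 * watchpoint (NT_ARM_HW_WATCH) slot @idx. The index is bounds-checked
 * and sanitised with array_index_nospec() to prevent speculative
 * out-of-bounds array accesses.
 */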
static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

out:
	return err;
}

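/*
 * Lazily create the perf_event backing slot @idx: register a disabled
 * breakpoint with placeholder address/length and stash it in the
 * thread's slot table; it is configured and armed later via
 * ptrace_hbp_set_addr()/ptrace_hbp_set_ctrl().
 */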
static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr = 0;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = type;
	attr.disabled = 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len = len;
	attr->bp_type = type;
	attr->bp_addr += offset;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

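/*
 * Dump the hardware debug regset in the layout of struct
 * user_hwdebug_state: a 32-bit resource info word plus 32 bits of
 * padding, followed by one (u64 address, u32 control, u32 pad) record
 * per breakpoint/watchpoint slot.
 */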
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));
	membuf_zero(&to, sizeof(u32));
	/* (address, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		membuf_store(&to, addr);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));
		idx++;
	}
	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

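/*
 * The NT_PRSTATUS regset is a verbatim copy of user_pt_regs (x0-x30,
 * sp, pc and pstate). Writes are checked with valid_user_regs() before
 * being committed to the task.
 */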
static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

static int fpr_active(struct task_struct *target, const struct user_regset *regset)
{
	if (!system_supports_fpsimd())
		return -ENODEV;
	return regset->n;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.uw.fpsimd_state;

	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_fpsimd())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, to);
}

static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_fpsimd())
		return -EINVAL;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}

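/*
 * NT_ARM_TLS exposes the task's userspace thread pointer, i.e. the
 * saved value of the TPIDR_EL0 software thread ID register.
 */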
static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	if (target == current)
		tls_preserve_current_state();

	return membuf_store(&to, target->thread.uw.tp_value);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

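/*
 * NT_ARM_SYSTEM_CALL lets a tracer read or rewrite the number of the
 * syscall being traced, e.g. to skip or redirect a system call at a
 * syscall-entry stop.
 */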
static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   struct membuf to)
{
	return membuf_store(&to, task_pt_regs(target)->syscallno);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}

#ifdef CONFIG_ARM64_SVE

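/*
 * Build the user_sve_header describing the task's vector state for
 * @type (SVE or streaming SVE): the vector length, the SVE_PT_* flags
 * and the payload sizes a reader should expect.
 */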
static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target,
				      enum vec_type type)
{
	unsigned int vq;
	bool active;
	bool fpsimd_only;
	enum vec_type task_type;

	memset(header, 0, sizeof(*header));

	/* Check if the requested registers are active for the task */
	if (thread_sm_enabled(&target->thread))
		task_type = ARM64_VEC_SME;
	else
		task_type = ARM64_VEC_SVE;
	active = (task_type == type);

	switch (type) {
	case ARM64_VEC_SVE:
		if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		fpsimd_only = !test_tsk_thread_flag(target, TIF_SVE);
		break;
	case ARM64_VEC_SME:
		if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		fpsimd_only = false;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	if (active) {
		if (fpsimd_only) {
			header->flags |= SVE_PT_REGS_FPSIMD;
		} else {
			header->flags |= SVE_PT_REGS_SVE;
		}
	}

	header->vl = task_get_vl(target, type);
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = vec_max_vl(type);
	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				       SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}

static int sve_get_common(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to,
			  enum vec_type type)
{
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	/* Header */
	sve_init_header_from_task(&header, target, type);
	vq = sve_vq_from_vl(header.vl);

	membuf_write(&to, &header, sizeof(header));

	if (target == current)
		fpsimd_preserve_current_state();

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));

	switch ((header.flags & SVE_PT_REGS_MASK)) {
	case SVE_PT_REGS_FPSIMD:
		return __fpr_get(target, regset, to);

	case SVE_PT_REGS_SVE:
		start = SVE_PT_SVE_OFFSET;
		end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
		membuf_write(&to, target->thread.sve_state, end - start);

		start = end;
		end = SVE_PT_SVE_FPSR_OFFSET(vq);
		membuf_zero(&to, end - start);

		/*
		 * Copy fpsr, and fpcr which must follow contiguously in
		 * struct fpsimd_state:
		 */
		start = end;
		end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
		membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr,
			     end - start);

		start = end;
		end = sve_size_from_header(&header);
		return membuf_zero(&to, end - start);

	default:
		return 0;
	}
}

static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_get_common(target, regset, to, ARM64_VEC_SVE);
}

static int sve_set_common(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf,
			  enum vec_type type)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for us:
	 */
	ret = vec_set_vector_length(target, type, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(task_get_vl(target, type));

	/* Enter/exit streaming mode */
	if (system_supports_sme()) {
		u64 old_svcr = target->thread.svcr;

		switch (type) {
		case ARM64_VEC_SVE:
			target->thread.svcr &= ~SVCR_SM_MASK;
			break;
		case ARM64_VEC_SME:
			target->thread.svcr |= SVCR_SM_MASK;
			break;
		default:
			WARN_ON_ONCE(1);
			return -EINVAL;
		}

		/*
		 * If we switched then invalidate any existing SVE
		 * state and ensure there's storage.
		 */
		if (target->thread.svcr != old_svcr)
			sve_alloc(target, true);
	}

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		if (type == ARM64_VEC_SME)
			fpsimd_force_sync_to_sve(target);
		goto out;
	}

	/*
	 * Otherwise: no registers or full SVE case. For backwards
	 * compatibility reasons we treat empty flags as SVE registers.
	 */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target, true);
	if (!target->thread.sve_state) {
		ret = -ENOMEM;
		clear_tsk_thread_flag(target, TIF_SVE);
		goto out;
	}

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing
	 * registers unmodified. Always enable SVE even if going into
	 * streaming mode.
	 */
	fpsimd_sync_to_sve(target);
	set_tsk_thread_flag(target, TIF_SVE);

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					start, end);
	if (ret)
		goto out;

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

out:
	fpsimd_flush_task_state(target);
	return ret;
}

static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SVE);
}

#endif /* CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

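/*
 * The NT_ARM_SSVE regset reuses the SVE get/set paths above, but
 * operates on the streaming-mode vector state (ARM64_VEC_SME).
 */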
static int ssve_get(struct task_struct *target,
		    const struct user_regset *regset,
		    struct membuf to)
{
	if (!system_supports_sme())
		return -EINVAL;

	return sve_get_common(target, regset, to, ARM64_VEC_SME);
}

static int ssve_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sme())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SME);
}

static int za_get(struct task_struct *target,
		  const struct user_regset *regset,
		  struct membuf to)
{
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	memset(&header, 0, sizeof(header));

	if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
		header.flags |= ZA_PT_VL_INHERIT;

	header.vl = task_get_sme_vl(target);
	vq = sve_vq_from_vl(header.vl);
	header.max_vl = sme_max_vl();
	header.max_size = ZA_PT_SIZE(vq);

	/* If ZA is not active there is only the header */
	if (thread_za_enabled(&target->thread))
		header.size = ZA_PT_SIZE(vq);
	else
		header.size = ZA_PT_ZA_OFFSET;

	membuf_write(&to, &header, sizeof(header));

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	end = ZA_PT_ZA_OFFSET;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Any register data to include? */
	if (thread_za_enabled(&target->thread)) {
		start = end;
		end = ZA_PT_SIZE(vq);
		membuf_write(&to, target->thread.za_state, end - start);
	}

	/* Zero any trailing padding */
	start = end;
	end = ALIGN(header.size, SVE_VQ_BYTES);
	return membuf_zero(&to, end - start);
}

static int za_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * All current ZA_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for
	 * us:
	 */
	ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl,
		((unsigned long)header.flags) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(task_get_sme_vl(target));

	/* Ensure there is some SVE storage for streaming mode */
	if (!target->thread.sve_state) {
		sve_alloc(target, false);
		if (!target->thread.sve_state) {
			clear_thread_flag(TIF_SME);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Allocate/reinit ZA storage */
	sme_alloc(target);
	if (!target->thread.za_state) {
		ret = -ENOMEM;
		clear_tsk_thread_flag(target, TIF_SME);
		goto out;
	}

	/* If there is no data then disable ZA */
	if (!count) {
		target->thread.svcr &= ~SVCR_ZA_MASK;
		goto out;
	}

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	start = ZA_PT_ZA_OFFSET;
	end = ZA_PT_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.za_state,
				 start, end);
	if (ret)
		goto out;

	/* Mark ZA as active and let userspace use it */
	set_tsk_thread_flag(target, TIF_SME);
	target->thread.svcr |= SVCR_ZA_MASK;

out:
	fpsimd_flush_task_state(target);
	return ret;
}

#endif /* CONFIG_ARM64_SME */

#ifdef CONFIG_ARM64_PTR_AUTH
static int pac_mask_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	/*
	 * The PAC bits can differ across data and instruction pointers
	 * depending on TCR_EL1.TBID*, which we may make use of in future, so
	 * we expose separate masks.
	 */
	unsigned long mask = ptrauth_user_pac_mask();
	struct user_pac_mask uregs = {
		.data_mask = mask,
		.insn_mask = mask,
	};

	if (!system_supports_address_auth())
		return -EINVAL;

	return membuf_write(&to, &uregs, sizeof(uregs));
}

static int pac_enabled_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	return membuf_write(&to, &enabled_keys, sizeof(enabled_keys));
}

static int pac_enabled_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int ret;
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0,
				 sizeof(long));
	if (ret)
		return ret;

	return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK,
					enabled_keys);
}

#ifdef CONFIG_CHECKPOINT_RESTORE
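/*
 * Each 128-bit PAC key is exposed to userspace as a single __uint128_t;
 * these helpers convert between that layout and the kernel's internal
 * {lo, hi} pair in struct ptrauth_key.
 */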
static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
{
	return (__uint128_t)key->hi << 64 | key->lo;
}

static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
{
	struct ptrauth_key key = {
		.lo = (unsigned long)ukey,
		.hi = (unsigned long)(ukey >> 64),
	};

	return key;
}

static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apiakey = pac_key_to_user(&keys->apia);
	ukeys->apibkey = pac_key_to_user(&keys->apib);
	ukeys->apdakey = pac_key_to_user(&keys->apda);
	ukeys->apdbkey = pac_key_to_user(&keys->apdb);
}

static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_address_keys *ukeys)
{
	keys->apia = pac_key_from_user(ukeys->apiakey);
	keys->apib = pac_key_from_user(ukeys->apibkey);
	keys->apda = pac_key_from_user(ukeys->apdakey);
	keys->apdb = pac_key_from_user(ukeys->apdbkey);
}

static int pac_address_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_address_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;
	int ret;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_address_keys_from_user(keys, &user_keys);

	return 0;
}

static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apgakey = pac_key_to_user(&keys->apga);
}

static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_generic_keys *ukeys)
{
	keys->apga = pac_key_from_user(ukeys->apgakey);
}

static int pac_generic_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_generic_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;
	int ret;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_generic_keys_from_user(keys, &user_keys);

	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_ARM64_PTR_AUTH */

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
static int tagged_addr_ctrl_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long ctrl = get_tagged_addr_ctrl(target);

	if (IS_ERR_VALUE(ctrl))
		return ctrl;

	return membuf_write(&to, &ctrl, sizeof(ctrl));
}

static int tagged_addr_ctrl_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int ret;
	long ctrl;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
	if (ret)
		return ret;

	return set_tagged_addr_ctrl(target, ctrl);
}
#endif

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
#ifdef CONFIG_ARM64_SME
	REGSET_SSVE,
	REGSET_ZA,
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	REGSET_PAC_MASK,
	REGSET_PAC_ENABLED_KEYS,
#ifdef CONFIG_CHECKPOINT_RESTORE
	REGSET_PACA_KEYS,
	REGSET_PACG_KEYS,
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	REGSET_TAGGED_ADDR_CTRL,
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32-bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.active = fpr_active,
		.regset_get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.regset_get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = sve_get,
		.set = sve_set,
	},
#endif
#ifdef CONFIG_ARM64_SME
	[REGSET_SSVE] = { /* Streaming mode SVE */
		.core_note_type = NT_ARM_SSVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = ssve_get,
		.set = ssve_set,
	},
	[REGSET_ZA] = { /* SME ZA */
		.core_note_type = NT_ARM_ZA,
		.n = DIV_ROUND_UP(ZA_PT_ZA_SIZE(SVE_VQ_MAX), SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = za_get,
		.set = za_set,
	},
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	[REGSET_PAC_MASK] = {
		.core_note_type = NT_ARM_PAC_MASK,
		.n = sizeof(struct user_pac_mask) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = pac_mask_get,
		/* this cannot be set dynamically */
	},
	[REGSET_PAC_ENABLED_KEYS] = {
		.core_note_type = NT_ARM_PAC_ENABLED_KEYS,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = pac_enabled_keys_get,
		.set = pac_enabled_keys_set,
	},
#ifdef CONFIG_CHECKPOINT_RESTORE
	[REGSET_PACA_KEYS] = {
		.core_note_type = NT_ARM_PACA_KEYS,
		.n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_address_keys_get,
		.set = pac_address_keys_set,
	},
	[REGSET_PACG_KEYS] = {
		.core_note_type = NT_ARM_PACG_KEYS,
		.n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_generic_keys_get,
		.set = pac_generic_keys_set,
	},
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	[REGSET_TAGGED_ADDR_CTRL] = {
		.core_note_type = NT_ARM_TAGGED_ADDR_CTRL,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = tagged_addr_ctrl_get,
		.set = tagged_addr_ctrl_set,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

#ifdef CONFIG_COMPAT
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

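/*
 * Map an AArch32 register number onto the saved state: indices 0-14
 * read the corresponding pt_regs->regs[] entry, while 15, 16 and 17
 * select the pc, a compat view of pstate, and orig_x0 respectively.
 */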
static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx)
{
	struct pt_regs *regs = task_pt_regs(task);

	switch (idx) {
	case 15:
		return regs->pc;
	case 16:
		return pstate_to_compat_psr(regs->pstate);
	case 17:
		return regs->orig_x0;
	default:
		return regs->regs[idx];
	}
}

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	int i = 0;

	while (to.left)
		membuf_store(&to, compat_get_user_reg(target, i++));
	return 0;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			reg = compat_psr_to_pstate(reg);
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}

	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t));
	fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
		(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
	return membuf_store(&to, fpscr);
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}

static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.active = fpr_active,
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

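/*
 * PTRACE_PEEKUSR for compat tasks: the magic offsets report the
 * tracee's text/data boundaries, offsets below the compat gregset size
 * read a register, and any other offset inside COMPAT_USER_SZ reads as
 * zero.
 */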
compat_ptrace_read_user(struct task_struct * tsk,compat_ulong_t off,compat_ulong_t __user * ret)1768 static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
1769 compat_ulong_t __user *ret)
1770 {
1771 compat_ulong_t tmp;
1772
1773 if (off & 3)
1774 return -EIO;
1775
1776 if (off == COMPAT_PT_TEXT_ADDR)
1777 tmp = tsk->mm->start_code;
1778 else if (off == COMPAT_PT_DATA_ADDR)
1779 tmp = tsk->mm->start_data;
1780 else if (off == COMPAT_PT_TEXT_END_ADDR)
1781 tmp = tsk->mm->end_code;
1782 else if (off < sizeof(compat_elf_gregset_t))
1783 tmp = compat_get_user_reg(tsk, off >> 2);
1784 else if (off >= COMPAT_USER_SZ)
1785 return -EIO;
1786 else
1787 tmp = 0;
1788
1789 return put_user(tmp, ret);
1790 }
1791
compat_ptrace_write_user(struct task_struct * tsk,compat_ulong_t off,compat_ulong_t val)1792 static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
1793 compat_ulong_t val)
1794 {
1795 struct pt_regs newregs = *task_pt_regs(tsk);
1796 unsigned int idx = off / 4;
1797
1798 if (off & 3 || off >= COMPAT_USER_SZ)
1799 return -EIO;
1800
1801 if (off >= sizeof(compat_elf_gregset_t))
1802 return 0;
1803
1804 switch (idx) {
1805 case 15:
1806 newregs.pc = val;
1807 break;
1808 case 16:
1809 newregs.pstate = compat_psr_to_pstate(val);
1810 break;
1811 case 17:
1812 newregs.orig_x0 = val;
1813 break;
1814 default:
1815 newregs.regs[idx] = val;
1816 }
1817
1818 if (!valid_user_regs(&newregs.user_regs, tsk))
1819 return -EINVAL;
1820
1821 *task_pt_regs(tsk) = newregs;
1822 return 0;
1823 }
1824
1825 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1826
1827 /*
1828 * Convert a virtual register number into an index for a thread_info
1829 * breakpoint array. Breakpoints are identified using positive numbers
1830 * whilst watchpoints are negative. The registers are laid out as pairs
1831 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
1832 * Register 0 is reserved for describing resource information.
1833 */
compat_ptrace_hbp_num_to_idx(compat_long_t num)1834 static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
1835 {
1836 return (abs(num) - 1) >> 1;
1837 }
1838
compat_ptrace_hbp_get_resource_info(u32 * kdata)1839 static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
1840 {
1841 u8 num_brps, num_wrps, debug_arch, wp_len;
1842 u32 reg = 0;
1843
1844 num_brps = hw_breakpoint_slots(TYPE_INST);
1845 num_wrps = hw_breakpoint_slots(TYPE_DATA);
1846
1847 debug_arch = debug_monitors_arch();
1848 wp_len = 8;
1849 reg |= debug_arch;
1850 reg <<= 8;
1851 reg |= wp_len;
1852 reg <<= 8;
1853 reg |= num_wrps;
1854 reg <<= 8;
1855 reg |= num_brps;
1856
1857 *kdata = reg;
1858 return 0;
1859 }

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	/* Virtual register 0 (resource info) is read-only; ignore writes. */
	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}
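
/*
 * Illustrative tracer-side sketch (not part of this file): a 32-bit
 * debugger plants watchpoint 0 by writing its (address, control) pair
 * through the negative virtual register numbers, using arm32's
 * PTRACE_SETHBPREGS request (seen here as COMPAT_PTRACE_SETHBPREGS):
 *
 *	u32 addr = ..., ctrl = ...;	encoded per the arm32 ptrace ABI
 *	ptrace(PTRACE_SETHBPREGS, pid, (void *)-1, &addr);
 *	ptrace(PTRACE_SETHBPREGS, pid, (void *)-2, &ctrl);
 */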
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKMTETAGS:
	case PTRACE_POKEMTETAGS:
		return mte_ptrace_copy_tags(child, request, addr, data);
	}

	return ptrace_request(child, request, addr, data);
}
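
/*
 * Illustrative tracer-side sketch (not part of this file): per
 * Documentation/arm64/memory-tagging-extension.rst, the MTE requests
 * pass the tracee address in addr and an iovec describing the tag
 * buffer in data, one tag per 16-byte granule:
 *
 *	unsigned char tags[16];
 *	struct iovec iov = { .iov_base = tags, .iov_len = sizeof(tags) };
 *	ptrace(PTRACE_PEEKMTETAGS, pid, (void *)remote_addr, &iov);
 *
 * On return, iov_len holds the number of tags actually transferred.
 */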

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * We have some ABI weirdness here in the way that we handle syscall
	 * exit stops because we indicate whether or not the stop has been
	 * signalled from syscall entry or syscall exit by clobbering a general
	 * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
	 * and restoring its old value after the stop. This means that:
	 *
	 * - Any writes by the tracer to this register during the stop are
	 *   ignored/discarded.
	 *
	 * - The actual value of the register is not available during the stop,
	 *   so the tracer cannot save it and restore it later.
	 *
	 * - Syscall stops behave differently to seccomp and pseudo-step traps
	 *   (the latter do not nobble any registers).
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_ENTER) {
		if (tracehook_report_syscall_entry(regs))
			forget_syscall(regs);
		regs->regs[regno] = saved_reg;
	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
		tracehook_report_syscall_exit(regs, 0);
		regs->regs[regno] = saved_reg;
	} else {
		regs->regs[regno] = saved_reg;

		/*
		 * Signal a pseudo-step exception since we are stepping but
		 * tracer modifications to the registers may have rewound the
		 * state machine.
		 */
		tracehook_report_syscall_exit(regs, 1);
	}
}
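
/*
 * Illustrative tracer-side sketch (not part of this file): because of
 * the clobbering described above, a tracer tells entry and exit stops
 * apart by reading the sentinel register, e.g. for a 64-bit tracee:
 *
 *	struct user_pt_regs uregs;
 *	struct iovec iov = { .iov_base = &uregs, .iov_len = sizeof(uregs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *	bool is_exit = (uregs.regs[7] == PTRACE_SYSCALL_EXIT);
 *
 * PTRACE_GET_SYSCALL_INFO is the modern, unambiguous alternative.
 */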

int syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long flags = READ_ONCE(current_thread_info()->flags);

	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
		if (flags & _TIF_SYSCALL_EMU)
			return NO_SYSCALL;
	}

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing() == -1)
		return NO_SYSCALL;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

void syscall_trace_exit(struct pt_regs *regs)
{
	unsigned long flags = READ_ONCE(current_thread_info()->flags);

	audit_syscall_exit(regs);

	if (flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}

/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
 * not described in ARM DDI 0487D.a.
 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
 * be allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
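
/*
 * Expanding the GENMASK_ULL() terms gives, as a worked example:
 *
 *	SPSR_EL1_AARCH64_RES0_BITS == 0xffffffff0cdfe020
 *	SPSR_EL1_AARCH32_RES0_BITS == 0xffffffff00500000
 *
 * i.e. the upper 32 bits are always treated as RES0, plus the individual
 * fields listed in the comment above.
 */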

static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
	user_regs_reset_single_step(regs, task);

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}