Lines Matching +full:data +full:- +full:addr

1 // SPDX-License-Identifier: GPL-2.0
46 struct thread_struct *thread = &task->thread; in update_cr_regs()
60 if (task->thread.per_flags & PER_FLAG_NO_TE) in update_cr_regs()
64 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { in update_cr_regs()
65 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND) in update_cr_regs()
74 if (task->thread.gs_cb) in update_cr_regs()
85 new.control = thread->per_user.control; in update_cr_regs()
86 new.start = thread->per_user.start; in update_cr_regs()
87 new.end = thread->per_user.end; in update_cr_regs()
101 new.end = -1UL; in update_cr_regs()
106 regs->psw.mask &= ~PSW_MASK_PER; in update_cr_regs()
109 regs->psw.mask |= PSW_MASK_PER; in update_cr_regs()
140 memset(&task->thread.per_user, 0, sizeof(task->thread.per_user)); in ptrace_disable()
141 memset(&task->thread.per_event, 0, sizeof(task->thread.per_event)); in ptrace_disable()
144 task->thread.per_flags = 0; in ptrace_disable()
150 addr_t addr) in __peek_user_per() argument
154 if (addr == (addr_t) &dummy->cr9) in __peek_user_per()
157 PER_EVENT_IFETCH : child->thread.per_user.control; in __peek_user_per()
158 else if (addr == (addr_t) &dummy->cr10) in __peek_user_per()
161 0 : child->thread.per_user.start; in __peek_user_per()
162 else if (addr == (addr_t) &dummy->cr11) in __peek_user_per()
165 -1UL : child->thread.per_user.end; in __peek_user_per()
166 else if (addr == (addr_t) &dummy->bits) in __peek_user_per()
167 /* Single-step bit. */ in __peek_user_per()
169 (1UL << (BITS_PER_LONG - 1)) : 0; in __peek_user_per()
170 else if (addr == (addr_t) &dummy->starting_addr) in __peek_user_per()
172 return child->thread.per_user.start; in __peek_user_per()
173 else if (addr == (addr_t) &dummy->ending_addr) in __peek_user_per()
175 return child->thread.per_user.end; in __peek_user_per()
176 else if (addr == (addr_t) &dummy->perc_atmid) in __peek_user_per()
179 child->thread.per_event.cause << (BITS_PER_LONG - 16); in __peek_user_per()
180 else if (addr == (addr_t) &dummy->address) in __peek_user_per()
182 return child->thread.per_event.address; in __peek_user_per()
183 else if (addr == (addr_t) &dummy->access_id) in __peek_user_per()
186 child->thread.per_event.paid << (BITS_PER_LONG - 8); in __peek_user_per()
191 * Read the word at offset addr from the user area of a process. The
199 static unsigned long __peek_user(struct task_struct *child, addr_t addr) in __peek_user() argument
204 if (addr < (addr_t) &dummy->regs.acrs) { in __peek_user()
208 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); in __peek_user()
209 if (addr == (addr_t) &dummy->regs.psw.mask) { in __peek_user()
215 } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { in __peek_user()
219 offset = addr - (addr_t) &dummy->regs.acrs; in __peek_user()
225 if (addr == (addr_t) &dummy->regs.acrs[15]) in __peek_user()
226 tmp = ((unsigned long) child->thread.acrs[15]) << 32; in __peek_user()
228 tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); in __peek_user()
230 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { in __peek_user()
234 tmp = (addr_t) task_pt_regs(child)->orig_gpr2; in __peek_user()
236 } else if (addr < (addr_t) &dummy->regs.fp_regs) { in __peek_user()
243 } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) { in __peek_user()
247 tmp = child->thread.fpu.fpc; in __peek_user()
248 tmp <<= BITS_PER_LONG - 32; in __peek_user()
250 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { in __peek_user()
252 * floating point regs. are either in child->thread.fpu in __peek_user()
253 * or the child->thread.fpu.vxrs array in __peek_user()
255 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; in __peek_user()
258 ((addr_t) child->thread.fpu.vxrs + 2*offset); in __peek_user()
261 ((addr_t) child->thread.fpu.fprs + offset); in __peek_user()
263 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { in __peek_user()
267 addr -= (addr_t) &dummy->regs.per_info; in __peek_user()
268 tmp = __peek_user_per(child, addr); in __peek_user()
277 peek_user(struct task_struct *child, addr_t addr, addr_t data) in peek_user() argument
286 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && in peek_user()
287 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) in peek_user()
289 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) in peek_user()
290 return -EIO; in peek_user()
292 tmp = __peek_user(child, addr); in peek_user()
293 return put_user(tmp, (addr_t __user *) data); in peek_user()
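
Kernel-side, peek_user() rejects offsets that are not word aligned or that fall outside struct user, and __peek_user() walks the layout (psw, gprs, acrs, orig_gpr2, fp_regs, per_info). A minimal tracer-side sketch of the same path, assuming the glibc <sys/user.h> struct user mirrors the layout __peek_user() is written against; glibc spells the request PTRACE_PEEKUSER for the kernel's PTRACE_PEEKUSR:

/*
 * Sketch only: read one word of a stopped child's user area.
 * Offsets must be word aligned, cf. the mask check in peek_user().
 */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

static long peek_gpr2(pid_t pid)
{
	/* assumed to match the kernel's offsetof(struct user, regs.gprs[2]) */
	unsigned long off = offsetof(struct user, regs.gprs[2]);
	long val;

	errno = 0;
	val = ptrace(PTRACE_PEEKUSER, pid, (void *)off, NULL);
	if (val == -1 && errno)
		perror("PTRACE_PEEKUSER");	/* -EIO from peek_user() surfaces here */
	return val;
}
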
297 addr_t addr, addr_t data) in __poke_user_per() argument
313 if (addr == (addr_t) &dummy->cr9) in __poke_user_per()
315 child->thread.per_user.control = in __poke_user_per()
316 data & (PER_EVENT_MASK | PER_CONTROL_MASK); in __poke_user_per()
317 else if (addr == (addr_t) &dummy->starting_addr) in __poke_user_per()
319 child->thread.per_user.start = data; in __poke_user_per()
320 else if (addr == (addr_t) &dummy->ending_addr) in __poke_user_per()
322 child->thread.per_user.end = data; in __poke_user_per()
325 static void fixup_int_code(struct task_struct *child, addr_t data) in fixup_int_code() argument
328 int ilc = regs->int_code >> 16; in fixup_int_code()
334 if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16), in fixup_int_code()
342 regs->int_code = 0x20000 | (data & 0xffff); in fixup_int_code()
345 * Write a word to the user area of a process at location addr. This
350 static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) in __poke_user() argument
356 if (addr < (addr_t) &dummy->regs.acrs) { in __poke_user()
361 if (addr == (addr_t) &dummy->regs.psw.mask) { in __poke_user()
365 if ((data ^ PSW_USER_BITS) & ~mask) in __poke_user()
367 return -EINVAL; in __poke_user()
368 if ((data & PSW_MASK_ASC) == PSW_ASC_HOME) in __poke_user()
369 /* Invalid address-space-control bits */ in __poke_user()
370 return -EINVAL; in __poke_user()
371 if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)) in __poke_user()
373 return -EINVAL; in __poke_user()
377 addr == offsetof(struct user, regs.gprs[2])) in __poke_user()
378 fixup_int_code(child, data); in __poke_user()
379 *(addr_t *)((addr_t) &regs->psw + addr) = data; in __poke_user()
381 } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { in __poke_user()
385 offset = addr - (addr_t) &dummy->regs.acrs; in __poke_user()
392 if (addr == (addr_t) &dummy->regs.acrs[15]) in __poke_user()
393 child->thread.acrs[15] = (unsigned int) (data >> 32); in __poke_user()
395 *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; in __poke_user()
397 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { in __poke_user()
401 task_pt_regs(child)->orig_gpr2 = data; in __poke_user()
403 } else if (addr < (addr_t) &dummy->regs.fp_regs) { in __poke_user()
410 } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) { in __poke_user()
414 if ((unsigned int) data != 0 || in __poke_user()
415 test_fp_ctl(data >> (BITS_PER_LONG - 32))) in __poke_user()
416 return -EINVAL; in __poke_user()
417 child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32); in __poke_user()
419 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { in __poke_user()
421 * floating point regs. are either in child->thread.fpu in __poke_user()
422 * or the child->thread.fpu.vxrs array in __poke_user()
424 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; in __poke_user()
427 child->thread.fpu.vxrs + 2*offset) = data; in __poke_user()
430 child->thread.fpu.fprs + offset) = data; in __poke_user()
432 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { in __poke_user()
436 addr -= (addr_t) &dummy->regs.per_info; in __poke_user()
437 __poke_user_per(child, addr, data); in __poke_user()
444 static int poke_user(struct task_struct *child, addr_t addr, addr_t data) in poke_user() argument
453 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && in poke_user()
454 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) in poke_user()
456 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) in poke_user()
457 return -EIO; in poke_user()
459 return __poke_user(child, addr, data); in poke_user()
463 unsigned long addr, unsigned long data) in arch_ptrace() argument
470 /* read the word at location addr in the USER area. */ in arch_ptrace()
471 return peek_user(child, addr, data); in arch_ptrace()
474 /* write the word at location addr in the USER area */ in arch_ptrace()
475 return poke_user(child, addr, data); in arch_ptrace()
479 if (copy_from_user(&parea, (void __force __user *) addr, in arch_ptrace()
481 return -EFAULT; in arch_ptrace()
482 addr = parea.kernel_addr; in arch_ptrace()
483 data = parea.process_addr; in arch_ptrace()
487 ret = peek_user(child, addr, data); in arch_ptrace()
491 (addr_t __force __user *) data)) in arch_ptrace()
492 return -EFAULT; in arch_ptrace()
493 ret = poke_user(child, addr, utmp); in arch_ptrace()
497 addr += sizeof(unsigned long); in arch_ptrace()
498 data += sizeof(unsigned long); in arch_ptrace()
503 return put_user(child->thread.last_break, (unsigned long __user *)data); in arch_ptrace()
506 return -EIO; in arch_ptrace()
507 child->thread.per_flags &= ~PER_FLAG_NO_TE; in arch_ptrace()
511 return -EIO; in arch_ptrace()
512 child->thread.per_flags |= PER_FLAG_NO_TE; in arch_ptrace()
513 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
516 if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE)) in arch_ptrace()
517 return -EIO; in arch_ptrace()
518 switch (data) { in arch_ptrace()
520 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
523 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
524 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND; in arch_ptrace()
527 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
528 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND; in arch_ptrace()
531 return -EINVAL; in arch_ptrace()
535 return ptrace_request(child, request, addr, data); in arch_ptrace()
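
Beyond the single-word PTRACE_PEEKUSR/PTRACE_POKEUSR requests, the loop above copies a whole block per PTRACE_PEEKUSR_AREA/PTRACE_POKEUSR_AREA, driven by a ptrace_area descriptor (len, kernel_addr, process_addr) read from the tracer. A hedged sketch of the tracer side; the struct layout is copied here by hand from the s390 uapi <asm/ptrace.h>, and the request constant is assumed to be provided by the s390 <sys/ptrace.h>:

#include <sys/ptrace.h>		/* s390 glibc carries PTRACE_PEEKUSR_AREA */
#include <sys/types.h>

typedef struct {		/* hand-copied layout, see asm/ptrace.h */
	unsigned long len;		/* bytes to copy, a multiple of the word size */
	unsigned long kernel_addr;	/* byte offset into the user area */
	unsigned long process_addr;	/* buffer in the tracer */
} ptrace_area;

static int peek_psw_and_gprs(pid_t pid, unsigned long buf[18])
{
	ptrace_area parea = {
		.len          = 18 * sizeof(unsigned long),	/* psw + gprs 0-15 */
		.kernel_addr  = 0,				/* user area starts at regs.psw */
		.process_addr = (unsigned long)buf,		/* filled word by word via put_user() */
	};

	/* "addr" carries the descriptor; "data" is unused for this request */
	return ptrace(PTRACE_PEEKUSR_AREA, pid, &parea, NULL) ? -1 : 0;
}

The same arch_ptrace() switch also serves PTRACE_GET_LAST_BREAK, which stores thread.last_break through the data pointer, and the PTRACE_ENABLE_TE/PTRACE_DISABLE_TE/PTRACE_TE_ABORT_RAND requests that toggle the per_flags bits shown above.
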
551 * a 64 bit program is a no-no.
558 addr_t addr) in __peek_user_per_compat() argument
562 if (addr == (addr_t) &dummy32->cr9) in __peek_user_per_compat()
565 PER_EVENT_IFETCH : child->thread.per_user.control; in __peek_user_per_compat()
566 else if (addr == (addr_t) &dummy32->cr10) in __peek_user_per_compat()
569 0 : child->thread.per_user.start; in __peek_user_per_compat()
570 else if (addr == (addr_t) &dummy32->cr11) in __peek_user_per_compat()
573 PSW32_ADDR_INSN : child->thread.per_user.end; in __peek_user_per_compat()
574 else if (addr == (addr_t) &dummy32->bits) in __peek_user_per_compat()
575 /* Single-step bit. */ in __peek_user_per_compat()
578 else if (addr == (addr_t) &dummy32->starting_addr) in __peek_user_per_compat()
580 return (__u32) child->thread.per_user.start; in __peek_user_per_compat()
581 else if (addr == (addr_t) &dummy32->ending_addr) in __peek_user_per_compat()
583 return (__u32) child->thread.per_user.end; in __peek_user_per_compat()
584 else if (addr == (addr_t) &dummy32->perc_atmid) in __peek_user_per_compat()
586 return (__u32) child->thread.per_event.cause << 16; in __peek_user_per_compat()
587 else if (addr == (addr_t) &dummy32->address) in __peek_user_per_compat()
589 return (__u32) child->thread.per_event.address; in __peek_user_per_compat()
590 else if (addr == (addr_t) &dummy32->access_id) in __peek_user_per_compat()
592 return (__u32) child->thread.per_event.paid << 24; in __peek_user_per_compat()
599 static u32 __peek_user_compat(struct task_struct *child, addr_t addr) in __peek_user_compat() argument
605 if (addr < (addr_t) &dummy32->regs.acrs) { in __peek_user_compat()
610 if (addr == (addr_t) &dummy32->regs.psw.mask) { in __peek_user_compat()
612 tmp = (__u32)(regs->psw.mask >> 32); in __peek_user_compat()
615 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { in __peek_user_compat()
617 tmp = (__u32) regs->psw.addr | in __peek_user_compat()
618 (__u32)(regs->psw.mask & PSW_MASK_BA); in __peek_user_compat()
620 /* gpr 0-15 */ in __peek_user_compat()
621 tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4); in __peek_user_compat()
623 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { in __peek_user_compat()
627 offset = addr - (addr_t) &dummy32->regs.acrs; in __peek_user_compat()
628 tmp = *(__u32*)((addr_t) &child->thread.acrs + offset); in __peek_user_compat()
630 } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) { in __peek_user_compat()
634 tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4); in __peek_user_compat()
636 } else if (addr < (addr_t) &dummy32->regs.fp_regs) { in __peek_user_compat()
643 } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) { in __peek_user_compat()
647 tmp = child->thread.fpu.fpc; in __peek_user_compat()
649 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { in __peek_user_compat()
651 * floating point regs. are either in child->thread.fpu in __peek_user_compat()
652 * or the child->thread.fpu.vxrs array in __peek_user_compat()
654 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; in __peek_user_compat()
657 ((addr_t) child->thread.fpu.vxrs + 2*offset); in __peek_user_compat()
660 ((addr_t) child->thread.fpu.fprs + offset); in __peek_user_compat()
662 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { in __peek_user_compat()
666 addr -= (addr_t) &dummy32->regs.per_info; in __peek_user_compat()
667 tmp = __peek_user_per_compat(child, addr); in __peek_user_compat()
676 addr_t addr, addr_t data) in peek_user_compat() argument
680 if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3) in peek_user_compat()
681 return -EIO; in peek_user_compat()
683 tmp = __peek_user_compat(child, addr); in peek_user_compat()
684 return put_user(tmp, (__u32 __user *) data); in peek_user_compat()
691 addr_t addr, __u32 data) in __poke_user_per_compat() argument
695 if (addr == (addr_t) &dummy32->cr9) in __poke_user_per_compat()
697 child->thread.per_user.control = in __poke_user_per_compat()
698 data & (PER_EVENT_MASK | PER_CONTROL_MASK); in __poke_user_per_compat()
699 else if (addr == (addr_t) &dummy32->starting_addr) in __poke_user_per_compat()
701 child->thread.per_user.start = data; in __poke_user_per_compat()
702 else if (addr == (addr_t) &dummy32->ending_addr) in __poke_user_per_compat()
704 child->thread.per_user.end = data; in __poke_user_per_compat()
711 addr_t addr, addr_t data) in __poke_user_compat() argument
714 __u32 tmp = (__u32) data; in __poke_user_compat()
717 if (addr < (addr_t) &dummy32->regs.acrs) { in __poke_user_compat()
722 if (addr == (addr_t) &dummy32->regs.psw.mask) { in __poke_user_compat()
729 return -EINVAL; in __poke_user_compat()
730 if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME) in __poke_user_compat()
731 /* Invalid address-space-control bits */ in __poke_user_compat()
732 return -EINVAL; in __poke_user_compat()
733 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | in __poke_user_compat()
734 (regs->psw.mask & PSW_MASK_BA) | in __poke_user_compat()
736 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { in __poke_user_compat()
738 regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN; in __poke_user_compat()
740 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | in __poke_user_compat()
745 addr == offsetof(struct compat_user, regs.gprs[2])) in __poke_user_compat()
746 fixup_int_code(child, data); in __poke_user_compat()
747 /* gpr 0-15 */ in __poke_user_compat()
748 *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp; in __poke_user_compat()
750 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { in __poke_user_compat()
754 offset = addr - (addr_t) &dummy32->regs.acrs; in __poke_user_compat()
755 *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp; in __poke_user_compat()
757 } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) { in __poke_user_compat()
761 *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp; in __poke_user_compat()
763 } else if (addr < (addr_t) &dummy32->regs.fp_regs) { in __poke_user_compat()
770 } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) { in __poke_user_compat()
775 return -EINVAL; in __poke_user_compat()
776 child->thread.fpu.fpc = data; in __poke_user_compat()
778 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { in __poke_user_compat()
780 * floating point regs. are either in child->thread.fpu in __poke_user_compat()
781 * or the child->thread.fpu.vxrs array in __poke_user_compat()
783 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; in __poke_user_compat()
786 child->thread.fpu.vxrs + 2*offset) = tmp; in __poke_user_compat()
789 child->thread.fpu.fprs + offset) = tmp; in __poke_user_compat()
791 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { in __poke_user_compat()
795 addr -= (addr_t) &dummy32->regs.per_info; in __poke_user_compat()
796 __poke_user_per_compat(child, addr, data); in __poke_user_compat()
803 addr_t addr, addr_t data) in poke_user_compat() argument
805 if (!is_compat_task() || (addr & 3) || in poke_user_compat()
806 addr > sizeof(struct compat_user) - 3) in poke_user_compat()
807 return -EIO; in poke_user_compat()
809 return __poke_user_compat(child, addr, data); in poke_user_compat()
815 unsigned long addr = caddr; in compat_arch_ptrace() local
816 unsigned long data = cdata; in compat_arch_ptrace() local
822 /* read the word at location addr in the USER area. */ in compat_arch_ptrace()
823 return peek_user_compat(child, addr, data); in compat_arch_ptrace()
826 /* write the word at location addr in the USER area */ in compat_arch_ptrace()
827 return poke_user_compat(child, addr, data); in compat_arch_ptrace()
831 if (copy_from_user(&parea, (void __force __user *) addr, in compat_arch_ptrace()
833 return -EFAULT; in compat_arch_ptrace()
834 addr = parea.kernel_addr; in compat_arch_ptrace()
835 data = parea.process_addr; in compat_arch_ptrace()
839 ret = peek_user_compat(child, addr, data); in compat_arch_ptrace()
843 (__u32 __force __user *) data)) in compat_arch_ptrace()
844 return -EFAULT; in compat_arch_ptrace()
845 ret = poke_user_compat(child, addr, utmp); in compat_arch_ptrace()
849 addr += sizeof(unsigned int); in compat_arch_ptrace()
850 data += sizeof(unsigned int); in compat_arch_ptrace()
855 return put_user(child->thread.last_break, (unsigned int __user *)data); in compat_arch_ptrace()
857 return compat_ptrace_request(child, request, addr, data); in compat_arch_ptrace()
863 unsigned long mask = -1UL; in do_syscall_trace_enter()
864 long ret = -1; in do_syscall_trace_enter()
888 sd.instruction_pointer = regs->psw.addr & 0x7fffffff; in do_syscall_trace_enter()
891 sd.instruction_pointer = regs->psw.addr; in do_syscall_trace_enter()
895 sd.nr = regs->int_code & 0xffff; in do_syscall_trace_enter()
896 sd.args[0] = regs->orig_gpr2 & mask; in do_syscall_trace_enter()
897 sd.args[1] = regs->gprs[3] & mask; in do_syscall_trace_enter()
898 sd.args[2] = regs->gprs[4] & mask; in do_syscall_trace_enter()
899 sd.args[3] = regs->gprs[5] & mask; in do_syscall_trace_enter()
900 sd.args[4] = regs->gprs[6] & mask; in do_syscall_trace_enter()
901 sd.args[5] = regs->gprs[7] & mask; in do_syscall_trace_enter()
903 if (__secure_computing(&sd) == -1) in do_syscall_trace_enter()
909 trace_sys_enter(regs, regs->int_code & 0xffff); in do_syscall_trace_enter()
912 audit_syscall_entry(regs->int_code & 0xffff, regs->orig_gpr2 & mask, in do_syscall_trace_enter()
913 regs->gprs[3] &mask, regs->gprs[4] &mask, in do_syscall_trace_enter()
914 regs->gprs[5] &mask); in do_syscall_trace_enter()
916 if ((signed long)regs->gprs[2] >= NR_syscalls) { in do_syscall_trace_enter()
917 regs->gprs[2] = -ENOSYS; in do_syscall_trace_enter()
918 ret = -ENOSYS; in do_syscall_trace_enter()
920 return regs->gprs[2]; in do_syscall_trace_enter()
931 trace_sys_exit(regs, regs->gprs[2]); in do_syscall_trace_exit()
947 save_access_regs(target->thread.acrs); in s390_regs_get()
962 save_access_regs(target->thread.acrs); in s390_regs_set()
968 count -= sizeof(*k); in s390_regs_set()
979 count -= sizeof(*u); in s390_regs_set()
985 restore_access_regs(target->thread.acrs); in s390_regs_set()
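
s390_regs_get() and s390_regs_set() back the NT_PRSTATUS regset, so the usual tracer entry point is PTRACE_GETREGSET with an iovec. A sketch under the assumption that the local struct below matches the s390_regs layout (psw, gprs, acrs, orig_gpr2); it is written out by hand rather than taken from a header:

#include <elf.h>		/* NT_PRSTATUS */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>		/* struct iovec */

struct s390_gregs {		/* assumed mirror of the s390_regs layout */
	unsigned long psw_mask;
	unsigned long psw_addr;
	unsigned long gprs[16];
	unsigned int  acrs[16];
	unsigned long orig_gpr2;
};

static int get_gregs(pid_t pid, struct s390_gregs *gr)
{
	struct iovec iov = { .iov_base = gr, .iov_len = sizeof(*gr) };

	/* the kernel trims iov_len to what it actually copied */
	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov) ? -1 : 0;
}

Writing the same regset with PTRACE_SETREGSET lands in s390_regs_set(), which is why both functions bracket the copy with save_access_regs()/restore_access_regs() when the target is the current task.
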
999 fp_regs.fpc = target->thread.fpu.fpc; in s390_fpregs_get()
1000 fpregs_store(&fp_regs, &target->thread.fpu); in s390_fpregs_get()
1017 convert_vx_to_fp(fprs, target->thread.fpu.vxrs); in s390_fpregs_set()
1019 memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs)); in s390_fpregs_set()
1023 u32 ufpc[2] = { target->thread.fpu.fpc, 0 }; in s390_fpregs_set()
1029 return -EINVAL; in s390_fpregs_set()
1030 target->thread.fpu.fpc = ufpc[0]; in s390_fpregs_set()
1035 fprs, offsetof(s390_fp_regs, fprs), -1); in s390_fpregs_set()
1040 convert_fp_to_vx(target->thread.fpu.vxrs, fprs); in s390_fpregs_set()
1042 memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs)); in s390_fpregs_set()
1051 return membuf_store(&to, target->thread.last_break); in s390_last_break_get()
1068 if (!(regs->int_code & 0x200)) in s390_tdb_get()
1069 return -ENODATA; in s390_tdb_get()
1070 return membuf_write(&to, target->thread.trap_tdb, 256); in s390_tdb_get()
1089 return -ENODEV; in s390_vxrs_low_get()
1093 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1); in s390_vxrs_low_get()
1106 return -ENODEV; in s390_vxrs_low_set()
1111 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1); in s390_vxrs_low_set()
1113 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); in s390_vxrs_low_set()
1116 *((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i]; in s390_vxrs_low_set()
1126 return -ENODEV; in s390_vxrs_high_get()
1129 return membuf_write(&to, target->thread.fpu.vxrs + __NUM_VXRS_LOW, in s390_vxrs_high_get()
1141 return -ENODEV; in s390_vxrs_high_set()
1146 target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1); in s390_vxrs_high_set()
1154 return membuf_store(&to, target->thread.system_call); in s390_system_call_get()
1162 unsigned int *data = &target->thread.system_call; in s390_system_call_set() local
1164 data, 0, sizeof(unsigned int)); in s390_system_call_set()
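
The system-call regset above is a single unsigned int (thread.system_call), exposed to tracers as the NT_S390_SYSTEM_CALL note. A minimal read sketch, assuming NT_S390_SYSTEM_CALL is available from <elf.h>:

#include <elf.h>		/* NT_S390_SYSTEM_CALL */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static int get_syscall_nr(pid_t pid, unsigned int *nr)
{
	struct iovec iov = { .iov_base = nr, .iov_len = sizeof(*nr) };

	/* backed by s390_system_call_get(): one unsigned int */
	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_S390_SYSTEM_CALL, &iov) ? -1 : 0;
}
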
1171 struct gs_cb *data = target->thread.gs_cb; in s390_gs_cb_get() local
1174 return -ENODEV; in s390_gs_cb_get()
1175 if (!data) in s390_gs_cb_get()
1176 return -ENODATA; in s390_gs_cb_get()
1178 save_gs_cb(data); in s390_gs_cb_get()
1179 return membuf_write(&to, data, sizeof(struct gs_cb)); in s390_gs_cb_get()
1187 struct gs_cb gs_cb = { }, *data = NULL; in s390_gs_cb_set() local
1191 return -ENODEV; in s390_gs_cb_set()
1192 if (!target->thread.gs_cb) { in s390_gs_cb_set()
1193 data = kzalloc(sizeof(*data), GFP_KERNEL); in s390_gs_cb_set()
1194 if (!data) in s390_gs_cb_set()
1195 return -ENOMEM; in s390_gs_cb_set()
1197 if (!target->thread.gs_cb) in s390_gs_cb_set()
1202 gs_cb = *target->thread.gs_cb; in s390_gs_cb_set()
1206 kfree(data); in s390_gs_cb_set()
1207 return -EFAULT; in s390_gs_cb_set()
1210 if (!target->thread.gs_cb) in s390_gs_cb_set()
1211 target->thread.gs_cb = data; in s390_gs_cb_set()
1212 *target->thread.gs_cb = gs_cb; in s390_gs_cb_set()
1215 restore_gs_cb(target->thread.gs_cb); in s390_gs_cb_set()
1225 struct gs_cb *data = target->thread.gs_bc_cb; in s390_gs_bc_get() local
1228 return -ENODEV; in s390_gs_bc_get()
1229 if (!data) in s390_gs_bc_get()
1230 return -ENODATA; in s390_gs_bc_get()
1231 return membuf_write(&to, data, sizeof(struct gs_cb)); in s390_gs_bc_get()
1239 struct gs_cb *data = target->thread.gs_bc_cb; in s390_gs_bc_set() local
1242 return -ENODEV; in s390_gs_bc_set()
1243 if (!data) { in s390_gs_bc_set()
1244 data = kzalloc(sizeof(*data), GFP_KERNEL); in s390_gs_bc_set()
1245 if (!data) in s390_gs_bc_set()
1246 return -ENOMEM; in s390_gs_bc_set()
1247 target->thread.gs_bc_cb = data; in s390_gs_bc_set()
1250 data, 0, sizeof(struct gs_cb)); in s390_gs_bc_set()
1255 return (cb->rca & 0x1f) == 0 && in is_ri_cb_valid()
1256 (cb->roa & 0xfff) == 0 && in is_ri_cb_valid()
1257 (cb->rla & 0xfff) == 0xfff && in is_ri_cb_valid()
1258 cb->s == 1 && in is_ri_cb_valid()
1259 cb->k == 1 && in is_ri_cb_valid()
1260 cb->h == 0 && in is_ri_cb_valid()
1261 cb->reserved1 == 0 && in is_ri_cb_valid()
1262 cb->ps == 1 && in is_ri_cb_valid()
1263 cb->qs == 0 && in is_ri_cb_valid()
1264 cb->pc == 1 && in is_ri_cb_valid()
1265 cb->qc == 0 && in is_ri_cb_valid()
1266 cb->reserved2 == 0 && in is_ri_cb_valid()
1267 cb->reserved3 == 0 && in is_ri_cb_valid()
1268 cb->reserved4 == 0 && in is_ri_cb_valid()
1269 cb->reserved5 == 0 && in is_ri_cb_valid()
1270 cb->reserved6 == 0 && in is_ri_cb_valid()
1271 cb->reserved7 == 0 && in is_ri_cb_valid()
1272 cb->reserved8 == 0 && in is_ri_cb_valid()
1273 cb->rla >= cb->roa && in is_ri_cb_valid()
1274 cb->rca >= cb->roa && in is_ri_cb_valid()
1275 cb->rca <= cb->rla+1 && in is_ri_cb_valid()
1276 cb->m < 3; in is_ri_cb_valid()
1283 struct runtime_instr_cb *data = target->thread.ri_cb; in s390_runtime_instr_get() local
1286 return -ENODEV; in s390_runtime_instr_get()
1287 if (!data) in s390_runtime_instr_get()
1288 return -ENODATA; in s390_runtime_instr_get()
1290 return membuf_write(&to, data, sizeof(struct runtime_instr_cb)); in s390_runtime_instr_get()
1298 struct runtime_instr_cb ri_cb = { }, *data = NULL; in s390_runtime_instr_set() local
1302 return -ENODEV; in s390_runtime_instr_set()
1304 if (!target->thread.ri_cb) { in s390_runtime_instr_set()
1305 data = kzalloc(sizeof(*data), GFP_KERNEL); in s390_runtime_instr_set()
1306 if (!data) in s390_runtime_instr_set()
1307 return -ENOMEM; in s390_runtime_instr_set()
1310 if (target->thread.ri_cb) { in s390_runtime_instr_set()
1314 ri_cb = *target->thread.ri_cb; in s390_runtime_instr_set()
1320 kfree(data); in s390_runtime_instr_set()
1321 return -EFAULT; in s390_runtime_instr_set()
1325 kfree(data); in s390_runtime_instr_set()
1326 return -EINVAL; in s390_runtime_instr_set()
1334 if (!target->thread.ri_cb) in s390_runtime_instr_set()
1335 target->thread.ri_cb = data; in s390_runtime_instr_set()
1336 *target->thread.ri_cb = ri_cb; in s390_runtime_instr_set()
1338 load_runtime_instr_cb(target->thread.ri_cb); in s390_runtime_instr_set()
1442 save_access_regs(target->thread.acrs); in s390_compat_regs_get()
1457 save_access_regs(target->thread.acrs); in s390_compat_regs_set()
1463 count -= sizeof(*k); in s390_compat_regs_set()
1474 count -= sizeof(*u); in s390_compat_regs_set()
1480 restore_access_regs(target->thread.acrs); in s390_compat_regs_set()
1492 gprs_high = (compat_ulong_t *)task_pt_regs(target)->gprs; in s390_compat_regs_high_get()
1507 &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)]; in s390_compat_regs_high_set()
1513 count -= sizeof(*k); in s390_compat_regs_high_set()
1524 count -= sizeof(*u); in s390_compat_regs_high_set()
1535 compat_ulong_t last_break = target->thread.last_break; in s390_compat_last_break_get()
1665 return regs->gprs[offset]; in regs_get_register()
1673 return -EINVAL; in regs_query_register_offset()
1675 return -EINVAL; in regs_query_register_offset()
1677 return -EINVAL; in regs_query_register_offset()
1688 static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) in regs_within_kernel_stack() argument
1692 return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1)); in regs_within_kernel_stack()
1696 * regs_get_kernel_stack_nth() - get Nth entry of the stack
1706 unsigned long addr; in regs_get_kernel_stack_nth() local
1708 addr = kernel_stack_pointer(regs) + n * sizeof(long); in regs_get_kernel_stack_nth()
1709 if (!regs_within_kernel_stack(regs, addr)) in regs_get_kernel_stack_nth()
1711 return *(unsigned long *)addr; in regs_get_kernel_stack_nth()
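
regs_within_kernel_stack() treats an address as on-stack when it shares the THREAD_SIZE-aligned block of the saved kernel stack pointer, and regs_get_kernel_stack_nth() builds on that to fetch the Nth stack slot, returning 0 for anything outside the stack. A kernel-side sketch of a typical consumer, a kprobe pre-handler; the probed symbol name is only an example, not taken from the file above:

#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/ptrace.h>

static int stack_peek_pre(struct kprobe *p, struct pt_regs *regs)
{
	int n;

	/* dump the first few stack slots of the probed context */
	for (n = 0; n < 4; n++)
		pr_info("stack[%d] = %lx\n", n,
			regs_get_kernel_stack_nth(regs, n));
	return 0;	/* let the probed instruction run */
}

static struct kprobe kp = {
	.symbol_name = "do_sys_open",	/* example target, an assumption */
	.pre_handler = stack_peek_pre,
};

static int __init stack_peek_init(void)
{
	return register_kprobe(&kp);
}

static void __exit stack_peek_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(stack_peek_init);
module_exit(stack_peek_exit);
MODULE_LICENSE("GPL");
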