Lines Matching +full:data +full:- +full:addr
1 // SPDX-License-Identifier: GPL-2.0
43 struct thread_struct *thread = &task->thread; in update_cr_regs()
57 if (task->thread.per_flags & PER_FLAG_NO_TE) in update_cr_regs()
61 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { in update_cr_regs()
62 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND) in update_cr_regs()
71 if (task->thread.gs_cb) in update_cr_regs()
82 new.control = thread->per_user.control; in update_cr_regs()
83 new.start = thread->per_user.start; in update_cr_regs()
84 new.end = thread->per_user.end; in update_cr_regs()
98 new.end = -1UL; in update_cr_regs()
103 regs->psw.mask &= ~PSW_MASK_PER; in update_cr_regs()
106 regs->psw.mask |= PSW_MASK_PER; in update_cr_regs()
137 memset(&task->thread.per_user, 0, sizeof(task->thread.per_user)); in ptrace_disable()
138 memset(&task->thread.per_event, 0, sizeof(task->thread.per_event)); in ptrace_disable()
141 task->thread.per_flags = 0; in ptrace_disable()
147 addr_t addr) in __peek_user_per() argument
149 if (addr == offsetof(struct per_struct_kernel, cr9)) in __peek_user_per()
152 PER_EVENT_IFETCH : child->thread.per_user.control; in __peek_user_per()
153 else if (addr == offsetof(struct per_struct_kernel, cr10)) in __peek_user_per()
156 0 : child->thread.per_user.start; in __peek_user_per()
157 else if (addr == offsetof(struct per_struct_kernel, cr11)) in __peek_user_per()
160 -1UL : child->thread.per_user.end; in __peek_user_per()
161 else if (addr == offsetof(struct per_struct_kernel, bits)) in __peek_user_per()
162 /* Single-step bit. */ in __peek_user_per()
164 (1UL << (BITS_PER_LONG - 1)) : 0; in __peek_user_per()
165 else if (addr == offsetof(struct per_struct_kernel, starting_addr)) in __peek_user_per()
167 return child->thread.per_user.start; in __peek_user_per()
168 else if (addr == offsetof(struct per_struct_kernel, ending_addr)) in __peek_user_per()
170 return child->thread.per_user.end; in __peek_user_per()
171 else if (addr == offsetof(struct per_struct_kernel, perc_atmid)) in __peek_user_per()
174 child->thread.per_event.cause << (BITS_PER_LONG - 16); in __peek_user_per()
175 else if (addr == offsetof(struct per_struct_kernel, address)) in __peek_user_per()
177 return child->thread.per_event.address; in __peek_user_per()
178 else if (addr == offsetof(struct per_struct_kernel, access_id)) in __peek_user_per()
181 child->thread.per_event.paid << (BITS_PER_LONG - 8); in __peek_user_per()
186 * Read the word at offset addr from the user area of a process. The
194 static unsigned long __peek_user(struct task_struct *child, addr_t addr) in __peek_user() argument
198 if (addr < offsetof(struct user, regs.acrs)) { in __peek_user()
202 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); in __peek_user()
203 if (addr == offsetof(struct user, regs.psw.mask)) { in __peek_user()
209 } else if (addr < offsetof(struct user, regs.orig_gpr2)) { in __peek_user()
213 offset = addr - offsetof(struct user, regs.acrs); in __peek_user()
219 if (addr == offsetof(struct user, regs.acrs[15])) in __peek_user()
220 tmp = ((unsigned long) child->thread.acrs[15]) << 32; in __peek_user()
222 tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); in __peek_user()
224 } else if (addr == offsetof(struct user, regs.orig_gpr2)) { in __peek_user()
228 tmp = (addr_t) task_pt_regs(child)->orig_gpr2; in __peek_user()
230 } else if (addr < offsetof(struct user, regs.fp_regs)) { in __peek_user()
237 } else if (addr == offsetof(struct user, regs.fp_regs.fpc)) { in __peek_user()
241 tmp = child->thread.fpu.fpc; in __peek_user()
242 tmp <<= BITS_PER_LONG - 32; in __peek_user()
244 } else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) { in __peek_user()
246 * floating point regs. are either in child->thread.fpu in __peek_user()
247 * or the child->thread.fpu.vxrs array in __peek_user()
249 offset = addr - offsetof(struct user, regs.fp_regs.fprs); in __peek_user()
252 ((addr_t) child->thread.fpu.vxrs + 2*offset); in __peek_user()
255 ((addr_t) child->thread.fpu.fprs + offset); in __peek_user()
257 } else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) { in __peek_user()
261 addr -= offsetof(struct user, regs.per_info); in __peek_user()
262 tmp = __peek_user_per(child, addr); in __peek_user()
271 peek_user(struct task_struct *child, addr_t addr, addr_t data) in peek_user() argument
280 if (addr >= offsetof(struct user, regs.acrs) && in peek_user()
281 addr < offsetof(struct user, regs.orig_gpr2)) in peek_user()
283 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) in peek_user()
284 return -EIO; in peek_user()
286 tmp = __peek_user(child, addr); in peek_user()
287 return put_user(tmp, (addr_t __user *) data); in peek_user()
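For orientation, a minimal user-space sketch of the call path these lines service: a tracer passes an offset into struct user to PTRACE_PEEKUSER, and peek_user()/__peek_user() above map that offset onto the saved psw, gprs, acrs, orig_gpr2, fpu or PER state. This is illustrative only, not part of ptrace.c, and assumes an s390x build whose glibc <sys/user.h> struct user matches the kernel layout; offsets must be word aligned (4-byte within the access registers) or peek_user() returns -EIO.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

/* Read one word of the tracee's USER area at the given offset. */
static long peek_user_word(pid_t pid, size_t offset)
{
	errno = 0;
	long val = ptrace(PTRACE_PEEKUSER, pid, (void *)offset, NULL);

	if (val == -1 && errno)
		perror("PTRACE_PEEKUSER");	/* -EIO for misaligned or out-of-range offsets */
	return val;
}

/* e.g. peek_user_word(pid, offsetof(struct user, regs.psw.addr)); */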
291 addr_t addr, addr_t data) in __poke_user_per() argument
305 if (addr == offsetof(struct per_struct_kernel, cr9)) in __poke_user_per()
307 child->thread.per_user.control = in __poke_user_per()
308 data & (PER_EVENT_MASK | PER_CONTROL_MASK); in __poke_user_per()
309 else if (addr == offsetof(struct per_struct_kernel, starting_addr)) in __poke_user_per()
311 child->thread.per_user.start = data; in __poke_user_per()
312 else if (addr == offsetof(struct per_struct_kernel, ending_addr)) in __poke_user_per()
314 child->thread.per_user.end = data; in __poke_user_per()
318 * Write a word to the user area of a process at location addr. This
323 static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) in __poke_user() argument
328 if (addr < offsetof(struct user, regs.acrs)) { in __poke_user()
333 if (addr == offsetof(struct user, regs.psw.mask)) { in __poke_user()
337 if ((data ^ PSW_USER_BITS) & ~mask) in __poke_user()
339 return -EINVAL; in __poke_user()
340 if ((data & PSW_MASK_ASC) == PSW_ASC_HOME) in __poke_user()
341 /* Invalid address-space-control bits */ in __poke_user()
342 return -EINVAL; in __poke_user()
343 if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)) in __poke_user()
345 return -EINVAL; in __poke_user()
349 addr == offsetof(struct user, regs.gprs[2])) { in __poke_user()
352 regs->int_code = 0x20000 | (data & 0xffff); in __poke_user()
354 *(addr_t *)((addr_t) &regs->psw + addr) = data; in __poke_user()
355 } else if (addr < offsetof(struct user, regs.orig_gpr2)) { in __poke_user()
359 offset = addr - offsetof(struct user, regs.acrs); in __poke_user()
366 if (addr == offsetof(struct user, regs.acrs[15])) in __poke_user()
367 child->thread.acrs[15] = (unsigned int) (data >> 32); in __poke_user()
369 *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; in __poke_user()
371 } else if (addr == offsetof(struct user, regs.orig_gpr2)) { in __poke_user()
375 task_pt_regs(child)->orig_gpr2 = data; in __poke_user()
377 } else if (addr < offsetof(struct user, regs.fp_regs)) { in __poke_user()
384 } else if (addr == offsetof(struct user, regs.fp_regs.fpc)) { in __poke_user()
389 if ((unsigned int) data != 0 || in __poke_user()
390 test_fp_ctl(data >> (BITS_PER_LONG - 32))) in __poke_user()
391 return -EINVAL; in __poke_user()
392 child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32); in __poke_user()
394 } else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) { in __poke_user()
396 * floating point regs. are either in child->thread.fpu in __poke_user()
397 * or the child->thread.fpu.vxrs array in __poke_user()
399 offset = addr - offsetof(struct user, regs.fp_regs.fprs); in __poke_user()
402 child->thread.fpu.vxrs + 2*offset) = data; in __poke_user()
405 child->thread.fpu.fprs + offset) = data; in __poke_user()
407 } else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) { in __poke_user()
411 addr -= offsetof(struct user, regs.per_info); in __poke_user()
412 __poke_user_per(child, addr, data); in __poke_user()
419 static int poke_user(struct task_struct *child, addr_t addr, addr_t data) in poke_user() argument
428 if (addr >= offsetof(struct user, regs.acrs) && in poke_user()
429 addr < offsetof(struct user, regs.orig_gpr2)) in poke_user()
431 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) in poke_user()
432 return -EIO; in poke_user()
434 return __poke_user(child, addr, data); in poke_user()
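The write side is symmetric; a hedged companion sketch under the same assumptions as the read example above. __poke_user() validates PSW updates (home address-space-control bits, or the EA bit without BA, yield -EINVAL), and poking regs.gprs[2] while the tracee sits in a system call also refreshes int_code, as the lines above show.

#include <stddef.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>

/* Write one word into the tracee's USER area at the given offset. */
static int poke_user_word(pid_t pid, size_t offset, unsigned long value)
{
	if (ptrace(PTRACE_POKEUSER, pid, (void *)offset, (void *)value) == -1) {
		perror("PTRACE_POKEUSER");	/* -EIO / -EINVAL from poke_user()/__poke_user() */
		return -1;
	}
	return 0;
}

/* e.g. poke_user_word(pid, offsetof(struct user, regs.gprs[2]), 42UL); */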
438 unsigned long addr, unsigned long data) in arch_ptrace() argument
445 /* read the word at location addr in the USER area. */ in arch_ptrace()
446 return peek_user(child, addr, data); in arch_ptrace()
449 /* write the word at location addr in the USER area */ in arch_ptrace()
450 return poke_user(child, addr, data); in arch_ptrace()
454 if (copy_from_user(&parea, (void __force __user *) addr, in arch_ptrace()
456 return -EFAULT; in arch_ptrace()
457 addr = parea.kernel_addr; in arch_ptrace()
458 data = parea.process_addr; in arch_ptrace()
462 ret = peek_user(child, addr, data); in arch_ptrace()
466 (addr_t __force __user *) data)) in arch_ptrace()
467 return -EFAULT; in arch_ptrace()
468 ret = poke_user(child, addr, utmp); in arch_ptrace()
472 addr += sizeof(unsigned long); in arch_ptrace()
473 data += sizeof(unsigned long); in arch_ptrace()
478 return put_user(child->thread.last_break, (unsigned long __user *)data); in arch_ptrace()
481 return -EIO; in arch_ptrace()
482 child->thread.per_flags &= ~PER_FLAG_NO_TE; in arch_ptrace()
486 return -EIO; in arch_ptrace()
487 child->thread.per_flags |= PER_FLAG_NO_TE; in arch_ptrace()
488 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
491 if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE)) in arch_ptrace()
492 return -EIO; in arch_ptrace()
493 switch (data) { in arch_ptrace()
495 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
498 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
499 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND; in arch_ptrace()
502 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND; in arch_ptrace()
503 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND; in arch_ptrace()
506 return -EINVAL; in arch_ptrace()
510 return ptrace_request(child, request, addr, data); in arch_ptrace()
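PTRACE_PEEKUSR_AREA and PTRACE_POKEUSR_AREA, handled above, take a ptrace_area descriptor instead of a single word, so one request copies a whole block of the USER area in word-sized steps. A sketch of the tracer side: kernel_addr and process_addr are the field names visible in the handler above, while the length field (assumed here to be len, per the s390 uapi <asm/ptrace.h>) and the coexistence of <sys/ptrace.h> with <asm/ptrace.h> should be checked against your toolchain.

#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>		/* ptrace_area, PTRACE_PEEKUSR_AREA (s390) */

/* Copy len bytes of the tracee's USER area, starting at offset, into buf. */
static long peek_user_area(pid_t pid, unsigned long offset,
			   void *buf, unsigned long len)
{
	ptrace_area parea = {
		.len          = len,			/* assumed field name, see lead-in */
		.kernel_addr  = offset,			/* offset into struct user */
		.process_addr = (unsigned long)buf,	/* destination in the tracer */
	};

	return ptrace(PTRACE_PEEKUSR_AREA, pid, &parea, NULL);
}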
526 * a 64 bit program is a no-no.
533 addr_t addr) in __peek_user_per_compat() argument
535 if (addr == offsetof(struct compat_per_struct_kernel, cr9)) in __peek_user_per_compat()
538 PER_EVENT_IFETCH : child->thread.per_user.control; in __peek_user_per_compat()
539 else if (addr == offsetof(struct compat_per_struct_kernel, cr10)) in __peek_user_per_compat()
542 0 : child->thread.per_user.start; in __peek_user_per_compat()
543 else if (addr == offsetof(struct compat_per_struct_kernel, cr11)) in __peek_user_per_compat()
546 PSW32_ADDR_INSN : child->thread.per_user.end; in __peek_user_per_compat()
547 else if (addr == offsetof(struct compat_per_struct_kernel, bits)) in __peek_user_per_compat()
548 /* Single-step bit. */ in __peek_user_per_compat()
551 else if (addr == offsetof(struct compat_per_struct_kernel, starting_addr)) in __peek_user_per_compat()
553 return (__u32) child->thread.per_user.start; in __peek_user_per_compat()
554 else if (addr == offsetof(struct compat_per_struct_kernel, ending_addr)) in __peek_user_per_compat()
556 return (__u32) child->thread.per_user.end; in __peek_user_per_compat()
557 else if (addr == offsetof(struct compat_per_struct_kernel, perc_atmid)) in __peek_user_per_compat()
559 return (__u32) child->thread.per_event.cause << 16; in __peek_user_per_compat()
560 else if (addr == offsetof(struct compat_per_struct_kernel, address)) in __peek_user_per_compat()
562 return (__u32) child->thread.per_event.address; in __peek_user_per_compat()
563 else if (addr == offsetof(struct compat_per_struct_kernel, access_id)) in __peek_user_per_compat()
565 return (__u32) child->thread.per_event.paid << 24; in __peek_user_per_compat()
572 static u32 __peek_user_compat(struct task_struct *child, addr_t addr) in __peek_user_compat() argument
577 if (addr < offsetof(struct compat_user, regs.acrs)) { in __peek_user_compat()
582 if (addr == offsetof(struct compat_user, regs.psw.mask)) { in __peek_user_compat()
584 tmp = (__u32)(regs->psw.mask >> 32); in __peek_user_compat()
587 } else if (addr == offsetof(struct compat_user, regs.psw.addr)) { in __peek_user_compat()
589 tmp = (__u32) regs->psw.addr | in __peek_user_compat()
590 (__u32)(regs->psw.mask & PSW_MASK_BA); in __peek_user_compat()
592 /* gpr 0-15 */ in __peek_user_compat()
593 tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4); in __peek_user_compat()
595 } else if (addr < offsetof(struct compat_user, regs.orig_gpr2)) { in __peek_user_compat()
599 offset = addr - offsetof(struct compat_user, regs.acrs); in __peek_user_compat()
600 tmp = *(__u32*)((addr_t) &child->thread.acrs + offset); in __peek_user_compat()
602 } else if (addr == offsetof(struct compat_user, regs.orig_gpr2)) { in __peek_user_compat()
606 tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4); in __peek_user_compat()
608 } else if (addr < offsetof(struct compat_user, regs.fp_regs)) { in __peek_user_compat()
615 } else if (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) { in __peek_user_compat()
619 tmp = child->thread.fpu.fpc; in __peek_user_compat()
621 } else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) { in __peek_user_compat()
623 * floating point regs. are either in child->thread.fpu in __peek_user_compat()
624 * or the child->thread.fpu.vxrs array in __peek_user_compat()
626 offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs); in __peek_user_compat()
629 ((addr_t) child->thread.fpu.vxrs + 2*offset); in __peek_user_compat()
632 ((addr_t) child->thread.fpu.fprs + offset); in __peek_user_compat()
634 } else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) { in __peek_user_compat()
638 addr -= offsetof(struct compat_user, regs.per_info); in __peek_user_compat()
639 tmp = __peek_user_per_compat(child, addr); in __peek_user_compat()
648 addr_t addr, addr_t data) in peek_user_compat() argument
652 if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3) in peek_user_compat()
653 return -EIO; in peek_user_compat()
655 tmp = __peek_user_compat(child, addr); in peek_user_compat()
656 return put_user(tmp, (__u32 __user *) data); in peek_user_compat()
663 addr_t addr, __u32 data) in __poke_user_per_compat() argument
665 if (addr == offsetof(struct compat_per_struct_kernel, cr9)) in __poke_user_per_compat()
667 child->thread.per_user.control = in __poke_user_per_compat()
668 data & (PER_EVENT_MASK | PER_CONTROL_MASK); in __poke_user_per_compat()
669 else if (addr == offsetof(struct compat_per_struct_kernel, starting_addr)) in __poke_user_per_compat()
671 child->thread.per_user.start = data; in __poke_user_per_compat()
672 else if (addr == offsetof(struct compat_per_struct_kernel, ending_addr)) in __poke_user_per_compat()
674 child->thread.per_user.end = data; in __poke_user_per_compat()
681 addr_t addr, addr_t data) in __poke_user_compat() argument
683 __u32 tmp = (__u32) data; in __poke_user_compat()
686 if (addr < offsetof(struct compat_user, regs.acrs)) { in __poke_user_compat()
691 if (addr == offsetof(struct compat_user, regs.psw.mask)) { in __poke_user_compat()
698 return -EINVAL; in __poke_user_compat()
699 if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME) in __poke_user_compat()
700 /* Invalid address-space-control bits */ in __poke_user_compat()
701 return -EINVAL; in __poke_user_compat()
702 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | in __poke_user_compat()
703 (regs->psw.mask & PSW_MASK_BA) | in __poke_user_compat()
705 } else if (addr == offsetof(struct compat_user, regs.psw.addr)) { in __poke_user_compat()
707 regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN; in __poke_user_compat()
709 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | in __poke_user_compat()
713 addr == offsetof(struct compat_user, regs.gprs[2])) { in __poke_user_compat()
716 regs->int_code = 0x20000 | (data & 0xffff); in __poke_user_compat()
718 /* gpr 0-15 */ in __poke_user_compat()
719 *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp; in __poke_user_compat()
721 } else if (addr < offsetof(struct compat_user, regs.orig_gpr2)) { in __poke_user_compat()
725 offset = addr - offsetof(struct compat_user, regs.acrs); in __poke_user_compat()
726 *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp; in __poke_user_compat()
728 } else if (addr == offsetof(struct compat_user, regs.orig_gpr2)) { in __poke_user_compat()
732 *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp; in __poke_user_compat()
734 } else if (addr < offsetof(struct compat_user, regs.fp_regs)) { in __poke_user_compat()
741 } else if (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) { in __poke_user_compat()
747 return -EINVAL; in __poke_user_compat()
748 child->thread.fpu.fpc = data; in __poke_user_compat()
750 } else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) { in __poke_user_compat()
752 * floating point regs. are either in child->thread.fpu in __poke_user_compat()
753 * or the child->thread.fpu.vxrs array in __poke_user_compat()
755 offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs); in __poke_user_compat()
758 child->thread.fpu.vxrs + 2*offset) = tmp; in __poke_user_compat()
761 child->thread.fpu.fprs + offset) = tmp; in __poke_user_compat()
763 } else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) { in __poke_user_compat()
767 addr -= offsetof(struct compat_user, regs.per_info); in __poke_user_compat()
768 __poke_user_per_compat(child, addr, data); in __poke_user_compat()
775 addr_t addr, addr_t data) in poke_user_compat() argument
777 if (!is_compat_task() || (addr & 3) || in poke_user_compat()
778 addr > sizeof(struct compat_user) - 3) in poke_user_compat()
779 return -EIO; in poke_user_compat()
781 return __poke_user_compat(child, addr, data); in poke_user_compat()
787 unsigned long addr = caddr; in compat_arch_ptrace() local
788 unsigned long data = cdata; in compat_arch_ptrace() local
794 /* read the word at location addr in the USER area. */ in compat_arch_ptrace()
795 return peek_user_compat(child, addr, data); in compat_arch_ptrace()
798 /* write the word at location addr in the USER area */ in compat_arch_ptrace()
799 return poke_user_compat(child, addr, data); in compat_arch_ptrace()
803 if (copy_from_user(&parea, (void __force __user *) addr, in compat_arch_ptrace()
805 return -EFAULT; in compat_arch_ptrace()
806 addr = parea.kernel_addr; in compat_arch_ptrace()
807 data = parea.process_addr; in compat_arch_ptrace()
811 ret = peek_user_compat(child, addr, data); in compat_arch_ptrace()
815 (__u32 __force __user *) data)) in compat_arch_ptrace()
816 return -EFAULT; in compat_arch_ptrace()
817 ret = poke_user_compat(child, addr, utmp); in compat_arch_ptrace()
821 addr += sizeof(unsigned int); in compat_arch_ptrace()
822 data += sizeof(unsigned int); in compat_arch_ptrace()
827 return put_user(child->thread.last_break, (unsigned int __user *)data); in compat_arch_ptrace()
829 return compat_ptrace_request(child, request, addr, data); in compat_arch_ptrace()
843 save_access_regs(target->thread.acrs); in s390_regs_get()
858 save_access_regs(target->thread.acrs); in s390_regs_set()
864 count -= sizeof(*k); in s390_regs_set()
875 count -= sizeof(*u); in s390_regs_set()
881 restore_access_regs(target->thread.acrs); in s390_regs_set()
895 fp_regs.fpc = target->thread.fpu.fpc; in s390_fpregs_get()
896 fpregs_store(&fp_regs, &target->thread.fpu); in s390_fpregs_get()
911 convert_vx_to_fp(fprs, target->thread.fpu.vxrs); in s390_fpregs_set()
913 memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs)); in s390_fpregs_set()
917 u32 ufpc[2] = { target->thread.fpu.fpc, 0 }; in s390_fpregs_set()
923 return -EINVAL; in s390_fpregs_set()
924 target->thread.fpu.fpc = ufpc[0]; in s390_fpregs_set()
929 fprs, offsetof(s390_fp_regs, fprs), -1); in s390_fpregs_set()
934 convert_fp_to_vx(target->thread.fpu.vxrs, fprs); in s390_fpregs_set()
936 memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs)); in s390_fpregs_set()
945 return membuf_store(&to, target->thread.last_break); in s390_last_break_get()
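The same datum is reachable through the generic regset interface: PTRACE_GETREGSET with NT_S390_LAST_BREAK ends up in s390_last_break_get() above, just as PTRACE_GET_LAST_BREAK does in arch_ptrace(). A minimal sketch, assuming glibc's <elf.h> defines NT_S390_LAST_BREAK:

#include <elf.h>		/* NT_S390_LAST_BREAK */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Fetch the tracee's last breaking-event address (one unsigned long). */
static long read_last_break(pid_t pid, unsigned long *last_break)
{
	struct iovec iov = {
		.iov_base = last_break,
		.iov_len  = sizeof(*last_break),
	};

	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_S390_LAST_BREAK, &iov);
}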
963 if (!(regs->int_code & 0x200)) in s390_tdb_get()
964 return -ENODATA; in s390_tdb_get()
965 size = sizeof(target->thread.trap_tdb.data); in s390_tdb_get()
966 return membuf_write(&to, target->thread.trap_tdb.data, size); in s390_tdb_get()
985 return -ENODEV; in s390_vxrs_low_get()
989 vxrs[i] = target->thread.fpu.vxrs[i].low; in s390_vxrs_low_get()
1002 return -ENODEV; in s390_vxrs_low_set()
1007 vxrs[i] = target->thread.fpu.vxrs[i].low; in s390_vxrs_low_set()
1009 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); in s390_vxrs_low_set()
1012 target->thread.fpu.vxrs[i].low = vxrs[i]; in s390_vxrs_low_set()
1022 return -ENODEV; in s390_vxrs_high_get()
1025 return membuf_write(&to, target->thread.fpu.vxrs + __NUM_VXRS_LOW, in s390_vxrs_high_get()
1037 return -ENODEV; in s390_vxrs_high_set()
1042 target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1); in s390_vxrs_high_set()
1050 return membuf_store(&to, target->thread.system_call); in s390_system_call_get()
1058 unsigned int *data = &target->thread.system_call; in s390_system_call_set() local
1060 data, 0, sizeof(unsigned int)); in s390_system_call_set()
1067 struct gs_cb *data = target->thread.gs_cb; in s390_gs_cb_get() local
1070 return -ENODEV; in s390_gs_cb_get()
1071 if (!data) in s390_gs_cb_get()
1072 return -ENODATA; in s390_gs_cb_get()
1074 save_gs_cb(data); in s390_gs_cb_get()
1075 return membuf_write(&to, data, sizeof(struct gs_cb)); in s390_gs_cb_get()
1083 struct gs_cb gs_cb = { }, *data = NULL; in s390_gs_cb_set() local
1087 return -ENODEV; in s390_gs_cb_set()
1088 if (!target->thread.gs_cb) { in s390_gs_cb_set()
1089 data = kzalloc(sizeof(*data), GFP_KERNEL); in s390_gs_cb_set()
1090 if (!data) in s390_gs_cb_set()
1091 return -ENOMEM; in s390_gs_cb_set()
1093 if (!target->thread.gs_cb) in s390_gs_cb_set()
1098 gs_cb = *target->thread.gs_cb; in s390_gs_cb_set()
1102 kfree(data); in s390_gs_cb_set()
1103 return -EFAULT; in s390_gs_cb_set()
1106 if (!target->thread.gs_cb) in s390_gs_cb_set()
1107 target->thread.gs_cb = data; in s390_gs_cb_set()
1108 *target->thread.gs_cb = gs_cb; in s390_gs_cb_set()
1111 restore_gs_cb(target->thread.gs_cb); in s390_gs_cb_set()
1121 struct gs_cb *data = target->thread.gs_bc_cb; in s390_gs_bc_get() local
1124 return -ENODEV; in s390_gs_bc_get()
1125 if (!data) in s390_gs_bc_get()
1126 return -ENODATA; in s390_gs_bc_get()
1127 return membuf_write(&to, data, sizeof(struct gs_cb)); in s390_gs_bc_get()
1135 struct gs_cb *data = target->thread.gs_bc_cb; in s390_gs_bc_set() local
1138 return -ENODEV; in s390_gs_bc_set()
1139 if (!data) { in s390_gs_bc_set()
1140 data = kzalloc(sizeof(*data), GFP_KERNEL); in s390_gs_bc_set()
1141 if (!data) in s390_gs_bc_set()
1142 return -ENOMEM; in s390_gs_bc_set()
1143 target->thread.gs_bc_cb = data; in s390_gs_bc_set()
1146 data, 0, sizeof(struct gs_cb)); in s390_gs_bc_set()
1151 return (cb->rca & 0x1f) == 0 && in is_ri_cb_valid()
1152 (cb->roa & 0xfff) == 0 && in is_ri_cb_valid()
1153 (cb->rla & 0xfff) == 0xfff && in is_ri_cb_valid()
1154 cb->s == 1 && in is_ri_cb_valid()
1155 cb->k == 1 && in is_ri_cb_valid()
1156 cb->h == 0 && in is_ri_cb_valid()
1157 cb->reserved1 == 0 && in is_ri_cb_valid()
1158 cb->ps == 1 && in is_ri_cb_valid()
1159 cb->qs == 0 && in is_ri_cb_valid()
1160 cb->pc == 1 && in is_ri_cb_valid()
1161 cb->qc == 0 && in is_ri_cb_valid()
1162 cb->reserved2 == 0 && in is_ri_cb_valid()
1163 cb->reserved3 == 0 && in is_ri_cb_valid()
1164 cb->reserved4 == 0 && in is_ri_cb_valid()
1165 cb->reserved5 == 0 && in is_ri_cb_valid()
1166 cb->reserved6 == 0 && in is_ri_cb_valid()
1167 cb->reserved7 == 0 && in is_ri_cb_valid()
1168 cb->reserved8 == 0 && in is_ri_cb_valid()
1169 cb->rla >= cb->roa && in is_ri_cb_valid()
1170 cb->rca >= cb->roa && in is_ri_cb_valid()
1171 cb->rca <= cb->rla+1 && in is_ri_cb_valid()
1172 cb->m < 3; in is_ri_cb_valid()
1179 struct runtime_instr_cb *data = target->thread.ri_cb; in s390_runtime_instr_get() local
1182 return -ENODEV; in s390_runtime_instr_get()
1183 if (!data) in s390_runtime_instr_get()
1184 return -ENODATA; in s390_runtime_instr_get()
1186 return membuf_write(&to, data, sizeof(struct runtime_instr_cb)); in s390_runtime_instr_get()
1194 struct runtime_instr_cb ri_cb = { }, *data = NULL; in s390_runtime_instr_set() local
1198 return -ENODEV; in s390_runtime_instr_set()
1200 if (!target->thread.ri_cb) { in s390_runtime_instr_set()
1201 data = kzalloc(sizeof(*data), GFP_KERNEL); in s390_runtime_instr_set()
1202 if (!data) in s390_runtime_instr_set()
1203 return -ENOMEM; in s390_runtime_instr_set()
1206 if (target->thread.ri_cb) { in s390_runtime_instr_set()
1210 ri_cb = *target->thread.ri_cb; in s390_runtime_instr_set()
1216 kfree(data); in s390_runtime_instr_set()
1217 return -EFAULT; in s390_runtime_instr_set()
1221 kfree(data); in s390_runtime_instr_set()
1222 return -EINVAL; in s390_runtime_instr_set()
1230 if (!target->thread.ri_cb) in s390_runtime_instr_set()
1231 target->thread.ri_cb = data; in s390_runtime_instr_set()
1232 *target->thread.ri_cb = ri_cb; in s390_runtime_instr_set()
1234 load_runtime_instr_cb(target->thread.ri_cb); in s390_runtime_instr_set()
1338 save_access_regs(target->thread.acrs); in s390_compat_regs_get()
1353 save_access_regs(target->thread.acrs); in s390_compat_regs_set()
1359 count -= sizeof(*k); in s390_compat_regs_set()
1370 count -= sizeof(*u); in s390_compat_regs_set()
1376 restore_access_regs(target->thread.acrs); in s390_compat_regs_set()
1388 gprs_high = (compat_ulong_t *)task_pt_regs(target)->gprs; in s390_compat_regs_high_get()
1403 &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)]; in s390_compat_regs_high_set()
1409 count -= sizeof(*k); in s390_compat_regs_high_set()
1420 count -= sizeof(*u); in s390_compat_regs_high_set()
1431 compat_ulong_t last_break = target->thread.last_break; in s390_compat_last_break_get()
1561 return regs->gprs[offset]; in regs_get_register()
1569 return -EINVAL; in regs_query_register_offset()
1571 return -EINVAL; in regs_query_register_offset()
1573 return -EINVAL; in regs_query_register_offset()
1584 static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) in regs_within_kernel_stack() argument
1588 return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1)); in regs_within_kernel_stack()
1592 * regs_get_kernel_stack_nth() - get Nth entry of the stack
1602 unsigned long addr; in regs_get_kernel_stack_nth() local
1604 addr = kernel_stack_pointer(regs) + n * sizeof(long); in regs_get_kernel_stack_nth()
1605 if (!regs_within_kernel_stack(regs, addr)) in regs_get_kernel_stack_nth()
1607 return *(unsigned long *)addr; in regs_get_kernel_stack_nth()
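regs_get_kernel_stack_nth() is the accessor kprobe-based trace events rely on for $stackN fetch arguments. A hypothetical in-kernel caller for illustration only (the helper is not necessarily exported to modules, so assume built-in code); per the check above it yields 0 once the requested slot leaves the current THREAD_SIZE-aligned stack.

#include <linux/kernel.h>
#include <linux/ptrace.h>	/* pt_regs, regs_get_kernel_stack_nth() on s390 */

/* Hypothetical debug helper: dump the first few kernel stack slots of a
 * trapped context using the accessor defined above. */
static void __maybe_unused dump_stack_slots(struct pt_regs *regs)
{
	unsigned int n;

	for (n = 0; n < 4; n++)
		pr_info("stack[%u] = %016lx\n", n,
			regs_get_kernel_stack_nth(regs, n));
}

From user space the same slots appear as $stackN arguments of kprobe events (Documentation/trace/kprobetrace.rst).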