/arch/sparc/kernel/ |
D | sigutil_32.c |
  19  if (test_tsk_thread_flag(current, TIF_USEDFPU)) {  in save_fpu_state()
  21  fpsave(&current->thread.float_regs[0], &current->thread.fsr,  in save_fpu_state()
  22  &current->thread.fpqueue[0], &current->thread.fpqdepth);  in save_fpu_state()
  24  clear_tsk_thread_flag(current, TIF_USEDFPU);  in save_fpu_state()
  27  if (current == last_task_used_math) {  in save_fpu_state()
  29  fpsave(&current->thread.float_regs[0], &current->thread.fsr,  in save_fpu_state()
  30  &current->thread.fpqueue[0], &current->thread.fpqdepth);  in save_fpu_state()
  36  &current->thread.float_regs[0],  in save_fpu_state()
  38  err |= __put_user(current->thread.fsr, &fpu->si_fsr);  in save_fpu_state()
  39  err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth);  in save_fpu_state()
  [all …]
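These sigutil_32.c matches are the sparc32 lazy-FPU flush: FP state stays live in the hardware registers until the kernel needs a coherent copy, so before FP state can be copied into a user signal frame it must first be written back into current->thread. A minimal sketch of the SMP branch, built only from the helpers visible in the matches (the real function also re-enables the FPU in the PSR and handles the UP last_task_used_math case):

    /* Sketch: make current->thread the authoritative copy of the FP
     * state before it is copied out to the signal frame (sparc32, SMP). */
    static void flush_fpu_to_thread_sketch(void)
    {
        if (test_tsk_thread_flag(current, TIF_USEDFPU)) {
            /* Dump live FP registers, FSR and FP queue into thread_struct. */
            fpsave(&current->thread.float_regs[0], &current->thread.fsr,
                   &current->thread.fpqueue[0], &current->thread.fpqdepth);
            /* Hardware no longer holds state for this task. */
            clear_tsk_thread_flag(current, TIF_USEDFPU);
        }
    }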
|
D | traps_32.c |
  62  printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);  in die_if_kernel()
  117  send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)pc, current);  in do_illegal_instruction()
  125  send_sig_fault(SIGILL, ILL_PRVOPC, (void __user *)pc, current);  in do_priv_instruction()
  146  current);  in do_memaccess_unaligned()
  166  if(last_task_used_math == current)  in do_fpd_trap()
  174  last_task_used_math = current;  in do_fpd_trap()
  176  fpload(&current->thread.float_regs[0], &current->thread.fsr);  in do_fpd_trap()
  187  fpload(&current->thread.float_regs[0], &current->thread.fsr);  in do_fpd_trap()
  208  struct task_struct *fpt = current;
  255  fpload(&current->thread.float_regs[0], &current->thread.fsr);
  [all …]
|
/arch/s390/kernel/ |
D | guarded_storage.c |
  25  if (!current->thread.gs_cb) {  in gs_enable()
  33  current->thread.gs_cb = gs_cb;  in gs_enable()
  41  if (current->thread.gs_cb) {  in gs_disable()
  43  kfree(current->thread.gs_cb);  in gs_disable()
  44  current->thread.gs_cb = NULL;  in gs_disable()
  55  gs_cb = current->thread.gs_bc_cb;  in gs_set_bc_cb()
  60  current->thread.gs_bc_cb = gs_cb;  in gs_set_bc_cb()
  71  gs_cb = current->thread.gs_bc_cb;  in gs_clear_bc_cb()
  72  current->thread.gs_bc_cb = NULL;  in gs_clear_bc_cb()
  83  gs_cb = current->thread.gs_bc_cb;  in gs_load_bc_cb()
  [all …]
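The gs_enable()/gs_disable() matches show the usual lifetime pattern for a per-thread control block hung off current->thread: allocate lazily on first enable, free and NULL the pointer on disable. A simplified sketch using the field names from the matches (the real code additionally loads the block into the guarded-storage registers and takes care of preemption):

    /* Sketch: lazily allocate the guarded-storage control block. */
    static int gs_enable_sketch(void)
    {
        struct gs_cb *gs_cb;

        if (!current->thread.gs_cb) {
            gs_cb = kzalloc(sizeof(*gs_cb), GFP_KERNEL);
            if (!gs_cb)
                return -ENOMEM;
            current->thread.gs_cb = gs_cb;
        }
        return 0;
    }

    /* Sketch: tear it down again; harmless if never enabled. */
    static void gs_disable_sketch(void)
    {
        kfree(current->thread.gs_cb);
        current->thread.gs_cb = NULL;
    }

Since kfree(NULL) is a no-op, the simplified disable side can skip the NULL check; the original keeps it because disabling also has to touch hardware state.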
|
D | signal.c |
  111  save_access_regs(current->thread.acrs);  in store_sigregs()
  118  restore_access_regs(current->thread.acrs);  in load_sigregs()
  132  memcpy(&user_sregs.regs.acrs, current->thread.acrs,  in save_sigregs()
  134  fpregs_store(&user_sregs.fpregs, &current->thread.fpu);  in save_sigregs()
  145  current->restart_block.fn = do_no_restart_syscall;  in restore_sigregs()
  150  if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW_MASK_RI))  in restore_sigregs()
  169  memcpy(&current->thread.acrs, &user_sregs.regs.acrs,  in restore_sigregs()
  170  sizeof(current->thread.acrs));  in restore_sigregs()
  172  fpregs_load(&user_sregs.fpregs, &current->thread.fpu);  in restore_sigregs()
  188  vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1);  in save_sigregs_ext()
  [all …]
|
/arch/h8300/kernel/ |
D | traps.c |
  45  current->thread.esp0 = ssp;  in set_esp0()
  59  pr_info("COMM=%s PID=%d\n", current->comm, current->pid);  in dump()
  60  if (current->mm) {  in dump()
  62  (int) current->mm->start_code,  in dump()
  63  (int) current->mm->end_code,  in dump()
  64  (int) current->mm->start_data,  in dump()
  65  (int) current->mm->end_data,  in dump()
  66  (int) current->mm->end_data,  in dump()
  67  (int) current->mm->brk);  in dump()
  69  (int) current->mm->start_stack,  in dump()
  [all …]
|
/arch/m68k/mm/ |
D | fault.c |
  27  signo = current->thread.signo;  in send_fault_sig()
  28  si_code = current->thread.code;  in send_fault_sig()
  29  addr = (void __user *)current->thread.faddr;  in send_fault_sig()
  71  struct mm_struct *mm = current->mm;  in do_page_fault()
  185  current->thread.signo = SIGBUS;  in do_page_fault()
  186  current->thread.faddr = address;  in do_page_fault()
  190  current->thread.signo = SIGBUS;  in do_page_fault()
  191  current->thread.code = BUS_ADRERR;  in do_page_fault()
  192  current->thread.faddr = address;  in do_page_fault()
  196  current->thread.signo = SIGSEGV;  in do_page_fault()
  [all …]
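The m68k fault path records which signal to raise in current->thread (signo, code, faddr) and lets send_fault_sig() read the triple back later, rather than threading it through every return path of do_page_fault(). A sketch of the two halves; the field names come from the matches, force_sig_fault() is the generic signal helper, and the real send_fault_sig() also distinguishes user from kernel faults:

    /* Fault side: stash the pending signal in the thread struct. */
    static void record_bus_error_sketch(unsigned long address)
    {
        current->thread.signo = SIGBUS;
        current->thread.code  = BUS_ADRERR;
        current->thread.faddr = address;
    }

    /* Delivery side: read the stashed values back and raise the signal. */
    static void send_fault_sig_sketch(void)
    {
        int signo = current->thread.signo;
        int si_code = current->thread.code;
        void __user *addr = (void __user *)current->thread.faddr;

        force_sig_fault(signo, si_code, addr);
    }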
|
/arch/powerpc/kernel/ |
D | signal_32.c |
  244  flush_fp_to_thread(current);  in prepare_save_user_regs()
  246  if (current->thread.used_vr)  in prepare_save_user_regs()
  247  flush_altivec_to_thread(current);  in prepare_save_user_regs()
  249  current->thread.vrsave = mfspr(SPRN_VRSAVE);  in prepare_save_user_regs()
  252  if (current->thread.used_vsr && ctx_has_vsx_region)  in prepare_save_user_regs()
  253  flush_vsx_to_thread(current);  in prepare_save_user_regs()
  256  if (current->thread.used_spe)  in prepare_save_user_regs()
  257  flush_spe_to_thread(current);  in prepare_save_user_regs()
  272  if (current->thread.used_vr) {  in __unsafe_save_user_regs()
  273  unsafe_copy_to_user(&frame->mc_vregs, &current->thread.vr_state,  in __unsafe_save_user_regs()
  [all …]
|
D | uprobes.c |
  61  struct arch_uprobe_task *autask = &current->utask->autask;  in arch_uprobe_pre_xol()
  63  autask->saved_trap_nr = current->thread.trap_nr;  in arch_uprobe_pre_xol()
  64  current->thread.trap_nr = UPROBE_TRAP_NR;  in arch_uprobe_pre_xol()
  65  regs_set_return_ip(regs, current->utask->xol_vaddr);  in arch_uprobe_pre_xol()
  67  user_enable_single_step(current);  in arch_uprobe_pre_xol()
  109  struct uprobe_task *utask = current->utask;  in arch_uprobe_post_xol()
  111  WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);  in arch_uprobe_post_xol()
  113  current->thread.trap_nr = utask->autask.saved_trap_nr;  in arch_uprobe_post_xol()
  124  user_disable_single_step(current);  in arch_uprobe_post_xol()
  165  struct uprobe_task *utask = current->utask;  in arch_uprobe_abort_xol()
  [all …]
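All three uprobe ports in this listing (powerpc here, csky and arm64 further down) follow the same execute-out-of-line (XOL) protocol: save the task's trap number, plant a sentinel so a fault taken inside the XOL slot can be recognized, single-step the relocated instruction, then restore everything. A condensed sketch of the powerpc pair, using only the calls shown in the matches:

    /* Before single-stepping the relocated instruction in the XOL area. */
    static int pre_xol_sketch(struct pt_regs *regs)
    {
        struct arch_uprobe_task *autask = &current->utask->autask;

        autask->saved_trap_nr = current->thread.trap_nr;
        current->thread.trap_nr = UPROBE_TRAP_NR;     /* sentinel value */
        regs_set_return_ip(regs, current->utask->xol_vaddr);
        user_enable_single_step(current);
        return 0;
    }

    /* After the step completes: remove the sentinel, stop stepping. */
    static int post_xol_sketch(struct pt_regs *regs)
    {
        WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
        current->thread.trap_nr = current->utask->autask.saved_trap_nr;
        user_disable_single_step(current);
        return 0;
    }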
|
D | process.c |
  96  if (tsk == current && tsk->thread.regs &&  in check_if_tm_restore_required()
  202  BUG_ON(tsk != current);  in flush_fp_to_thread()
  218  if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {  in enable_kernel_fp()
  219  check_if_tm_restore_required(current);  in enable_kernel_fp()
  228  MSR_TM_ACTIVE(current->thread.regs->msr))  in enable_kernel_fp()
  230  __giveup_fpu(current);  in enable_kernel_fp()
  269  if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {  in enable_kernel_altivec()
  270  check_if_tm_restore_required(current);  in enable_kernel_altivec()
  279  MSR_TM_ACTIVE(current->thread.regs->msr))  in enable_kernel_altivec()
  281  __giveup_altivec(current);  in enable_kernel_altivec()
  [all …]
|
D | traps.c |
  145  if (kexec_should_crash(current))  in die_will_crash()
  148  !current->pid || is_global_init(current))  in die_will_crash()
  229  if (kexec_should_crash(current))  in oops_end()
  241  if (in_interrupt() || panic_on_oops || !current->pid ||  in oops_end()
  242  is_global_init(current)) {  in oops_end()
  318  if (!unhandled_signal(current, signr))  in show_signal_msg()
  325  current->comm, current->pid, signame(signr), signr,  in show_signal_msg()
  354  current->thread.trap_nr = code;  in exception_common()
  579  #define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC)
  580  #define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC)
  [all …]
|
/arch/nds32/include/asm/ |
D | fpu.h |
  78  if (last_task_used_math == current) {
  81  if (test_tsk_fpu(task_pt_regs(current))) {
  83  save_fpu(current);
  85  disable_ptreg_fpu(task_pt_regs(current));
  93  if (last_task_used_math != current) {
  96  load_fpu(&current->thread.fpu);
  97  last_task_used_math = current;
  100  if (!test_tsk_fpu(task_pt_regs(current))) {
  101  load_fpu(&current->thread.fpu);
  104  enable_ptreg_fpu(task_pt_regs(current));
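This header carries both FPU ownership models side by side: a lazy scheme in which the global last_task_used_math tracks whose state currently sits in the FPU, and an eager per-task scheme keyed off a flag in the task's saved pt_regs. A sketch of the reload direction under the lazy scheme, using the helpers named in the matches:

    /* Lazy FPU: only reload from memory when some other task's state
     * currently occupies the FPU registers. */
    static inline void lazy_fpu_switch_in_sketch(void)
    {
        if (last_task_used_math != current) {
            load_fpu(&current->thread.fpu);  /* thread_struct -> hardware */
            last_task_used_math = current;   /* this task owns the FPU now */
        }
    }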
|
/arch/mips/math-emu/ |
D | dsemul.c |
  77  mm_context_t *mm_ctx = &current->mm->context;  in alloc_emuframe()
  119  pr_debug("allocate emuframe %d to %d\n", idx, current->pid);  in alloc_emuframe()
  131  pr_debug("free emuframe %d from %d\n", idx, current->pid);  in free_emuframe()
  183  fr_idx = atomic_read(&current->thread.bd_emu_frame);  in dsemul_thread_rollback()
  196  regs->cp0_epc = current->thread.bd_emu_branch_pc;  in dsemul_thread_rollback()
  198  regs->cp0_epc = current->thread.bd_emu_cont_pc;  in dsemul_thread_rollback()
  200  atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);  in dsemul_thread_rollback()
  201  free_emuframe(fr_idx, current->mm);  in dsemul_thread_rollback()
  249  fr_idx = atomic_read(&current->thread.bd_emu_frame);  in mips_dsemul()
  276  ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr),  in mips_dsemul()
  [all …]
|
/arch/powerpc/include/asm/book3s/32/ |
D | kup.h |
  82  unsigned long kuap = current->thread.kuap;  in kuap_save_and_lock()
  91  current->thread.kuap = KUAP_NONE;  in kuap_save_and_lock()
  105  current->thread.kuap = KUAP_NONE;  in kuap_kernel_restore()
  112  current->thread.kuap = regs->kuap;  in kuap_kernel_restore()
  119  unsigned long kuap = current->thread.kuap;  in kuap_get_and_assert_locked()
  145  current->thread.kuap = (__force u32)to;  in allow_user_access()
  151  u32 kuap = current->thread.kuap;  in prevent_user_access()
  161  current->thread.kuap = KUAP_NONE;  in prevent_user_access()
  167  unsigned long flags = current->thread.kuap;  in prevent_user_access_return()
  173  current->thread.kuap = KUAP_NONE;  in prevent_user_access_return()
  [all …]
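On 32-bit Book3S the KUAP state is kept as a plain per-thread word: interrupt entry snapshots current->thread.kuap into the interrupt frame and forces the live value to KUAP_NONE (locked), and interrupt exit writes the saved value back. A sketch of that pairing with the names from the matches; the real helpers also update the segment registers to actually open or close user access:

    /* Interrupt entry: remember whether user access was open, then lock. */
    static inline unsigned long kuap_save_and_lock_sketch(void)
    {
        unsigned long kuap = current->thread.kuap;

        current->thread.kuap = KUAP_NONE;
        return kuap;            /* caller stores this in regs->kuap */
    }

    /* Interrupt exit: restore the pre-interrupt state. */
    static inline void kuap_kernel_restore_sketch(struct pt_regs *regs)
    {
        current->thread.kuap = regs->kuap;
    }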
|
/arch/um/kernel/ |
D | process.c |
  91  arch_switch_to(current);  in __switch_to()
  93  return current->thread.prev_sched;  in __switch_to()
  98  struct pt_regs *regs = &current->thread.regs;  in interrupt_end()
  111  return task_pid_nr(current);  in get_current_pid()
  123  if (current->thread.prev_sched != NULL)  in new_thread_handler()
  124  schedule_tail(current->thread.prev_sched);  in new_thread_handler()
  125  current->thread.prev_sched = NULL;  in new_thread_handler()
  127  fn = current->thread.request.u.thread.proc;  in new_thread_handler()
  128  arg = current->thread.request.u.thread.arg;  in new_thread_handler()
  134  userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);  in new_thread_handler()
  [all …]
|
D | trap.c |
  27  struct mm_struct *mm = current->mm;  in handle_page_fault()
  76  if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))  in handle_page_fault()
  134  struct task_struct *tsk = current;  in show_segv_info()
  155  current->thread.arch.faultinfo = fi;  in bad_segv()
  162  do_signal(&current->thread.regs);  in fatal_sigsegv()
  209  current->thread.segv_regs = container_of(regs, struct pt_regs, regs);  in segv()
  215  else if (current->mm == NULL) {  in segv()
  238  catcher = current->thread.fault_catcher;  in segv()
  242  current->thread.fault_addr = (void *) address;  in segv()
  245  else if (current->thread.fault_addr != NULL)  in segv()
  [all …]
|
/arch/csky/kernel/probes/ |
D | uprobes.c |
  50  struct uprobe_task *utask = current->utask;  in arch_uprobe_pre_xol()
  52  utask->autask.saved_trap_no = current->thread.trap_no;  in arch_uprobe_pre_xol()
  53  current->thread.trap_no = UPROBE_TRAP_NR;  in arch_uprobe_pre_xol()
  57  user_enable_single_step(current);  in arch_uprobe_pre_xol()
  64  struct uprobe_task *utask = current->utask;  in arch_uprobe_post_xol()
  66  WARN_ON_ONCE(current->thread.trap_no != UPROBE_TRAP_NR);  in arch_uprobe_post_xol()
  70  user_disable_single_step(current);  in arch_uprobe_post_xol()
  102  struct uprobe_task *utask = current->utask;  in arch_uprobe_abort_xol()
  110  user_disable_single_step(current);  in arch_uprobe_abort_xol()
|
/arch/sparc/include/asm/ |
D | mman.h |
  22  if (current->mm == mm) {  in ipi_set_tstate_mcde()
  25  regs = task_pt_regs(current);  in ipi_set_tstate_mcde()
  36  if (!current->mm->context.adi) {  in sparc_calc_vm_prot_bits()
  37  regs = task_pt_regs(current);  in sparc_calc_vm_prot_bits()
  39  current->mm->context.adi = true;  in sparc_calc_vm_prot_bits()
  40  on_each_cpu_mask(mm_cpumask(current->mm),  in sparc_calc_vm_prot_bits()
  41  ipi_set_tstate_mcde, current->mm, 0);  in sparc_calc_vm_prot_bits()
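sparc_calc_vm_prot_bits() flips the per-mm ADI flag the first time an ADI mapping is requested, then broadcasts an IPI so every CPU running this mm updates its saved user registers; the callback guards on current->mm == mm so only CPUs actually executing the address space act. A trimmed sketch; the TSTATE_MCDE bit set in the callback is an assumption inferred from the function name, the rest follows the matches:

    /* Runs on each CPU in mm_cpumask(mm); act only if this CPU is
     * currently running the target address space. */
    static void ipi_set_tstate_mcde_sketch(void *mm_ptr)
    {
        struct mm_struct *mm = mm_ptr;

        if (current->mm == mm) {
            struct pt_regs *regs = task_pt_regs(current);

            regs->tstate |= TSTATE_MCDE;  /* assumed: enable MCD exceptions */
        }
    }

    /* First ADI mapping for this mm: mark it and notify its CPUs. */
    static void enable_adi_for_mm_sketch(void)
    {
        current->mm->context.adi = true;
        on_each_cpu_mask(mm_cpumask(current->mm),
                         ipi_set_tstate_mcde_sketch, current->mm, 0);
    }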
|
/arch/nds32/kernel/ |
D | fpu.c |
  152  unlazy_fpu(current);  in store_fpu_for_suspend()
  154  clear_fpu(task_pt_regs(current));  in store_fpu_for_suspend()
  168  if (last_task_used_math == current)  in do_fpu_context_switch()
  173  last_task_used_math = current;  in do_fpu_context_switch()
  176  load_fpu(&current->thread.fpu);  in do_fpu_context_switch()
  181  current->thread.fpu.UDF_IEX_trap = init_fpuregs.UDF_IEX_trap;  in do_fpu_context_switch()
  215  fpcsr = current->thread.fpu.fpcsr;  in handle_fpu_exception()
  218  si_signo = do_fpuemu(regs, &current->thread.fpu);  in handle_fpu_exception()
  219  fpcsr = current->thread.fpu.fpcsr;  in handle_fpu_exception()
  221  current->thread.fpu.fpcsr &= ~(redo_except);  in handle_fpu_exception()
|
/arch/mips/kernel/ |
D | pm.c |
  30  save_dsp(current);  in mips_cpu_save()
  44  if (current->mm)  in mips_cpu_restore()
  45  write_c0_entryhi(cpu_asid(cpu, current->mm));  in mips_cpu_restore()
  48  restore_dsp(current);  in mips_cpu_restore()
  55  __restore_watch(current);  in mips_cpu_restore()
|
/arch/arm64/kernel/probes/ |
D | uprobes.c |
  64  struct uprobe_task *utask = current->utask;  in arch_uprobe_pre_xol()
  67  current->thread.fault_code = UPROBE_INV_FAULT_CODE;  in arch_uprobe_pre_xol()
  72  user_enable_single_step(current);  in arch_uprobe_pre_xol()
  79  struct uprobe_task *utask = current->utask;  in arch_uprobe_post_xol()
  81  WARN_ON_ONCE(current->thread.fault_code != UPROBE_INV_FAULT_CODE);  in arch_uprobe_post_xol()
  86  user_disable_single_step(current);  in arch_uprobe_post_xol()
  122  struct uprobe_task *utask = current->utask;  in arch_uprobe_abort_xol()
  130  user_disable_single_step(current);  in arch_uprobe_abort_xol()
  180  struct uprobe_task *utask = current->utask;  in uprobe_single_step_handler()
|
/arch/arm64/include/asm/ |
D | stackprotector.h |
  38  current->stack_canary = canary;  in boot_init_stack_canary()
  40  __stack_chk_guard = current->stack_canary;  in boot_init_stack_canary()
  42  ptrauth_thread_init_kernel(current);  in boot_init_stack_canary()
  43  ptrauth_thread_switch_kernel(current);  in boot_init_stack_canary()
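boot_init_stack_canary() is the boot-CPU handshake for -fstack-protector: pick a random canary, store the per-task copy in current (the init task), and mirror it into the global __stack_chk_guard that compiler-emitted function epilogues compare against. A reduced sketch; get_random_canary() is the common helper from linux/random.h, and the ptrauth calls in the matches are arm64-specific key setup left out here:

    /* Boot CPU only: seed the stack-protector canary before any
     * instrumented function can return. */
    static __always_inline void boot_init_stack_canary_sketch(void)
    {
        unsigned long canary = get_random_canary();

        current->stack_canary = canary;  /* per-task copy */
        __stack_chk_guard = canary;      /* value the emitted checks read */
    }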
|
/arch/powerpc/math-emu/ |
D | math.c |
  332  op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f);  in do_mathemu()
  333  op1 = (void *)&current->thread.TS_FPR((insn >> 16) & 0x1f);  in do_mathemu()
  334  op2 = (void *)&current->thread.TS_FPR((insn >> 11) & 0x1f);  in do_mathemu()
  338  op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f);  in do_mathemu()
  339  op1 = (void *)&current->thread.TS_FPR((insn >> 16) & 0x1f);  in do_mathemu()
  340  op2 = (void *)&current->thread.TS_FPR((insn >> 6) & 0x1f);  in do_mathemu()
  344  op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f);  in do_mathemu()
  345  op1 = (void *)&current->thread.TS_FPR((insn >> 16) & 0x1f);  in do_mathemu()
  346  op2 = (void *)&current->thread.TS_FPR((insn >> 11) & 0x1f);  in do_mathemu()
  347  op3 = (void *)&current->thread.TS_FPR((insn >> 6) & 0x1f);  in do_mathemu()
  [all …]
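The do_mathemu() matches are operand decode: PowerPC FP instructions keep their register numbers in fixed 5-bit fields, so the emulator extracts each index with a shift and mask and turns it into a pointer into the task's saved FP register file (current->thread). A standalone sketch of just the field extraction, matching the shifts above (plain C, hypothetical helper names):

    #include <stdint.h>

    /* 5-bit register fields of a PowerPC FP instruction word, given as
     * shift distances from the LSB: frD at 21, frA at 16, frB at 11,
     * frC at 6 (A-form). */
    static inline unsigned frD(uint32_t insn) { return (insn >> 21) & 0x1f; }
    static inline unsigned frA(uint32_t insn) { return (insn >> 16) & 0x1f; }
    static inline unsigned frB(uint32_t insn) { return (insn >> 11) & 0x1f; }
    static inline unsigned frC(uint32_t insn) { return (insn >>  6) & 0x1f; }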
|
/arch/mips/power/ |
D | cpu.c |
  21  save_fp(current);  in save_processor_state()
  23  save_dsp(current);  in save_processor_state()
  31  restore_fp(current);  in restore_processor_state()
  33  restore_dsp(current);  in restore_processor_state()
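save_processor_state()/restore_processor_state() are the hibernation hooks: whatever lazily managed register state the suspending task owns (FPU, DSP) is flushed into its task_struct before the image is written, then reloaded on resume. A symmetric sketch built from the calls in the matches; the real hooks guard each call with the matching cpu_has_* check:

    /* Before the hibernation image is saved: park lazy state in
     * current's task_struct. */
    static void save_processor_state_sketch(void)
    {
        save_fp(current);    /* FPU registers -> current->thread */
        save_dsp(current);   /* DSP accumulators -> current->thread */
    }

    /* Mirror image on resume. */
    static void restore_processor_state_sketch(void)
    {
        restore_fp(current);
        restore_dsp(current);
    }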
|
/arch/arm64/kernel/ |
D | fpsimd.c |
  295  sve_load_state(sve_pffr(&current->thread),  in task_fpsimd_load()
  296  &current->thread.uw.fpsimd_state.fpsr,  in task_fpsimd_load()
  297  sve_vq_from_vl(current->thread.sve_vl) - 1);  in task_fpsimd_load()
  299  fpsimd_load_state(&current->thread.uw.fpsimd_state);  in task_fpsimd_load()
  624  if (task == current) {  in sve_set_vector_length()
  634  if (task == current)  in sve_set_vector_length()
  663  ret = current->thread.sve_vl_onexec;  in sve_prctl_status()
  665  ret = current->thread.sve_vl;  in sve_prctl_status()
  685  ret = sve_set_vector_length(current, vl, flags);  in sve_set_current_vl()
  945  sve_alloc(current);  in do_sve_acc()
  [all …]
|
/arch/x86/kernel/ |
D | ioport.c |
  25  if (current->thread.io_bitmap) {  in io_bitmap_share()
  30  refcount_inc(&current->thread.io_bitmap->refcnt);  in io_bitmap_share()
  31  tsk->thread.io_bitmap = current->thread.io_bitmap;  in io_bitmap_share()
  67  struct thread_struct *t = &current->thread;  in ksys_ioperm()
  105  io_bitmap_exit(current);  in ksys_ioperm()
  137  io_bitmap_exit(current);  in ksys_ioperm()
  175  struct thread_struct *t = &current->thread;  in SYSCALL_DEFINE1()
  195  task_update_io_bitmap(current);  in SYSCALL_DEFINE1()
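io_bitmap_share() shows how the x86 I/O-permission bitmap is shared across fork: rather than duplicating a bitmap of up to 8 KiB, the child takes a reference on the parent's and both point at the same object until one of them changes its permissions. A sketch of the refcount hand-off using the fields from the matches (the real helper also sets the child's TIF_IO_BITMAP flag):

    /* At fork: let the child (tsk) share current's I/O bitmap by
     * reference instead of copying it. */
    static void io_bitmap_share_sketch(struct task_struct *tsk)
    {
        if (current->thread.io_bitmap) {
            refcount_inc(&current->thread.io_bitmap->refcnt);
            tsk->thread.io_bitmap = current->thread.io_bitmap;
        }
    }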
|