/arch/mips/include/asm/

D | asmmacro-32.h |
    16  .macro fpu_save_single thread tmp=t0
    20  s.d $f0, THREAD_FPR0(\thread)
    21  s.d $f2, THREAD_FPR2(\thread)
    22  s.d $f4, THREAD_FPR4(\thread)
    23  s.d $f6, THREAD_FPR6(\thread)
    24  s.d $f8, THREAD_FPR8(\thread)
    25  s.d $f10, THREAD_FPR10(\thread)
    26  s.d $f12, THREAD_FPR12(\thread)
    27  s.d $f14, THREAD_FPR14(\thread)
    28  s.d $f16, THREAD_FPR16(\thread)
    [all …]

D | asmmacro-64.h |
    17  .macro cpu_save_nonscratch thread
    18  LONG_S s0, THREAD_REG16(\thread)
    19  LONG_S s1, THREAD_REG17(\thread)
    20  LONG_S s2, THREAD_REG18(\thread)
    21  LONG_S s3, THREAD_REG19(\thread)
    22  LONG_S s4, THREAD_REG20(\thread)
    23  LONG_S s5, THREAD_REG21(\thread)
    24  LONG_S s6, THREAD_REG22(\thread)
    25  LONG_S s7, THREAD_REG23(\thread)
    26  LONG_S sp, THREAD_REG29(\thread)
    [all …]

D | dsp.h |
    41  tsk->thread.dsp.dspr[0] = mfhi1(); \
    42  tsk->thread.dsp.dspr[1] = mflo1(); \
    43  tsk->thread.dsp.dspr[2] = mfhi2(); \
    44  tsk->thread.dsp.dspr[3] = mflo2(); \
    45  tsk->thread.dsp.dspr[4] = mfhi3(); \
    46  tsk->thread.dsp.dspr[5] = mflo3(); \
    47  tsk->thread.dsp.dspcontrol = rddsp(DSP_MASK); \
    58  mthi1(tsk->thread.dsp.dspr[0]); \
    59  mtlo1(tsk->thread.dsp.dspr[1]); \
    60  mthi2(tsk->thread.dsp.dspr[2]); \
    [all …]

D | asmmacro.h |
    84  .macro fpu_save_16even thread tmp=t0
    88  sdc1 $f0, THREAD_FPR0(\thread)
    89  sdc1 $f2, THREAD_FPR2(\thread)
    90  sdc1 $f4, THREAD_FPR4(\thread)
    91  sdc1 $f6, THREAD_FPR6(\thread)
    92  sdc1 $f8, THREAD_FPR8(\thread)
    93  sdc1 $f10, THREAD_FPR10(\thread)
    94  sdc1 $f12, THREAD_FPR12(\thread)
    95  sdc1 $f14, THREAD_FPR14(\thread)
    96  sdc1 $f16, THREAD_FPR16(\thread)
    [all …]

/arch/riscv/kernel/

D | asm-offsets.c |
    18  OFFSET(TASK_THREAD_RA, task_struct, thread.ra); in asm_offsets()
    19  OFFSET(TASK_THREAD_SP, task_struct, thread.sp); in asm_offsets()
    20  OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]); in asm_offsets()
    21  OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]); in asm_offsets()
    22  OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]); in asm_offsets()
    23  OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]); in asm_offsets()
    24  OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]); in asm_offsets()
    25  OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]); in asm_offsets()
    26  OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]); in asm_offsets()
    27  OFFSET(TASK_THREAD_S7, task_struct, thread.s[7]); in asm_offsets()
    [all …]

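The OFFSET() lines above are not ordinary C. asm-offsets.c is compiled to assembly only, so kbuild can scrape structure-member offsets out of the output and turn them into #define constants that entry.S code can use; the parisc and MIPS asm-offsets.c entries further down rely on the same mechanism. A minimal self-contained sketch of the trick, with the macro bodies paraphrased from include/linux/kbuild.h and a toy struct standing in for task_struct:

    #include <stddef.h>                     /* offsetof() */

    /* The constant is smuggled into the generated assembly as a text
     * marker; a kbuild sed pass later rewrites each "->SYM value" line
     * into "#define SYM value" in asm-offsets.h. */
    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
    #define OFFSET(sym, str, mem)  DEFINE(sym, offsetof(struct str, mem))

    struct toy_thread { unsigned long ra, sp; };
    struct toy_task   { long state; struct toy_thread thread; };

    void asm_offsets(void)
    {
            /* after the sed pass: #define TOY_THREAD_RA 8 (on LP64) */
            OFFSET(TOY_THREAD_RA, toy_task, thread.ra);
            OFFSET(TOY_THREAD_SP, toy_task, thread.sp);
    }
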
/arch/powerpc/kernel/ptrace/

D | ptrace-adv.c |
    10  struct pt_regs *regs = task->thread.regs; in user_enable_single_step()
    13  task->thread.debug.dbcr0 &= ~DBCR0_BT; in user_enable_single_step()
    14  task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC; in user_enable_single_step()
    22  struct pt_regs *regs = task->thread.regs; in user_enable_block_step()
    25  task->thread.debug.dbcr0 &= ~DBCR0_IC; in user_enable_block_step()
    26  task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT; in user_enable_block_step()
    34  struct pt_regs *regs = task->thread.regs; in user_disable_single_step()
    43  task->thread.debug.dbcr0 &= ~(DBCR0_IC | DBCR0_BT); in user_disable_single_step()
    47  if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0, in user_disable_single_step()
    48  task->thread.debug.dbcr1)) { in user_disable_single_step()
    [all …]

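On these BookE-style debug-facility parts, single-stepping is not a bit in the user-visible program state: DBCR0 selects the debug event (instruction completion for single-step, branch-taken for block-step) and MSR_DE unmasks the debug interrupt. A hedged reconstruction of the enable path, using only names visible in the matches above:

    /* Sketch of user_enable_single_step() assembled from the matched
     * lines; regs_set_return_msr() and TIF_SINGLESTEP are the kernel's
     * own, but the body is a reconstruction, not a verbatim copy. */
    void user_enable_single_step_sketch(struct task_struct *task)
    {
            struct pt_regs *regs = task->thread.regs;

            if (regs != NULL) {
                    task->thread.debug.dbcr0 &= ~DBCR0_BT;            /* no branch-taken    */
                    task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC; /* instr. completion  */
                    regs_set_return_msr(regs, regs->msr | MSR_DE);    /* unmask debug intr. */
            }
            set_tsk_thread_flag(task, TIF_SINGLESTEP);
    }
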
D | ptrace-noadv.c |
    12  struct pt_regs *regs = task->thread.regs; in user_enable_single_step()
    21  struct pt_regs *regs = task->thread.regs; in user_enable_block_step()
    30  struct pt_regs *regs = task->thread.regs; in user_disable_single_step()
    68  dabr_fake = ((child->thread.hw_brk[0].address & (~HW_BRK_TYPE_DABR)) | in ptrace_get_debugreg()
    69  (child->thread.hw_brk[0].type & HW_BRK_TYPE_DABR)); in ptrace_get_debugreg()
    82  struct thread_struct *thread = &task->thread; in ptrace_set_debugreg() local
    121 bp = thread->ptrace_bps[0]; in ptrace_set_debugreg()
    125 thread->ptrace_bps[0] = NULL; in ptrace_set_debugreg()
    142 thread->ptrace_bps[0] = bp; in ptrace_set_debugreg()
    143 thread->hw_brk[0] = hw_brk; in ptrace_set_debugreg()
    [all …]

D | ptrace-tm.c |
    28  tm_save_sprs(&tsk->thread); in flush_tmregs_to_thread()
    34  return task->thread.ckpt_regs.msr | task->thread.fpexc_mode; in get_user_ckpt_msr()
    39  task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE; in set_user_ckpt_msr()
    40  task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE; in set_user_ckpt_msr()
    46  set_trap(&task->thread.ckpt_regs, trap); in set_user_ckpt_trap()
    63  if (!MSR_TM_ACTIVE(target->thread.regs->msr)) in tm_cgpr_active()
    97  if (!MSR_TM_ACTIVE(target->thread.regs->msr)) in tm_cgpr_get()
    104 membuf_write(&to, &target->thread.ckpt_regs, sizeof(struct user_pt_regs)); in tm_cgpr_get()
    144 if (!MSR_TM_ACTIVE(target->thread.regs->msr)) in tm_cgpr_set()
    152 &target->thread.ckpt_regs, in tm_cgpr_set()
    [all …]

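Both ptrace-tm.c above and ptrace-spe.c below use the membuf regset interface: the getter streams kernel-side state into a caller-sized buffer with membuf_write() and returns its result, or an error such as -ENODATA when, as in tm_cgpr_get(), there is no active transactional state to report. A hedged sketch of that getter shape:

    /* Sketch of the membuf-based regset getter pattern seen in
     * tm_cgpr_get(); the signature follows the kernel's regset_get
     * convention, but the body is condensed from the matches above,
     * not copied from the full source. */
    static int cgpr_get_sketch(struct task_struct *target,
                               const struct user_regset *regset,
                               struct membuf to)
    {
            if (!MSR_TM_ACTIVE(target->thread.regs->msr))
                    return -ENODATA;        /* no checkpointed registers */
            return membuf_write(&to, &target->thread.ckpt_regs,
                                sizeof(struct user_pt_regs));
    }
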
D | ptrace-spe.c |
    22  return target->thread.used_spe ? regset->n : 0; in evr_active()
    30  membuf_write(&to, &target->thread.evr, sizeof(target->thread.evr)); in evr_get()
    35  return membuf_write(&to, &target->thread.acc, in evr_get()
    48  &target->thread.evr, in evr_set()
    49  0, sizeof(target->thread.evr)); in evr_set()
    56  &target->thread.acc, in evr_set()
    57  sizeof(target->thread.evr), -1); in evr_set()

/arch/parisc/kernel/

D | asm-offsets.c |
    52  DEFINE(TASK_REGS, offsetof(struct task_struct, thread.regs)); in main()
    53  DEFINE(TASK_PT_PSW, offsetof(struct task_struct, thread.regs.gr[ 0])); in main()
    54  DEFINE(TASK_PT_GR1, offsetof(struct task_struct, thread.regs.gr[ 1])); in main()
    55  DEFINE(TASK_PT_GR2, offsetof(struct task_struct, thread.regs.gr[ 2])); in main()
    56  DEFINE(TASK_PT_GR3, offsetof(struct task_struct, thread.regs.gr[ 3])); in main()
    57  DEFINE(TASK_PT_GR4, offsetof(struct task_struct, thread.regs.gr[ 4])); in main()
    58  DEFINE(TASK_PT_GR5, offsetof(struct task_struct, thread.regs.gr[ 5])); in main()
    59  DEFINE(TASK_PT_GR6, offsetof(struct task_struct, thread.regs.gr[ 6])); in main()
    60  DEFINE(TASK_PT_GR7, offsetof(struct task_struct, thread.regs.gr[ 7])); in main()
    61  DEFINE(TASK_PT_GR8, offsetof(struct task_struct, thread.regs.gr[ 8])); in main()
    [all …]

/arch/powerpc/kernel/

D | process.c |
    96  if (tsk == current && tsk->thread.regs && in check_if_tm_restore_required()
    97  MSR_TM_ACTIVE(tsk->thread.regs->msr) && in check_if_tm_restore_required()
    99  regs_set_return_msr(&tsk->thread.ckpt_regs, in check_if_tm_restore_required()
    100 tsk->thread.regs->msr); in check_if_tm_restore_required()
    161 msr = tsk->thread.regs->msr; in __giveup_fpu()
    165 regs_set_return_msr(tsk->thread.regs, msr); in __giveup_fpu()
    184 if (tsk->thread.regs) { in flush_fp_to_thread()
    194 if (tsk->thread.regs->msr & MSR_FP) { in flush_fp_to_thread()
    218 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) { in enable_kernel_fp()
    228 MSR_TM_ACTIVE(current->thread.regs->msr)) in enable_kernel_fp()
    [all …]

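The flush_fp_to_thread() matches show powerpc's lazy-FP idiom: a task's floating-point state may still live only in the CPU registers (MSR_FP set in its saved MSR), so code that wants to read thread.fp_state from memory must first force a spill. A hedged sketch of that shape, assuming the kernel's giveup_fpu() helper:

    /* Reconstruction of flush_fp_to_thread() from matched lines
     * 184/194; giveup_fpu() writes the live FP registers back into
     * tsk->thread and clears MSR_FP in the task's saved MSR. */
    void flush_fp_to_thread_sketch(struct task_struct *tsk)
    {
            if (tsk->thread.regs) {
                    preempt_disable();      /* state must not migrate mid-spill */
                    if (tsk->thread.regs->msr & MSR_FP)
                            giveup_fpu(tsk);
                    preempt_enable();
            }
    }
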
D | signal_64.c |
    86  if (tsk->thread.used_vr) in prepare_setup_sigcontext()
    89  tsk->thread.vrsave = mfspr(SPRN_VRSAVE); in prepare_setup_sigcontext()
    95  if (tsk->thread.used_vsr) in prepare_setup_sigcontext()
    124 struct pt_regs *regs = tsk->thread.regs; in __unsafe_setup_sigcontext()
    135 if (tsk->thread.used_vr) { in __unsafe_setup_sigcontext()
    137 unsafe_copy_to_user(v_regs, &tsk->thread.vr_state, in __unsafe_setup_sigcontext()
    147 unsafe_put_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out); in __unsafe_setup_sigcontext()
    165 if (tsk->thread.used_vsr && ctx_has_vsx_region) { in __unsafe_setup_sigcontext()
    219 struct pt_regs *regs = tsk->thread.regs; in setup_tm_sigcontexts()
    232 msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX); in setup_tm_sigcontexts()
    [all …]

D | signal.c |
    37  buf[i] = task->thread.TS_FPR(i); in copy_fpr_to_user()
    38  buf[i] = task->thread.fp_state.fpscr; in copy_fpr_to_user()
    51  task->thread.TS_FPR(i) = buf[i]; in copy_fpr_from_user()
    52  task->thread.fp_state.fpscr = buf[i]; in copy_fpr_from_user()
    65  buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET]; in copy_vsx_to_user()
    78  task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i]; in copy_vsx_from_user()
    91  buf[i] = task->thread.TS_CKFPR(i); in copy_ckfpr_to_user()
    92  buf[i] = task->thread.ckfp_state.fpscr; in copy_ckfpr_to_user()
    105 task->thread.TS_CKFPR(i) = buf[i]; in copy_ckfpr_from_user()
    106 task->thread.ckfp_state.fpscr = buf[i]; in copy_ckfpr_from_user()
    [all …]

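Lines 37-38 of signal.c look like a copy-paste slip but are loop body plus epilogue: the user buffer holds ELF_NFPREG doubles, the first ELF_NFPREG - 1 carrying the FP registers and the final slot the FPSCR, so when the loop exits, i indexes exactly that last slot. A hedged reconstruction of copy_fpr_to_user() with the bound made explicit:

    /* Reconstruction of copy_fpr_to_user(); ELF_NFPREG, TS_FPR() and
     * the one-double-per-slot layout are powerpc's own. */
    static unsigned long copy_fpr_to_user_sketch(void __user *to,
                                                 struct task_struct *task)
    {
            u64 buf[ELF_NFPREG];
            int i;

            /* gather the FP registers into a local buffer ... */
            for (i = 0; i < ELF_NFPREG - 1; i++)
                    buf[i] = task->thread.TS_FPR(i);
            buf[i] = task->thread.fp_state.fpscr;   /* last slot: FPSCR */
            /* ... then a single copy-out to user space */
            return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
    }
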
/arch/s390/kernel/

D | guarded_storage.c |
    17  kfree(tsk->thread.gs_cb); in guarded_storage_release()
    18  kfree(tsk->thread.gs_bc_cb); in guarded_storage_release()
    25  if (!current->thread.gs_cb) { in gs_enable()
    33  current->thread.gs_cb = gs_cb; in gs_enable()
    41  if (current->thread.gs_cb) { in gs_disable()
    43  kfree(current->thread.gs_cb); in gs_disable()
    44  current->thread.gs_cb = NULL; in gs_disable()
    55  gs_cb = current->thread.gs_bc_cb; in gs_set_bc_cb()
    60  current->thread.gs_bc_cb = gs_cb; in gs_set_bc_cb()
    71  gs_cb = current->thread.gs_bc_cb; in gs_clear_bc_cb()
    [all …]

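gs_enable() is the allocate-on-first-use pattern for per-task facility state: the guarded-storage control block is only allocated once a task actually asks for the feature, stays attached to current->thread for the task's lifetime, and guarded_storage_release() frees it (and the broadcast block) at exit. A minimal sketch of the enable path, with the hardware-load step elided:

    /* Sketch of gs_enable() using the field names from the matches
     * above; the real function also loads the new control block into
     * the guarded-storage hardware before publishing it. */
    static int gs_enable_sketch(void)
    {
            struct gs_cb *gs_cb;

            if (!current->thread.gs_cb) {           /* first use by this task */
                    gs_cb = kzalloc(sizeof(*gs_cb), GFP_KERNEL);
                    if (!gs_cb)
                            return -ENOMEM;
                    /* ... initialize and load into hardware (elided) ... */
                    current->thread.gs_cb = gs_cb;  /* owned until task exit */
            }
            return 0;
    }
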
D | process.c |
    93  dst->thread.fpu.regs = dst->thread.fpu.fprs; in arch_dup_task_struct()
    102 dst->thread.ri_cb = NULL; in arch_dup_task_struct()
    103 dst->thread.gs_cb = NULL; in arch_dup_task_struct()
    104 dst->thread.gs_bc_cb = NULL; in arch_dup_task_struct()
    119 p->thread.ksp = (unsigned long) frame; in copy_thread()
    121 save_access_regs(&p->thread.acrs[0]); in copy_thread()
    124 memset(&p->thread.per_user, 0, sizeof(p->thread.per_user)); in copy_thread()
    125 memset(&p->thread.per_event, 0, sizeof(p->thread.per_event)); in copy_thread()
    127 p->thread.per_flags = 0; in copy_thread()
    129 p->thread.user_timer = 0; in copy_thread()
    [all …]

D | ptrace.c |
    44  struct thread_struct *thread = &task->thread; in update_cr_regs() local
    58  if (task->thread.per_flags & PER_FLAG_NO_TE) in update_cr_regs()
    62  if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { in update_cr_regs()
    63  if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND) in update_cr_regs()
    72  if (task->thread.gs_cb) in update_cr_regs()
    83  new.control = thread->per_user.control; in update_cr_regs()
    84  new.start = thread->per_user.start; in update_cr_regs()
    85  new.end = thread->per_user.end; in update_cr_regs()
    138 memset(&task->thread.per_user, 0, sizeof(task->thread.per_user)); in ptrace_disable()
    139 memset(&task->thread.per_event, 0, sizeof(task->thread.per_event)); in ptrace_disable()
    [all …]

/arch/um/kernel/

D | process.c |
    87  to->thread.prev_sched = from; in __switch_to()
    90  switch_threads(&from->thread.switch_buf, &to->thread.switch_buf); in __switch_to()
    93  return current->thread.prev_sched; in __switch_to()
    98  struct pt_regs *regs = &current->thread.regs; in interrupt_end()
    123 if (current->thread.prev_sched != NULL) in new_thread_handler()
    124 schedule_tail(current->thread.prev_sched); in new_thread_handler()
    125 current->thread.prev_sched = NULL; in new_thread_handler()
    127 fn = current->thread.request.u.thread.proc; in new_thread_handler()
    128 arg = current->thread.request.u.thread.arg; in new_thread_handler()
    134 userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs); in new_thread_handler()
    [all …]

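UML is the odd one out in this list: __switch_to() does not save a register frame in entry code, it swaps host jump buffers (switch_threads(&from->thread.switch_buf, &to->thread.switch_buf)), so each kernel thread is really a host context. A self-contained userspace analogue of the idea using POSIX ucontext, shown to illustrate the mechanism, not UML's actual jmp_buf machinery:

    #include <stdio.h>
    #include <stdlib.h>
    #include <ucontext.h>

    static ucontext_t ctx_main, ctx_thread;

    static void thread_proc(void)
    {
            printf("in new thread\n");
            swapcontext(&ctx_thread, &ctx_main);    /* like switch_threads():
                                                       save self, resume other */
    }

    int main(void)
    {
            char *stack = malloc(64 * 1024);

            getcontext(&ctx_thread);
            ctx_thread.uc_stack.ss_sp = stack;      /* each context owns a stack */
            ctx_thread.uc_stack.ss_size = 64 * 1024;
            ctx_thread.uc_link = &ctx_main;
            makecontext(&ctx_thread, thread_proc, 0);

            swapcontext(&ctx_main, &ctx_thread);    /* "context switch" out ...  */
            printf("back in main\n");               /* ... and back again        */
            free(stack);
            return 0;
    }
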
/arch/sh/kernel/cpu/sh4/

D | fpu.c |
    84  :"0"((char *)(&tsk->thread.xstate->hardfpu.status)), in save_fpu()
    134 :"0" (tsk->thread.xstate), "r" (FPSCR_RCHG) in restore_fpu()
    230 if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR)) in ieee_fpe_handler()
    232 denormal_to_double(&tsk->thread.xstate->hardfpu, in ieee_fpe_handler()
    248 hx = tsk->thread.xstate->hardfpu.fp_regs[n]; in ieee_fpe_handler()
    249 hy = tsk->thread.xstate->hardfpu.fp_regs[m]; in ieee_fpe_handler()
    250 fpscr = tsk->thread.xstate->hardfpu.fpscr; in ieee_fpe_handler()
    260 | tsk->thread.xstate->hardfpu.fp_regs[n + 1]; in ieee_fpe_handler()
    262 | tsk->thread.xstate->hardfpu.fp_regs[m + 1]; in ieee_fpe_handler()
    264 tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; in ieee_fpe_handler()
    [all …]

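Lines 260-264 show how the SH-4 IEEE-exception helper handles doubles: a double occupies an even/odd pair of 32-bit FP registers, so the software path glues the pair into one 64-bit value, operates on it, and splits the result back. A self-contained sketch of that packing, with fp_regs[] standing in for tsk->thread.xstate->hardfpu.fp_regs:

    #include <stdint.h>

    /* Join and split an even/odd 32-bit FP register pair into the
     * 64-bit double the soft-IEEE code works on (pattern of lines
     * 260-264 above; fp_regs[] is a stand-in for the hardfpu array). */
    static uint64_t fp_pair_read(const uint32_t *fp_regs, int n)
    {
            return (uint64_t)fp_regs[n] << 32 | fp_regs[n + 1];
    }

    static void fp_pair_write(uint32_t *fp_regs, int n, uint64_t llx)
    {
            fp_regs[n]     = llx >> 32;             /* even reg: high word */
            fp_regs[n + 1] = (uint32_t)llx;         /* odd reg:  low word  */
    }
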
/arch/sparc/kernel/

D | sigutil_32.c |
    21  fpsave(&current->thread.float_regs[0], &current->thread.fsr, in save_fpu_state()
    22  &current->thread.fpqueue[0], &current->thread.fpqdepth); in save_fpu_state()
    29  fpsave(&current->thread.float_regs[0], &current->thread.fsr, in save_fpu_state()
    30  &current->thread.fpqueue[0], &current->thread.fpqdepth); in save_fpu_state()
    36  &current->thread.float_regs[0], in save_fpu_state()
    38  err |= __put_user(current->thread.fsr, &fpu->si_fsr); in save_fpu_state()
    39  err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth); in save_fpu_state()
    40  if (current->thread.fpqdepth != 0) in save_fpu_state()
    42  &current->thread.fpqueue[0], in save_fpu_state()
    71  err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0], in restore_fpu_state()
    [all …]

/arch/mips/kernel/

D | asm-offsets.c |
    111 OFFSET(THREAD_REG16, task_struct, thread.reg16); in output_thread_defines()
    112 OFFSET(THREAD_REG17, task_struct, thread.reg17); in output_thread_defines()
    113 OFFSET(THREAD_REG18, task_struct, thread.reg18); in output_thread_defines()
    114 OFFSET(THREAD_REG19, task_struct, thread.reg19); in output_thread_defines()
    115 OFFSET(THREAD_REG20, task_struct, thread.reg20); in output_thread_defines()
    116 OFFSET(THREAD_REG21, task_struct, thread.reg21); in output_thread_defines()
    117 OFFSET(THREAD_REG22, task_struct, thread.reg22); in output_thread_defines()
    118 OFFSET(THREAD_REG23, task_struct, thread.reg23); in output_thread_defines()
    119 OFFSET(THREAD_REG29, task_struct, thread.reg29); in output_thread_defines()
    120 OFFSET(THREAD_REG30, task_struct, thread.reg30); in output_thread_defines()
    [all …]

/arch/m68k/kernel/

D | process.c |
    95  current->thread.fc = USER_DATA; in flush_thread()
    151 p->thread.ksp = (unsigned long)frame; in copy_thread()
    152 p->thread.esp0 = (unsigned long)&frame->regs; in copy_thread()
    158 p->thread.fc = USER_DATA; in copy_thread()
    167 p->thread.usp = 0; in copy_thread()
    174 p->thread.usp = usp ?: rdusp(); in copy_thread()
    182 asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory"); in copy_thread()
    184 if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) { in copy_thread()
    191 : "m" (p->thread.fp[0]), in copy_thread()
    192 "m" (p->thread.fpcntl[0]), in copy_thread()
    [all …]

/arch/m68k/mm/

D | fault.c |
    27  signo = current->thread.signo; in send_fault_sig()
    28  si_code = current->thread.code; in send_fault_sig()
    29  addr = (void __user *)current->thread.faddr; in send_fault_sig()
    185 current->thread.signo = SIGBUS; in do_page_fault()
    186 current->thread.faddr = address; in do_page_fault()
    190 current->thread.signo = SIGBUS; in do_page_fault()
    191 current->thread.code = BUS_ADRERR; in do_page_fault()
    192 current->thread.faddr = address; in do_page_fault()
    196 current->thread.signo = SIGSEGV; in do_page_fault()
    197 current->thread.code = SEGV_MAPERR; in do_page_fault()
    [all …]

/arch/sh/kernel/

D | process.c |
    29  if (src->thread.xstate) { in arch_dup_task_struct()
    30  dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep, in arch_dup_task_struct()
    32  if (!dst->thread.xstate) in arch_dup_task_struct()
    34  memcpy(dst->thread.xstate, src->thread.xstate, xstate_size); in arch_dup_task_struct()
    42  if (tsk->thread.xstate) { in free_thread_xstate()
    43  kmem_cache_free(task_xstate_cachep, tsk->thread.xstate); in free_thread_xstate()
    44  tsk->thread.xstate = NULL; in free_thread_xstate()

/arch/s390/include/asm/

D | switch_to.h |
    39  save_access_regs(&prev->thread.acrs[0]); \
    40  save_ri_cb(prev->thread.ri_cb); \
    41  save_gs_cb(prev->thread.gs_cb); \
    43  restore_access_regs(&next->thread.acrs[0]); \
    44  restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
    45  restore_gs_cb(next->thread.gs_cb); \

/arch/arm/vfp/

D | vfpmodule.c |
    62  static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread) in vfp_state_in_hw() argument
    65  if (thread->vfpstate.hard.cpu != cpu) in vfp_state_in_hw()
    68  return vfp_current_hw_state[cpu] == &thread->vfpstate; in vfp_state_in_hw()
    76  static void vfp_force_reload(unsigned int cpu, struct thread_info *thread) in vfp_force_reload() argument
    78  if (vfp_state_in_hw(cpu, thread)) { in vfp_force_reload()
    83  thread->vfpstate.hard.cpu = NR_CPUS; in vfp_force_reload()
    90  static void vfp_thread_flush(struct thread_info *thread) in vfp_thread_flush() argument
    92  union vfp_state *vfp = &thread->vfpstate; in vfp_thread_flush()
    118 static void vfp_thread_exit(struct thread_info *thread) in vfp_thread_exit() argument
    121 union vfp_state *vfp = &thread->vfpstate; in vfp_thread_exit()
    [all …]

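The VFP code tracks lazily switched FP state with a per-CPU owner pointer: the hardware registers belong to a thread only if vfp_current_hw_state[cpu] points at that thread's save area and (on SMP) the thread last used VFP on this CPU; vfp_force_reload() drops the claim and writes the impossible CPU number NR_CPUS as a sentinel. A hedged condensation of the two matched functions:

    /* Condensed from the vfp_state_in_hw()/vfp_force_reload() matches;
     * vfp_current_hw_state[] is the kernel's per-CPU owner table, and
     * the FPEXC disable that makes the next VFP use trap is elided. */
    static bool vfp_state_in_hw_sketch(unsigned int cpu, struct thread_info *thread)
    {
            if (thread->vfpstate.hard.cpu != cpu)   /* SMP: last ran elsewhere */
                    return false;
            return vfp_current_hw_state[cpu] == &thread->vfpstate;
    }

    static void vfp_force_reload_sketch(unsigned int cpu, struct thread_info *thread)
    {
            if (vfp_state_in_hw_sketch(cpu, thread))
                    vfp_current_hw_state[cpu] = NULL;   /* nobody owns the HW now */
            thread->vfpstate.hard.cpu = NR_CPUS;        /* sentinel: no valid CPU  */
    }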