/kernel/linux/linux-5.10/tools/perf/util/

thread.c
    22  int thread__init_maps(struct thread *thread, struct machine *machine)
    24      pid_t pid = thread->pid_;
    26      if (pid == thread->tid || pid == -1) {
    27          thread->maps = maps__new(machine);
    29          struct thread *leader = __machine__findnew_thread(machine, pid, pid);
    31              thread->maps = maps__get(leader->maps);
    36      return thread->maps ? 0 : -1;
    39  struct thread *thread__new(pid_t pid, pid_t tid)
    43      struct thread *thread = zalloc(sizeof(*thread));
    45      if (thread != NULL) {
    [all …]

thread.h
    32  struct thread {
    68  struct thread *thread__new(pid_t pid, pid_t tid);
    69  int thread__init_maps(struct thread *thread, struct machine *machine);
    70  void thread__delete(struct thread *thread);
    72  struct thread *thread__get(struct thread *thread);
    73  void thread__put(struct thread *thread);
    75  static inline void __thread__zput(struct thread **thread)
    77      thread__put(*thread);
    78      *thread = NULL;
    81  #define thread__zput(thread) __thread__zput(&thread)
    [all …]

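The thread.h matches above outline perf's reference-counted thread object: thread__new() creates it, thread__get()/thread__put() adjust the count, and thread__zput() drops the reference and clears the caller's pointer. The stand-alone sketch below illustrates that get/put/zput idiom with an invented struct obj; it is a minimal sketch, not the actual perf implementation.

    /* Hypothetical sketch of the get/put/zput refcount idiom used by the
     * perf thread API above; names and fields are illustrative only. */
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        int refcnt;                    /* 1 on creation, freed when it drops to 0 */
    };

    static struct obj *obj__new(void)
    {
        struct obj *o = calloc(1, sizeof(*o));

        if (o)
            o->refcnt = 1;
        return o;
    }

    static struct obj *obj__get(struct obj *o)
    {
        if (o)
            o->refcnt++;
        return o;
    }

    static void obj__put(struct obj *o)
    {
        if (o && --o->refcnt == 0)
            free(o);
    }

    /* zput: drop the reference and poison the caller's pointer, as
     * thread__zput() does via __thread__zput(). */
    #define obj__zput(o) do { obj__put(o); (o) = NULL; } while (0)

    int main(void)
    {
        struct obj *a = obj__new();
        struct obj *b = obj__get(a);   /* second owner, like a shared leader->maps */

        obj__zput(b);                  /* b becomes NULL, a still holds a reference */
        obj__zput(a);
        printf("a=%p b=%p\n", (void *)a, (void *)b);
        return 0;
    }

The zput form exists so a stale pointer cannot be dereferenced after its reference has already been dropped.
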
thread-stack.h
    14  struct thread;
    55      struct thread *thread;
    83  int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
    86  void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr);
    87  void thread_stack__sample(struct thread *thread, int cpu, struct ip_callchain *chain,
    89  void thread_stack__sample_late(struct thread *thread, int cpu,
    92  void thread_stack__br_sample(struct thread *thread, int cpu,
    94  void thread_stack__br_sample_late(struct thread *thread, int cpu,
    97  int thread_stack__flush(struct thread *thread);
    98  void thread_stack__free(struct thread *thread);
    [all …]

thread-stack.c
    113 static inline bool thread_stack__per_cpu(struct thread *thread)
    115     return !(thread->tid || thread->pid_);
    136 static int thread_stack__init(struct thread_stack *ts, struct thread *thread,
    158     if (thread->maps && thread->maps->machine) {
    159         struct machine *machine = thread->maps->machine;
    173 static struct thread_stack *thread_stack__new(struct thread *thread, int cpu,
    178     struct thread_stack *ts = thread->ts, *new_ts;
    182     if (thread_stack__per_cpu(thread) && cpu > 0)
    192         zfree(&thread->ts);
    193         thread->ts = new_ts;
    [all …]

db-export.c
    62  int db_export__thread(struct db_export *dbe, struct thread *thread,
    63              struct machine *machine, struct thread *main_thread)
    67      if (thread->db_id)
    70      thread->db_id = ++dbe->thread_last_db_id;
    76      return dbe->export_thread(dbe, thread, main_thread_db_id,
    83                  struct thread *thread)
    88      return dbe->export_comm(dbe, comm, thread);
    94                  struct thread *thread)
    99      return __db_export__comm(dbe, comm, thread);
    109                 struct thread *main_thread)
    [all …]

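db_export__thread() above assigns thread->db_id lazily: the first export takes the next value from a monotonically increasing counter, and later calls return early. Below is a minimal, hypothetical sketch of that pattern; the struct names are invented for illustration and are not perf's db-export API.

    /* Lazy "assign an id on first export" pattern. */
    #include <stdint.h>
    #include <stdio.h>

    struct exporter {
        uint64_t thread_last_db_id;    /* last id handed out */
    };

    struct item {
        uint64_t db_id;                /* 0 means "not exported yet" */
    };

    static int export_item(struct exporter *e, struct item *it)
    {
        if (it->db_id)                 /* already exported, nothing to do */
            return 0;

        it->db_id = ++e->thread_last_db_id;
        /* ... a real exporter would now write the record out ... */
        return 0;
    }

    int main(void)
    {
        struct exporter e = { 0 };
        struct item a = { 0 }, b = { 0 };

        export_item(&e, &a);
        export_item(&e, &a);           /* idempotent: a keeps id 1 */
        export_item(&e, &b);
        printf("a=%llu b=%llu\n",
               (unsigned long long)a.db_id, (unsigned long long)b.db_id);
        return 0;
    }
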
/kernel/linux/linux-5.10/arch/mips/include/asm/

asmmacro-32.h
    16  .macro  fpu_save_single thread tmp=t0
    20      s.d $f0, THREAD_FPR0(\thread)
    21      s.d $f2, THREAD_FPR2(\thread)
    22      s.d $f4, THREAD_FPR4(\thread)
    23      s.d $f6, THREAD_FPR6(\thread)
    24      s.d $f8, THREAD_FPR8(\thread)
    25      s.d $f10, THREAD_FPR10(\thread)
    26      s.d $f12, THREAD_FPR12(\thread)
    27      s.d $f14, THREAD_FPR14(\thread)
    28      s.d $f16, THREAD_FPR16(\thread)
    [all …]

asmmacro-64.h
    17  .macro  cpu_save_nonscratch thread
    18      LONG_S  s0, THREAD_REG16(\thread)
    19      LONG_S  s1, THREAD_REG17(\thread)
    20      LONG_S  s2, THREAD_REG18(\thread)
    21      LONG_S  s3, THREAD_REG19(\thread)
    22      LONG_S  s4, THREAD_REG20(\thread)
    23      LONG_S  s5, THREAD_REG21(\thread)
    24      LONG_S  s6, THREAD_REG22(\thread)
    25      LONG_S  s7, THREAD_REG23(\thread)
    26      LONG_S  sp, THREAD_REG29(\thread)
    [all …]

asmmacro.h
    84  .macro  fpu_save_16even thread tmp=t0
    88      sdc1    $f0, THREAD_FPR0(\thread)
    89      sdc1    $f2, THREAD_FPR2(\thread)
    90      sdc1    $f4, THREAD_FPR4(\thread)
    91      sdc1    $f6, THREAD_FPR6(\thread)
    92      sdc1    $f8, THREAD_FPR8(\thread)
    93      sdc1    $f10, THREAD_FPR10(\thread)
    94      sdc1    $f12, THREAD_FPR12(\thread)
    95      sdc1    $f14, THREAD_FPR14(\thread)
    96      sdc1    $f16, THREAD_FPR16(\thread)
    [all …]

dsp.h
    41      tsk->thread.dsp.dspr[0] = mfhi1();              \
    42      tsk->thread.dsp.dspr[1] = mflo1();              \
    43      tsk->thread.dsp.dspr[2] = mfhi2();              \
    44      tsk->thread.dsp.dspr[3] = mflo2();              \
    45      tsk->thread.dsp.dspr[4] = mfhi3();              \
    46      tsk->thread.dsp.dspr[5] = mflo3();              \
    47      tsk->thread.dsp.dspcontrol = rddsp(DSP_MASK);   \
    58      mthi1(tsk->thread.dsp.dspr[0]);                 \
    59      mtlo1(tsk->thread.dsp.dspr[1]);                 \
    60      mthi2(tsk->thread.dsp.dspr[2]);                 \
    [all …]

/kernel/linux/linux-5.10/arch/riscv/kernel/

asm-offsets.c
    16      OFFSET(TASK_THREAD_RA, task_struct, thread.ra);
    17      OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
    18      OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]);
    19      OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]);
    20      OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]);
    21      OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]);
    22      OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]);
    23      OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]);
    24      OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]);
    25      OFFSET(TASK_THREAD_S7, task_struct, thread.s[7]);
    [all …]

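Files like asm-offsets.c are compiled and parsed at build time so that assembly code can use structure-member offsets (for example the THREAD_REG*/THREAD_FPR* constants used by the MIPS macros earlier) as plain numbers. The userspace sketch below illustrates the offsetof()-based idea only; the struct layout is invented for the example and is not the kernel's real task_struct.

    /* Print member offsets the way an asm-offsets generator would. */
    #include <stdio.h>
    #include <stddef.h>

    struct thread_struct {
        unsigned long ra;
        unsigned long sp;
        unsigned long s[12];
    };

    struct task_struct {
        long state;
        struct thread_struct thread;
    };

    #define OFFSET(sym, str, mem) \
        printf("#define %-20s %zu\n", #sym, offsetof(struct str, mem))

    int main(void)
    {
        OFFSET(TASK_THREAD_RA, task_struct, thread.ra);
        OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
        OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]);
        return 0;
    }
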
/kernel/linux/linux-5.10/arch/powerpc/kernel/ptrace/

ptrace-adv.c
    10      struct pt_regs *regs = task->thread.regs;
    13      task->thread.debug.dbcr0 &= ~DBCR0_BT;
    14      task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
    22      struct pt_regs *regs = task->thread.regs;
    25      task->thread.debug.dbcr0 &= ~DBCR0_IC;
    26      task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
    34      struct pt_regs *regs = task->thread.regs;
    43      task->thread.debug.dbcr0 &= ~(DBCR0_IC | DBCR0_BT);
    47      if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
    48                  task->thread.debug.dbcr1)) {
    [all …]

ptrace-noadv.c
    12      struct pt_regs *regs = task->thread.regs;
    23      struct pt_regs *regs = task->thread.regs;
    34      struct pt_regs *regs = task->thread.regs;
    72          dabr_fake = ((child->thread.hw_brk[0].address & (~HW_BRK_TYPE_DABR)) |
    73                  (child->thread.hw_brk[0].type & HW_BRK_TYPE_DABR));
    86      struct thread_struct *thread = &task->thread;
    125     bp = thread->ptrace_bps[0];
    129         thread->ptrace_bps[0] = NULL;
    146     thread->ptrace_bps[0] = bp;
    147     thread->hw_brk[0] = hw_brk;
    [all …]

ptrace-tm.c
    28      tm_save_sprs(&tsk->thread);
    34      return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
    39      task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
    40      task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
    46      set_trap(&task->thread.ckpt_regs, trap);
    63      if (!MSR_TM_ACTIVE(target->thread.regs->msr))
    92      if (!MSR_TM_ACTIVE(target->thread.regs->msr))
    99      membuf_write(&to, &target->thread.ckpt_regs,
    106     membuf_write(&to, &target->thread.ckpt_regs.orig_gpr3,
    143     if (!MSR_TM_ACTIVE(target->thread.regs->msr))
    [all …]

/kernel/linux/linux-5.10/tools/perf/tests/

dwarf-unwind.c
    52  int test_dwarf_unwind__thread(struct thread *thread);
    54  int test_dwarf_unwind__krava_3(struct thread *thread);
    55  int test_dwarf_unwind__krava_2(struct thread *thread);
    56  int test_dwarf_unwind__krava_1(struct thread *thread);
    98  noinline int test_dwarf_unwind__thread(struct thread *thread)
    106     if (test__arch_unwind_sample(&sample, thread)) {
    111     err = unwind__get_entries(unwind_entry, &cnt, thread,
    132     struct thread *thread = *(struct thread **)p1;
    138     global_unwind_retval = test_dwarf_unwind__thread(thread);
    141     global_unwind_retval = test_dwarf_unwind__thread(thread);
    [all …]

/kernel/linux/linux-5.10/drivers/mailbox/

mtk-cmdq-mailbox.c
    65      struct cmdq_thread  *thread;
    75      struct cmdq_thread  *thread;
    94  static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
    98      writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);
    101     if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
    104     if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
    107             (u32)(thread->base - cmdq->base));
    114 static void cmdq_thread_resume(struct cmdq_thread *thread)
    116     writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
    130 static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
    [all …]

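cmdq_thread_suspend() above writes a suspend request and then polls a status register with readl_poll_timeout_atomic() until the hardware thread reports suspended or the wait times out. The simplified userspace sketch below shows only that request-then-poll pattern; the fake_status variable and usleep-based loop stand in for MMIO reads and the real poll helper.

    /* Request-then-poll-with-timeout pattern, userspace stand-in. */
    #include <stdio.h>
    #include <unistd.h>

    #define STATUS_SUSPENDED  0x2
    #define TIMEOUT_US        1000
    #define POLL_DELAY_US     10

    static volatile unsigned int fake_status;   /* would be an MMIO register */

    static int poll_suspended(void)
    {
        unsigned int waited = 0;

        while (!(fake_status & STATUS_SUSPENDED)) {
            if (waited >= TIMEOUT_US)
                return -1;              /* timed out, like -ETIMEDOUT */
            usleep(POLL_DELAY_US);
            waited += POLL_DELAY_US;
        }
        return 0;
    }

    int main(void)
    {
        fake_status = STATUS_SUSPENDED; /* pretend the hardware acknowledged */
        printf("suspend %s\n", poll_suspended() ? "timed out" : "done");
        return 0;
    }
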
/kernel/linux/linux-5.10/tools/perf/scripts/python/

stat-cpi.py
    10  def get_key(time, event, cpu, thread):
    11      return "%d-%s-%d-%d" % (time, event, cpu, thread)
    13  def store_key(time, cpu, thread):
    20      if (thread not in threads):
    21          threads.append(thread)
    23  def store(time, event, cpu, thread, val, ena, run):
    27      store_key(time, cpu, thread)
    28      key = get_key(time, event, cpu, thread)
    31  def get(time, event, cpu, thread):
    32      key = get_key(time, event, cpu, thread)
    [all …]

/kernel/linux/linux-5.10/arch/parisc/kernel/

asm-offsets.c
    53      DEFINE(TASK_REGS, offsetof(struct task_struct, thread.regs));
    54      DEFINE(TASK_PT_PSW, offsetof(struct task_struct, thread.regs.gr[ 0]));
    55      DEFINE(TASK_PT_GR1, offsetof(struct task_struct, thread.regs.gr[ 1]));
    56      DEFINE(TASK_PT_GR2, offsetof(struct task_struct, thread.regs.gr[ 2]));
    57      DEFINE(TASK_PT_GR3, offsetof(struct task_struct, thread.regs.gr[ 3]));
    58      DEFINE(TASK_PT_GR4, offsetof(struct task_struct, thread.regs.gr[ 4]));
    59      DEFINE(TASK_PT_GR5, offsetof(struct task_struct, thread.regs.gr[ 5]));
    60      DEFINE(TASK_PT_GR6, offsetof(struct task_struct, thread.regs.gr[ 6]));
    61      DEFINE(TASK_PT_GR7, offsetof(struct task_struct, thread.regs.gr[ 7]));
    62      DEFINE(TASK_PT_GR8, offsetof(struct task_struct, thread.regs.gr[ 8]));
    [all …]

/kernel/linux/linux-5.10/Documentation/vm/

mmu_notifier.rst
    41      CPU-thread-0  {try to write to addrA}
    42      CPU-thread-1  {try to write to addrB}
    43      CPU-thread-2  {}
    44      CPU-thread-3  {}
    45      DEV-thread-0  {read addrA and populate device TLB}
    46      DEV-thread-2  {read addrB and populate device TLB}
    48      CPU-thread-0  {COW_step0: {mmu_notifier_invalidate_range_start(addrA)}}
    49      CPU-thread-1  {COW_step0: {mmu_notifier_invalidate_range_start(addrB)}}
    50      CPU-thread-2  {}
    51      CPU-thread-3  {}
    [all …]

/kernel/linux/linux-5.10/arch/powerpc/kernel/

process.c
    95      if (tsk == current && tsk->thread.regs &&
    96          MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
    98          tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
    159     msr = tsk->thread.regs->msr;
    163     tsk->thread.regs->msr = msr;
    182     if (tsk->thread.regs) {
    192         if (tsk->thread.regs->msr & MSR_FP) {
    216     if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
    226         MSR_TM_ACTIVE(current->thread.regs->msr))
    242     msr = tsk->thread.regs->msr;
    [all …]

/kernel/liteos_m/kal/posix/src/

pthread.c
    84  static inline bool IsPthread(pthread_t thread)
    87      if ((UINT32)thread > LOSCFG_BASE_CORE_TSK_LIMIT) {
    90      tcb = OS_TCB_FROM_TID((UINT32)thread);
    177     pthread_t thread = pthread_self();
    178     if (!IsPthread(thread)) {
    179         … PRINT_ERR("[%s:%d] This task %lu is not a posix thread!!!\n", __FUNCTION__, __LINE__, thread);
    183     tcb = OS_TCB_FROM_TID((UINT32)thread);
    194 int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
    202     if ((thread == NULL) || (startRoutine == NULL)) {
    226     *thread = (pthread_t)taskID;
    [all …]

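The LiteOS shim above maps pthread_t values onto task control blocks and implements pthread_create() on top of the native task API. For reference, a plain POSIX usage example of that same interface follows; this is generic code, not LiteOS internals, and builds with -lpthread.

    /* Standard pthread_create()/pthread_join() usage. */
    #include <pthread.h>
    #include <stdio.h>

    static void *worker(void *arg)
    {
        int *value = arg;

        printf("worker got %d\n", *value);
        return NULL;
    }

    int main(void)
    {
        pthread_t thread;
        int value = 42;

        /* attr == NULL selects default attributes, the common case the
         * shim's pthread_create() also handles. */
        if (pthread_create(&thread, NULL, worker, &value) != 0) {
            perror("pthread_create");
            return 1;
        }
        pthread_join(thread, NULL);
        return 0;
    }
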
/kernel/linux/linux-5.10/arch/sh/kernel/cpu/sh4/

fpu.c
    84      :"0"((char *)(&tsk->thread.xstate->hardfpu.status)),
    134     :"0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
    230     if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR))
    232         denormal_to_double(&tsk->thread.xstate->hardfpu,
    248     hx = tsk->thread.xstate->hardfpu.fp_regs[n];
    249     hy = tsk->thread.xstate->hardfpu.fp_regs[m];
    250     fpscr = tsk->thread.xstate->hardfpu.fpscr;
    260         | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
    262         | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
    264     tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
    [all …]

/kernel/linux/linux-5.10/arch/um/kernel/

process.c
    87      to->thread.prev_sched = from;
    90      switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
    93      return current->thread.prev_sched;
    98      struct pt_regs *regs = &current->thread.regs;
    123     if (current->thread.prev_sched != NULL)
    124         schedule_tail(current->thread.prev_sched);
    125     current->thread.prev_sched = NULL;
    127     fn = current->thread.request.u.thread.proc;
    128     arg = current->thread.request.u.thread.arg;
    134     userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
    [all …]

/kernel/linux/linux-5.10/arch/s390/kernel/

process.c
    79      dst->thread.fpu.regs = dst->thread.fpu.fprs;
    88      dst->thread.ri_cb = NULL;
    89      dst->thread.gs_cb = NULL;
    90      dst->thread.gs_bc_cb = NULL;
    105     p->thread.ksp = (unsigned long) frame;
    107     save_access_regs(&p->thread.acrs[0]);
    109     p->thread.mm_segment = get_fs();
    111     memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
    112     memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
    114     p->thread.per_flags = 0;
    [all …]

guarded_storage.c
    17      kfree(tsk->thread.gs_cb);
    18      kfree(tsk->thread.gs_bc_cb);
    25      if (!current->thread.gs_cb) {
    33          current->thread.gs_cb = gs_cb;
    41      if (current->thread.gs_cb) {
    43          kfree(current->thread.gs_cb);
    44          current->thread.gs_cb = NULL;
    55      gs_cb = current->thread.gs_bc_cb;
    60          current->thread.gs_bc_cb = gs_cb;
    71      gs_cb = current->thread.gs_bc_cb;
    [all …]

/kernel/linux/linux-5.10/arch/mips/kernel/

asm-offsets.c
    113     OFFSET(THREAD_REG16, task_struct, thread.reg16);
    114     OFFSET(THREAD_REG17, task_struct, thread.reg17);
    115     OFFSET(THREAD_REG18, task_struct, thread.reg18);
    116     OFFSET(THREAD_REG19, task_struct, thread.reg19);
    117     OFFSET(THREAD_REG20, task_struct, thread.reg20);
    118     OFFSET(THREAD_REG21, task_struct, thread.reg21);
    119     OFFSET(THREAD_REG22, task_struct, thread.reg22);
    120     OFFSET(THREAD_REG23, task_struct, thread.reg23);
    121     OFFSET(THREAD_REG29, task_struct, thread.reg29);
    122     OFFSET(THREAD_REG30, task_struct, thread.reg30);
    [all …]