Searched refs:regs (Results 1 – 25 of 50) sorted by relevance

/kernel/entry/
common.c
19 static __always_inline void __enter_from_user_mode(struct pt_regs *regs) in __enter_from_user_mode() argument
21 arch_enter_from_user_mode(regs); in __enter_from_user_mode()
28 kmsan_unpoison_entry_regs(regs); in __enter_from_user_mode()
33 void noinstr enter_from_user_mode(struct pt_regs *regs) in enter_from_user_mode() argument
35 __enter_from_user_mode(regs); in enter_from_user_mode()
38 static inline void syscall_enter_audit(struct pt_regs *regs, long syscall) in syscall_enter_audit() argument
43 syscall_get_arguments(current, regs, args); in syscall_enter_audit()
48 static long syscall_trace_enter(struct pt_regs *regs, long syscall, in syscall_trace_enter() argument
59 if (syscall_user_dispatch(regs)) in syscall_trace_enter()
65 ret = ptrace_report_syscall_entry(regs); in syscall_trace_enter()
[all …]
syscall_user_dispatch.c
20 static void trigger_sigsys(struct pt_regs *regs) in trigger_sigsys() argument
30 info.si_syscall = syscall_get_nr(current, regs); in trigger_sigsys()
35 bool syscall_user_dispatch(struct pt_regs *regs) in syscall_user_dispatch() argument
40 if (likely(instruction_pointer(regs) - sd->offset < sd->len)) in syscall_user_dispatch()
43 if (unlikely(arch_syscall_is_vdso_sigreturn(regs))) in syscall_user_dispatch()
66 syscall_rollback(current, regs); in syscall_user_dispatch()
67 trigger_sigsys(regs); in syscall_user_dispatch()
/kernel/trace/
trace_syscalls.c
67 trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs) in trace_get_syscall_nr() argument
69 if (unlikely(arch_trace_is_compat_syscall(regs))) in trace_get_syscall_nr()
72 return syscall_get_nr(task, regs); in trace_get_syscall_nr()
76 trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs) in trace_get_syscall_nr() argument
78 return syscall_get_nr(task, regs); in trace_get_syscall_nr()
291 static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) in ftrace_syscall_enter() argument
302 syscall_nr = trace_get_syscall_nr(current, regs); in ftrace_syscall_enter()
326 syscall_get_arguments(current, regs, args); in ftrace_syscall_enter()
332 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) in ftrace_syscall_exit() argument
341 syscall_nr = trace_get_syscall_nr(current, regs); in ftrace_syscall_exit()
[all …]
trace_fprobe.c
135 struct pt_regs *regs = rec; in process_fetch_insn() local
143 val = regs_get_kernel_stack_nth(regs, code->param); in process_fetch_insn()
146 val = kernel_stack_pointer(regs); in process_fetch_insn()
149 val = regs_return_value(regs); in process_fetch_insn()
153 val = regs_get_kernel_argument(regs, code->param); in process_fetch_insn()
173 struct pt_regs *regs, in NOKPROBE_SYMBOL()
187 dsize = __get_data_size(&tf->tp, regs); in NOKPROBE_SYMBOL()
194 fbuffer.regs = regs; in NOKPROBE_SYMBOL()
197 store_trace_args(&entry[1], &tf->tp, regs, sizeof(*entry), dsize); in NOKPROBE_SYMBOL()
204 struct pt_regs *regs) in fentry_trace_func() argument
[all …]
trace_uprobe.c
91 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
93 unsigned long func, struct pt_regs *regs);
107 static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n) in get_user_stack_nth() argument
110 unsigned long addr = user_stack_pointer(regs); in get_user_stack_nth()
217 struct pt_regs *regs = rec; in process_fetch_insn() local
224 val = regs_get_register(regs, code->param); in process_fetch_insn()
227 val = get_user_stack_nth(regs, code->param); in process_fetch_insn()
230 val = user_stack_pointer(regs); in process_fetch_insn()
233 val = regs_return_value(regs); in process_fetch_insn()
947 unsigned long func, struct pt_regs *regs, in __uprobe_trace_func() argument
[all …]
trace_kprobe.c
239 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
241 struct pt_regs *regs);
1304 struct pt_regs *regs = rec; in process_fetch_insn() local
1312 val = regs_get_register(regs, code->param); in process_fetch_insn()
1315 val = regs_get_kernel_stack_nth(regs, code->param); in process_fetch_insn()
1318 val = kernel_stack_pointer(regs); in process_fetch_insn()
1321 val = regs_return_value(regs); in process_fetch_insn()
1325 val = regs_get_kernel_argument(regs, code->param); in process_fetch_insn()
1344 __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, in NOKPROBE_SYMBOL()
1357 dsize = __get_data_size(&tk->tp, regs); in NOKPROBE_SYMBOL()
[all …]
bpf_trace.c
154 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc) in BPF_CALL_2() argument
156 regs_set_return_value(regs, rc); in BPF_CALL_2()
157 override_function_with_return(regs); in BPF_CALL_2()
618 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, in __bpf_perf_event_output() argument
647 return perf_event_output(event, sd, regs); in __bpf_perf_event_output()
660 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, in BPF_CALL_5() argument
691 err = __bpf_perf_event_output(regs, map, flags, &raw, sd); in BPF_CALL_5()
711 struct pt_regs regs[3]; member
734 struct pt_regs *regs; in bpf_event_output() local
746 regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]); in bpf_event_output()
[all …]
rethook.c
207 void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount) in rethook_hook() argument
209 arch_rethook_prepare(node, regs, mcount); in rethook_hook()
277 void __weak arch_rethook_fixup_return(struct pt_regs *regs, in arch_rethook_fixup_return() argument
289 unsigned long rethook_trampoline_handler(struct pt_regs *regs, in rethook_trampoline_handler() argument
303 instruction_pointer_set(regs, correct_ret_addr); in rethook_trampoline_handler()
323 correct_ret_addr, regs); in rethook_trampoline_handler()
331 arch_rethook_fixup_return(regs, correct_ret_addr); in rethook_trampoline_handler()
trace_event_perf.c
392 void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp) in perf_trace_buf_alloc() argument
408 if (regs) in perf_trace_buf_alloc()
409 *regs = this_cpu_ptr(&__perf_regs[rctx]); in perf_trace_buf_alloc()
435 struct pt_regs regs; in perf_ftrace_function_call() local
464 memset(&regs, 0, sizeof(regs)); in perf_ftrace_function_call()
465 perf_fetch_caller_regs(&regs); in perf_ftrace_function_call()
474 1, &regs, &head, NULL); in perf_ftrace_function_call()
/kernel/events/
callchain.c
39 struct pt_regs *regs) in perf_callchain_kernel() argument
44 struct pt_regs *regs) in perf_callchain_user() argument
180 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, in get_perf_callchain() argument
197 if (kernel && !user_mode(regs)) { in get_perf_callchain()
200 perf_callchain_kernel(&ctx, regs); in get_perf_callchain()
204 if (!user_mode(regs)) { in get_perf_callchain()
206 regs = task_pt_regs(current); in get_perf_callchain()
208 regs = NULL; in get_perf_callchain()
211 if (regs) { in get_perf_callchain()
218 perf_callchain_user(&ctx, regs); in get_perf_callchain()
uprobes.c
1689 unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs) in uprobe_get_swbp_addr() argument
1691 return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE; in uprobe_get_swbp_addr()
1694 unsigned long uprobe_get_trap_addr(struct pt_regs *regs) in uprobe_get_trap_addr() argument
1701 return instruction_pointer(regs); in uprobe_get_trap_addr()
1848 struct pt_regs *regs) in cleanup_return_instances() argument
1853 while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) { in cleanup_return_instances()
1860 static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs) in prepare_uretprobe() argument
1886 orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs); in prepare_uretprobe()
1892 cleanup_return_instances(utask, chained, regs); in prepare_uretprobe()
1912 ri->func = instruction_pointer(regs); in prepare_uretprobe()
[all …]
core.c
6858 struct pt_regs *regs, u64 mask) in perf_output_sample_regs() argument
6867 val = perf_reg_value(regs, bit); in perf_output_sample_regs()
6873 struct pt_regs *regs) in perf_sample_regs_user() argument
6875 if (user_mode(regs)) { in perf_sample_regs_user()
6877 regs_user->regs = regs; in perf_sample_regs_user()
6879 perf_get_regs_user(regs_user, regs); in perf_sample_regs_user()
6882 regs_user->regs = NULL; in perf_sample_regs_user()
6887 struct pt_regs *regs) in perf_sample_regs_intr() argument
6889 regs_intr->regs = regs; in perf_sample_regs_intr()
6901 static u64 perf_ustack_task_size(struct pt_regs *regs) in perf_ustack_task_size() argument
[all …]
/kernel/bpf/
verifier.c
1366 reg = &state->regs[i]; in print_verifier_state()
2146 struct bpf_reg_state *regs, u32 regno) in mark_reg_known_zero() argument
2152 __mark_reg_not_init(env, regs + regno); in mark_reg_known_zero()
2155 __mark_reg_known_zero(regs + regno); in mark_reg_known_zero()
2200 static void mark_reg_graph_node(struct bpf_reg_state *regs, u32 regno, in mark_reg_graph_node() argument
2203 __mark_reg_known_zero(&regs[regno]); in mark_reg_graph_node()
2204 regs[regno].type = PTR_TO_BTF_ID | MEM_ALLOC; in mark_reg_graph_node()
2205 regs[regno].btf = ds_head->btf; in mark_reg_graph_node()
2206 regs[regno].btf_id = ds_head->value_btf_id; in mark_reg_graph_node()
2207 regs[regno].off = ds_head->node_offset; in mark_reg_graph_node()
[all …]
stackmap.c
283 BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, in BPF_CALL_3() argument
300 trace = get_perf_callchain(regs, 0, kernel, user, max_depth, in BPF_CALL_3()
342 return bpf_get_stackid((unsigned long)(ctx->regs), in BPF_CALL_3()
388 static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, in __bpf_get_stack() argument
414 if (task && user && !user_mode(regs)) in __bpf_get_stack()
435 trace = get_perf_callchain(regs, 0, kernel, user, max_depth, in __bpf_get_stack()
464 BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size, in BPF_CALL_4() argument
467 return __bpf_get_stack(regs, NULL, NULL, buf, size, flags); in BPF_CALL_4()
483 struct pt_regs *regs; in BPF_CALL_4() local
489 regs = task_pt_regs(task); in BPF_CALL_4()
[all …]
/kernel/
rseq.c
274 static int rseq_ip_fixup(struct pt_regs *regs) in rseq_ip_fixup() argument
276 unsigned long ip = instruction_pointer(regs); in rseq_ip_fixup()
300 instruction_pointer_set(regs, (unsigned long)rseq_cs.abort_ip); in rseq_ip_fixup()
315 void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs) in __rseq_handle_notify_resume() argument
328 if (regs) { in __rseq_handle_notify_resume()
329 ret = rseq_ip_fixup(regs); in __rseq_handle_notify_resume()
348 void rseq_syscall(struct pt_regs *regs) in rseq_syscall() argument
350 unsigned long ip = instruction_pointer(regs); in rseq_syscall()
panic.c
154 void __weak __noreturn nmi_panic_self_stop(struct pt_regs *regs) in nmi_panic_self_stop() argument
193 void nmi_panic(struct pt_regs *regs, const char *msg) in nmi_panic() argument
203 nmi_panic_self_stop(regs); in nmi_panic()
661 struct pt_regs *regs, struct warn_args *args) in __warn() argument
678 if (regs) in __warn()
679 show_regs(regs); in __warn()
683 if (!regs) in __warn()
stacktrace.c
165 unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store, in stack_trace_save_regs() argument
175 arch_stack_walk(consume_entry, &c, current, regs); in stack_trace_save_regs()
256 save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) in save_stack_trace_regs() argument
316 unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store, in stack_trace_save_regs() argument
325 save_stack_trace_regs(regs, &trace); in stack_trace_save_regs()
fail_function.c
15 static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs);
17 static void fei_post_handler(struct kprobe *kp, struct pt_regs *regs, in fei_post_handler() argument
169 static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs) in fei_kprobe_handler() argument
174 regs_set_return_value(regs, attr->retval); in fei_kprobe_handler()
175 override_function_with_return(regs); in fei_kprobe_handler()
ptrace.c
938 ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs, in ptrace_get_syscall_info_entry() argument
945 info->entry.nr = syscall_get_nr(child, regs); in ptrace_get_syscall_info_entry()
946 syscall_get_arguments(child, regs, args); in ptrace_get_syscall_info_entry()
955 ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs, in ptrace_get_syscall_info_seccomp() argument
965 ptrace_get_syscall_info_entry(child, regs, info); in ptrace_get_syscall_info_seccomp()
974 ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs, in ptrace_get_syscall_info_exit() argument
978 info->exit.rval = syscall_get_error(child, regs); in ptrace_get_syscall_info_exit()
981 info->exit.rval = syscall_get_return_value(child, regs); in ptrace_get_syscall_info_exit()
991 struct pt_regs *regs = task_pt_regs(child); in ptrace_get_syscall_info() local
995 .instruction_pointer = instruction_pointer(regs), in ptrace_get_syscall_info()
[all …]
watchdog.c
145 void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs) in watchdog_hardlockup_check() argument
169 if (regs) in watchdog_hardlockup_check()
170 show_regs(regs); in watchdog_hardlockup_check()
186 nmi_panic(regs, "Hard LOCKUP"); in watchdog_hardlockup_check()
544 struct pt_regs *regs = get_irq_regs(); in watchdog_timer_fn() local
621 if (regs) in watchdog_timer_fn()
622 show_regs(regs); in watchdog_timer_fn()
631 trace_android_vh_watchdog_timer_softlockup(duration, regs, !!softlockup_panic); in watchdog_timer_fn()
kprobes.c
392 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
422 void opt_pre_handler(struct kprobe *p, struct pt_regs *regs) in opt_pre_handler() argument
429 kp->pre_handler(kp, regs); in opt_pre_handler()
1198 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) in aggr_pre_handler() argument
1205 if (kp->pre_handler(kp, regs)) in aggr_pre_handler()
1214 static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, in aggr_post_handler() argument
1222 kp->post_handler(kp, regs, flags); in aggr_post_handler()
2022 void __weak arch_kretprobe_fixup_return(struct pt_regs *regs, in arch_kretprobe_fixup_return() argument
2031 unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs, in __kretprobe_trampoline_handler() argument
2051 instruction_pointer_set(regs, (unsigned long)correct_ret_addr); in __kretprobe_trampoline_handler()
[all …]
/kernel/debug/
debug_core.c
201 unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs) in kgdb_arch_pc() argument
203 return instruction_pointer(regs); in kgdb_arch_pc()
212 int __weak kgdb_skipexception(int exception, struct pt_regs *regs) in kgdb_skipexception() argument
571 static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, in kgdb_cpu_enter() argument
591 arch_kgdb_ops.disable_hw_break(regs); in kgdb_cpu_enter()
602 kgdb_info[cpu].debuggerinfo = regs; in kgdb_cpu_enter()
836 kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs) in kgdb_handle_exception() argument
858 ks->linux_regs = regs; in kgdb_handle_exception()
865 ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER); in kgdb_handle_exception()
886 int kgdb_nmicallback(int cpu, void *regs) in kgdb_nmicallback() argument
[all …]
/kernel/debug/kdb/
kdb_bp.c
27 static void kdb_setsinglestep(struct pt_regs *regs) in kdb_setsinglestep() argument
108 static void kdb_handle_bp(struct pt_regs *regs, kdb_bp_t *bp) in kdb_handle_bp() argument
111 kdb_printf("regs->ip = 0x%lx\n", instruction_pointer(regs)); in kdb_handle_bp()
116 kdb_setsinglestep(regs); in kdb_handle_bp()
125 static int _kdb_bp_install(struct pt_regs *regs, kdb_bp_t *bp) in _kdb_bp_install() argument
142 kdb_handle_bp(regs, bp); in _kdb_bp_install()
174 void kdb_bp_install(struct pt_regs *regs) in kdb_bp_install() argument
186 _kdb_bp_install(regs, bp); in kdb_bp_install()
/kernel/irq/
generic-chip.c
44 irq_reg_writel(gc, mask, ct->regs.disable); in irq_gc_mask_disable_reg()
65 irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); in irq_gc_mask_set_bit()
85 irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); in irq_gc_mask_clr_bit()
104 irq_reg_writel(gc, mask, ct->regs.enable); in irq_gc_unmask_enable_reg()
121 irq_reg_writel(gc, mask, ct->regs.ack); in irq_gc_ack_set_bit()
137 irq_reg_writel(gc, mask, ct->regs.ack); in irq_gc_ack_clr_bit()
160 irq_reg_writel(gc, mask, ct->regs.disable); in irq_gc_mask_disable_and_ack_set()
162 irq_reg_writel(gc, mask, ct->regs.ack); in irq_gc_mask_disable_and_ack_set()
177 irq_reg_writel(gc, mask, ct->regs.eoi); in irq_gc_eoi()
260 u32 *mskptr = &gc->mask_cache, mskreg = ct->regs.mask; in irq_gc_init_mask_cache()
[all …]
handle.c
232 asmlinkage void noinstr generic_handle_arch_irq(struct pt_regs *regs) in generic_handle_arch_irq() argument
237 old_regs = set_irq_regs(regs); in generic_handle_arch_irq()
238 handle_arch_irq(regs); in generic_handle_arch_irq()