Searched refs:regs (Results 1 – 25 of 49) sorted by relevance

/kernel/entry/
common.c
19 static __always_inline void __enter_from_user_mode(struct pt_regs *regs) in __enter_from_user_mode() argument
21 arch_enter_from_user_mode(regs); in __enter_from_user_mode()
28 kmsan_unpoison_entry_regs(regs); in __enter_from_user_mode()
33 void noinstr enter_from_user_mode(struct pt_regs *regs) in enter_from_user_mode() argument
35 __enter_from_user_mode(regs); in enter_from_user_mode()
38 static inline void syscall_enter_audit(struct pt_regs *regs, long syscall) in syscall_enter_audit() argument
43 syscall_get_arguments(current, regs, args); in syscall_enter_audit()
48 static long syscall_trace_enter(struct pt_regs *regs, long syscall, in syscall_trace_enter() argument
59 if (syscall_user_dispatch(regs)) in syscall_trace_enter()
65 ret = ptrace_report_syscall_entry(regs); in syscall_trace_enter()
[all …]
syscall_user_dispatch.c
19 static void trigger_sigsys(struct pt_regs *regs) in trigger_sigsys() argument
29 info.si_syscall = syscall_get_nr(current, regs); in trigger_sigsys()
34 bool syscall_user_dispatch(struct pt_regs *regs) in syscall_user_dispatch() argument
39 if (likely(instruction_pointer(regs) - sd->offset < sd->len)) in syscall_user_dispatch()
42 if (unlikely(arch_syscall_is_vdso_sigreturn(regs))) in syscall_user_dispatch()
65 syscall_rollback(current, regs); in syscall_user_dispatch()
66 trigger_sigsys(regs); in syscall_user_dispatch()
/kernel/trace/
trace_syscalls.c
67 trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs) in trace_get_syscall_nr() argument
69 if (unlikely(arch_trace_is_compat_syscall(regs))) in trace_get_syscall_nr()
72 return syscall_get_nr(task, regs); in trace_get_syscall_nr()
76 trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs) in trace_get_syscall_nr() argument
78 return syscall_get_nr(task, regs); in trace_get_syscall_nr()
291 static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) in ftrace_syscall_enter() argument
302 syscall_nr = trace_get_syscall_nr(current, regs); in ftrace_syscall_enter()
326 syscall_get_arguments(current, regs, args); in ftrace_syscall_enter()
332 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) in ftrace_syscall_exit() argument
341 syscall_nr = trace_get_syscall_nr(current, regs); in ftrace_syscall_exit()
[all …]
trace_uprobe.c
96 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
98 unsigned long func, struct pt_regs *regs);
112 static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n) in get_user_stack_nth() argument
115 unsigned long addr = user_stack_pointer(regs); in get_user_stack_nth()
222 struct pt_regs *regs = rec; in process_fetch_insn() local
228 val = regs_get_register(regs, code->param); in process_fetch_insn()
231 val = get_user_stack_nth(regs, code->param); in process_fetch_insn()
234 val = user_stack_pointer(regs); in process_fetch_insn()
237 val = regs_return_value(regs); in process_fetch_insn()
951 unsigned long func, struct pt_regs *regs, in __uprobe_trace_func() argument
[all …]
trace_kprobe.c
239 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
241 struct pt_regs *regs);
1341 struct pt_regs *regs = rec; in process_fetch_insn() local
1348 val = regs_get_register(regs, code->param); in process_fetch_insn()
1351 val = regs_get_kernel_stack_nth(regs, code->param); in process_fetch_insn()
1354 val = kernel_stack_pointer(regs); in process_fetch_insn()
1357 val = regs_return_value(regs); in process_fetch_insn()
1370 val = regs_get_kernel_argument(regs, code->param); in process_fetch_insn()
1387 __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, in NOKPROBE_SYMBOL()
1400 dsize = __get_data_size(&tk->tp, regs); in NOKPROBE_SYMBOL()
[all …]
bpf_trace.c
146 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc) in BPF_CALL_2() argument
148 regs_set_return_value(regs, rc); in BPF_CALL_2()
149 override_function_with_return(regs); in BPF_CALL_2()
621 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, in __bpf_perf_event_output() argument
647 return perf_event_output(event, sd, regs); in __bpf_perf_event_output()
660 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, in BPF_CALL_5() argument
693 err = __bpf_perf_event_output(regs, map, flags, sd); in BPF_CALL_5()
713 struct pt_regs regs[3]; member
736 struct pt_regs *regs; in bpf_event_output() local
748 regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]); in bpf_event_output()
[all …]
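
bpf_perf_event_output() above is the helper a BPF program calls to push a record through __bpf_perf_event_output(). A hedged libbpf-style sketch follows, not from this listing; the attach point, map name and record layout are illustrative assumptions.

/*
 * Hedged sketch: a kprobe BPF program emitting one record per hit via
 * bpf_perf_event_output() into a per-CPU perf buffer.
 */
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

struct event {
	__u32 pid;
	__u64 ts_ns;
};

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} events SEC(".maps");

SEC("kprobe/do_sys_openat2")	/* illustrative attach point */
int probe_openat(struct pt_regs *ctx)
{
	struct event e = {
		.pid   = bpf_get_current_pid_tgid() >> 32,
		.ts_ns = bpf_ktime_get_ns(),
	};

	/* Emit one record into the perf ring for the current CPU. */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &e, sizeof(e));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
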
rethook.c
207 void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount) in rethook_hook() argument
209 arch_rethook_prepare(node, regs, mcount); in rethook_hook()
277 void __weak arch_rethook_fixup_return(struct pt_regs *regs, in arch_rethook_fixup_return() argument
289 unsigned long rethook_trampoline_handler(struct pt_regs *regs, in rethook_trampoline_handler() argument
303 instruction_pointer_set(regs, correct_ret_addr); in rethook_trampoline_handler()
322 handler(rhn, rhn->rethook->data, regs); in rethook_trampoline_handler()
330 arch_rethook_fixup_return(regs, correct_ret_addr); in rethook_trampoline_handler()
trace_event_perf.c
396 void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp) in perf_trace_buf_alloc() argument
412 if (regs) in perf_trace_buf_alloc()
413 *regs = this_cpu_ptr(&__perf_regs[rctx]); in perf_trace_buf_alloc()
439 struct pt_regs regs; in perf_ftrace_function_call() local
468 memset(&regs, 0, sizeof(regs)); in perf_ftrace_function_call()
469 perf_fetch_caller_regs(&regs); in perf_ftrace_function_call()
478 1, &regs, &head, NULL); in perf_ftrace_function_call()
ftrace_internal.h
14 int ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs);
36 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) in ftrace_ops_test() argument
/kernel/events/
callchain.c
39 struct pt_regs *regs) in perf_callchain_kernel() argument
44 struct pt_regs *regs) in perf_callchain_user() argument
180 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, in get_perf_callchain() argument
197 if (kernel && !user_mode(regs)) { in get_perf_callchain()
200 perf_callchain_kernel(&ctx, regs); in get_perf_callchain()
204 if (!user_mode(regs)) { in get_perf_callchain()
206 regs = task_pt_regs(current); in get_perf_callchain()
208 regs = NULL; in get_perf_callchain()
211 if (regs) { in get_perf_callchain()
218 perf_callchain_user(&ctx, regs); in get_perf_callchain()
uprobes.c
1688 unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs) in uprobe_get_swbp_addr() argument
1690 return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE; in uprobe_get_swbp_addr()
1693 unsigned long uprobe_get_trap_addr(struct pt_regs *regs) in uprobe_get_trap_addr() argument
1700 return instruction_pointer(regs); in uprobe_get_trap_addr()
1847 struct pt_regs *regs) in cleanup_return_instances() argument
1852 while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) { in cleanup_return_instances()
1859 static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs) in prepare_uretprobe() argument
1885 orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs); in prepare_uretprobe()
1891 cleanup_return_instances(utask, chained, regs); in prepare_uretprobe()
1911 ri->func = instruction_pointer(regs); in prepare_uretprobe()
[all …]
core.c
6673 struct pt_regs *regs, u64 mask) in perf_output_sample_regs() argument
6682 val = perf_reg_value(regs, bit); in perf_output_sample_regs()
6688 struct pt_regs *regs) in perf_sample_regs_user() argument
6690 if (user_mode(regs)) { in perf_sample_regs_user()
6692 regs_user->regs = regs; in perf_sample_regs_user()
6694 perf_get_regs_user(regs_user, regs); in perf_sample_regs_user()
6697 regs_user->regs = NULL; in perf_sample_regs_user()
6702 struct pt_regs *regs) in perf_sample_regs_intr() argument
6704 regs_intr->regs = regs; in perf_sample_regs_intr()
6716 static u64 perf_ustack_task_size(struct pt_regs *regs) in perf_ustack_task_size() argument
[all …]
/kernel/bpf/
verifier.c
856 reg = &state->regs[i]; in print_verifier_state()
1394 struct bpf_reg_state *regs, u32 regno) in mark_reg_known_zero() argument
1400 __mark_reg_not_init(env, regs + regno); in mark_reg_known_zero()
1403 __mark_reg_known_zero(regs + regno); in mark_reg_known_zero()
1711 struct bpf_reg_state *regs, u32 regno) in mark_reg_unknown() argument
1717 __mark_reg_not_init(env, regs + regno); in mark_reg_unknown()
1720 __mark_reg_unknown(env, regs + regno); in mark_reg_unknown()
1731 struct bpf_reg_state *regs, u32 regno) in mark_reg_not_init() argument
1737 __mark_reg_not_init(env, regs + regno); in mark_reg_not_init()
1740 __mark_reg_not_init(env, regs + regno); in mark_reg_not_init()
[all …]
stackmap.c
283 BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, in BPF_CALL_3() argument
300 trace = get_perf_callchain(regs, 0, kernel, user, max_depth, in BPF_CALL_3()
342 return bpf_get_stackid((unsigned long)(ctx->regs), in BPF_CALL_3()
388 static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, in __bpf_get_stack() argument
414 if (task && user && !user_mode(regs)) in __bpf_get_stack()
435 trace = get_perf_callchain(regs, 0, kernel, user, max_depth, in __bpf_get_stack()
464 BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size, in BPF_CALL_4() argument
467 return __bpf_get_stack(regs, NULL, NULL, buf, size, flags); in BPF_CALL_4()
483 struct pt_regs *regs; in BPF_CALL_4() local
489 regs = task_pt_regs(task); in BPF_CALL_4()
[all …]
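
bpf_get_stackid() and __bpf_get_stack() above back the stack-walking BPF helpers. A hedged program-side sketch using bpf_get_stackid() with a BPF_MAP_TYPE_STACK_TRACE map follows, not from this listing; the attach point and map sizing are illustrative assumptions.

/*
 * Hedged sketch: record the kernel call chain at each probe hit.
 * Unwound frames land in the stack-trace map; the helper returns an id
 * that user space can look up later with bpf_map_lookup_elem().
 */
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 1024);
	__uint(key_size, sizeof(__u32));
	/* 127 frames of 8 bytes each, the default perf_event_max_stack. */
	__uint(value_size, 127 * sizeof(__u64));
} stacks SEC(".maps");

SEC("kprobe/kfree")		/* illustrative attach point */
int record_kfree_stack(struct pt_regs *ctx)
{
	/* flags 0 = kernel stack; BPF_F_USER_STACK would request user frames. */
	long id = bpf_get_stackid(ctx, &stacks, 0);

	if (id < 0)		/* unwind failed or the map is full */
		return 0;

	/* user space can now fetch the frames for this stack id */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
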
/kernel/
rseq.c
245 static int rseq_ip_fixup(struct pt_regs *regs) in rseq_ip_fixup() argument
247 unsigned long ip = instruction_pointer(regs); in rseq_ip_fixup()
271 instruction_pointer_set(regs, (unsigned long)rseq_cs.abort_ip); in rseq_ip_fixup()
286 void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs) in __rseq_handle_notify_resume() argument
299 if (regs) { in __rseq_handle_notify_resume()
300 ret = rseq_ip_fixup(regs); in __rseq_handle_notify_resume()
319 void rseq_syscall(struct pt_regs *regs) in rseq_syscall() argument
321 unsigned long ip = instruction_pointer(regs); in rseq_syscall()
panic.c
153 void __weak nmi_panic_self_stop(struct pt_regs *regs) in nmi_panic_self_stop() argument
192 void nmi_panic(struct pt_regs *regs, const char *msg) in nmi_panic() argument
202 nmi_panic_self_stop(regs); in nmi_panic()
652 struct pt_regs *regs, struct warn_args *args) in __warn() argument
669 if (regs) in __warn()
670 show_regs(regs); in __warn()
674 if (!regs) in __warn()
stacktrace.c
165 unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store, in stack_trace_save_regs() argument
175 arch_stack_walk(consume_entry, &c, current, regs); in stack_trace_save_regs()
256 save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) in save_stack_trace_regs() argument
316 unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store, in stack_trace_save_regs() argument
325 save_stack_trace_regs(regs, &trace); in stack_trace_save_regs()
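
stack_trace_save_regs() above captures a call chain starting from a pt_regs snapshot. A short, hedged kernel-side sketch of calling it follows, not from this listing; it assumes CONFIG_STACKTRACE and an illustrative caller that already holds regs (for example a probe or NMI handler).

/*
 * Hedged sketch: capture and print the call chain described by @regs.
 */
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/stacktrace.h>

#define DEMO_STACK_DEPTH 16	/* illustrative depth limit */

static void demo_dump_regs_stack(struct pt_regs *regs)
{
	unsigned long entries[DEMO_STACK_DEPTH];
	unsigned int nr;

	/* Walk the stack starting from the state captured in @regs. */
	nr = stack_trace_save_regs(regs, entries, ARRAY_SIZE(entries), 0);

	/* One "%pS" line per entry, indented by one space. */
	stack_trace_print(entries, nr, 1);
}
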
watchdog_hld.c
112 struct pt_regs *regs) in watchdog_overflow_callback() argument
142 if (regs) in watchdog_overflow_callback()
143 show_regs(regs); in watchdog_overflow_callback()
156 nmi_panic(regs, "Hard LOCKUP"); in watchdog_overflow_callback()
fail_function.c
15 static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs);
17 static void fei_post_handler(struct kprobe *kp, struct pt_regs *regs, in fei_post_handler() argument
169 static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs) in fei_kprobe_handler() argument
174 regs_set_return_value(regs, attr->retval); in fei_kprobe_handler()
175 override_function_with_return(regs); in fei_kprobe_handler()
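
fei_kprobe_handler() above shows the error-injection pattern: overwrite the return value held in regs, then make the probed function return to its caller immediately. A hedged module sketch of the same pattern follows, not from this listing; it assumes CONFIG_FUNCTION_ERROR_INJECTION, and the target symbol is hypothetical (as with fail_function, it should be an error-injectable function).

/*
 * Hedged sketch: a kprobe pre-handler that fakes a failing return from
 * the probed function via regs_set_return_value() +
 * override_function_with_return().
 */
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/error-injection.h>

static int demo_force_enomem(struct kprobe *kp, struct pt_regs *regs)
{
	regs_set_return_value(regs, -ENOMEM);	/* fake the return code */
	override_function_with_return(regs);	/* jump back to the caller */
	return 1;				/* regs->ip was changed */
}

static struct kprobe demo_kp = {
	.symbol_name = "my_driver_alloc_buffers",	/* hypothetical target */
	.pre_handler = demo_force_enomem,
};

static int __init demo_init(void)
{
	return register_kprobe(&demo_kp);
}

static void __exit demo_exit(void)
{
	unregister_kprobe(&demo_kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
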
ptrace.c
937 ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs, in ptrace_get_syscall_info_entry() argument
944 info->entry.nr = syscall_get_nr(child, regs); in ptrace_get_syscall_info_entry()
945 syscall_get_arguments(child, regs, args); in ptrace_get_syscall_info_entry()
954 ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs, in ptrace_get_syscall_info_seccomp() argument
964 ptrace_get_syscall_info_entry(child, regs, info); in ptrace_get_syscall_info_seccomp()
973 ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs, in ptrace_get_syscall_info_exit() argument
977 info->exit.rval = syscall_get_error(child, regs); in ptrace_get_syscall_info_exit()
980 info->exit.rval = syscall_get_return_value(child, regs); in ptrace_get_syscall_info_exit()
990 struct pt_regs *regs = task_pt_regs(child); in ptrace_get_syscall_info() local
994 .instruction_pointer = instruction_pointer(regs), in ptrace_get_syscall_info()
[all …]
kprobes.c
392 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
422 void opt_pre_handler(struct kprobe *p, struct pt_regs *regs) in opt_pre_handler() argument
429 kp->pre_handler(kp, regs); in opt_pre_handler()
1192 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) in aggr_pre_handler() argument
1199 if (kp->pre_handler(kp, regs)) in aggr_pre_handler()
1208 static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, in aggr_post_handler() argument
1216 kp->post_handler(kp, regs, flags); in aggr_post_handler()
2010 void __weak arch_kretprobe_fixup_return(struct pt_regs *regs, in arch_kretprobe_fixup_return() argument
2019 unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs, in __kretprobe_trampoline_handler() argument
2039 instruction_pointer_set(regs, (unsigned long)correct_ret_addr); in __kretprobe_trampoline_handler()
[all …]
/kernel/debug/
debug_core.c
201 unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs) in kgdb_arch_pc() argument
203 return instruction_pointer(regs); in kgdb_arch_pc()
212 int __weak kgdb_skipexception(int exception, struct pt_regs *regs) in kgdb_skipexception() argument
571 static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, in kgdb_cpu_enter() argument
591 arch_kgdb_ops.disable_hw_break(regs); in kgdb_cpu_enter()
602 kgdb_info[cpu].debuggerinfo = regs; in kgdb_cpu_enter()
836 kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs) in kgdb_handle_exception() argument
858 ks->linux_regs = regs; in kgdb_handle_exception()
865 ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER); in kgdb_handle_exception()
886 int kgdb_nmicallback(int cpu, void *regs) in kgdb_nmicallback() argument
[all …]
/kernel/debug/kdb/
kdb_bp.c
27 static void kdb_setsinglestep(struct pt_regs *regs) in kdb_setsinglestep() argument
108 static void kdb_handle_bp(struct pt_regs *regs, kdb_bp_t *bp) in kdb_handle_bp() argument
111 kdb_printf("regs->ip = 0x%lx\n", instruction_pointer(regs)); in kdb_handle_bp()
116 kdb_setsinglestep(regs); in kdb_handle_bp()
125 static int _kdb_bp_install(struct pt_regs *regs, kdb_bp_t *bp) in _kdb_bp_install() argument
142 kdb_handle_bp(regs, bp); in _kdb_bp_install()
174 void kdb_bp_install(struct pt_regs *regs) in kdb_bp_install() argument
186 _kdb_bp_install(regs, bp); in kdb_bp_install()
/kernel/irq/
generic-chip.c
44 irq_reg_writel(gc, mask, ct->regs.disable); in irq_gc_mask_disable_reg()
65 irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); in irq_gc_mask_set_bit()
85 irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); in irq_gc_mask_clr_bit()
104 irq_reg_writel(gc, mask, ct->regs.enable); in irq_gc_unmask_enable_reg()
121 irq_reg_writel(gc, mask, ct->regs.ack); in irq_gc_ack_set_bit()
137 irq_reg_writel(gc, mask, ct->regs.ack); in irq_gc_ack_clr_bit()
160 irq_reg_writel(gc, mask, ct->regs.disable); in irq_gc_mask_disable_and_ack_set()
162 irq_reg_writel(gc, mask, ct->regs.ack); in irq_gc_mask_disable_and_ack_set()
177 irq_reg_writel(gc, mask, ct->regs.eoi); in irq_gc_eoi()
260 u32 *mskptr = &gc->mask_cache, mskreg = ct->regs.mask; in irq_gc_init_mask_cache()
[all …]
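
The irq_gc_* callbacks above read their register offsets from ct->regs. A hedged driver-side sketch of filling in those fields follows, not from this listing; the chip name, register offsets, irq range and flags are illustrative assumptions.

/*
 * Hedged sketch: allocate one generic irq chip, point it at a device's
 * register block, and reuse the irq_gc_* helpers listed above.
 */
#include <linux/errno.h>
#include <linux/irq.h>

#define DEMO_REG_ENABLE		0x00	/* hypothetical register layout */
#define DEMO_REG_DISABLE	0x04
#define DEMO_REG_ACK		0x08

static int demo_init_irq_gc(void __iomem *base, unsigned int irq_base)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("demo-intc", 1, irq_base, base,
				    handle_level_irq);
	if (!gc)
		return -ENOMEM;

	ct = gc->chip_types;
	ct->regs.enable  = DEMO_REG_ENABLE;	/* irq_gc_unmask_enable_reg() */
	ct->regs.disable = DEMO_REG_DISABLE;	/* irq_gc_mask_disable_reg()  */
	ct->regs.ack     = DEMO_REG_ACK;	/* irq_gc_ack_set_bit()       */
	ct->chip.irq_mask   = irq_gc_mask_disable_reg;
	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
	ct->chip.irq_ack    = irq_gc_ack_set_bit;

	irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0);
	return 0;
}
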
handle.c
232 asmlinkage void noinstr generic_handle_arch_irq(struct pt_regs *regs) in generic_handle_arch_irq() argument
237 old_regs = set_irq_regs(regs); in generic_handle_arch_irq()
238 handle_arch_irq(regs); in generic_handle_arch_irq()
