
Searched refs:ctx (Results 1 – 25 of 34) sorted by relevance


/kernel/events/
core.c
159 __get_cpu_context(struct perf_event_context *ctx) in __get_cpu_context() argument
161 return this_cpu_ptr(ctx->pmu->pmu_cpu_context); in __get_cpu_context()
165 struct perf_event_context *ctx) in perf_ctx_lock() argument
167 raw_spin_lock(&cpuctx->ctx.lock); in perf_ctx_lock()
168 if (ctx) in perf_ctx_lock()
169 raw_spin_lock(&ctx->lock); in perf_ctx_lock()
173 struct perf_event_context *ctx) in perf_ctx_unlock() argument
175 if (ctx) in perf_ctx_unlock()
176 raw_spin_unlock(&ctx->lock); in perf_ctx_unlock()
177 raw_spin_unlock(&cpuctx->ctx.lock); in perf_ctx_unlock()
[all …]
callchain.c
184 struct perf_callchain_entry_ctx ctx; in get_perf_callchain() local
191 ctx.entry = entry; in get_perf_callchain()
192 ctx.max_stack = max_stack; in get_perf_callchain()
193 ctx.nr = entry->nr = init_nr; in get_perf_callchain()
194 ctx.contexts = 0; in get_perf_callchain()
195 ctx.contexts_maxed = false; in get_perf_callchain()
199 perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL); in get_perf_callchain()
200 perf_callchain_kernel(&ctx, regs); in get_perf_callchain()
218 perf_callchain_store_context(&ctx, PERF_CONTEXT_USER); in get_perf_callchain()
221 perf_callchain_user(&ctx, regs); in get_perf_callchain()
uprobes.c
873 enum uprobe_filter_ctx ctx, struct mm_struct *mm) in consumer_filter() argument
875 return !uc->filter || uc->filter(uc, ctx, mm); in consumer_filter()
879 enum uprobe_filter_ctx ctx, struct mm_struct *mm) in filter_chain() argument
886 ret = consumer_filter(uc, ctx, mm); in filter_chain()
1851 enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL; in cleanup_return_instances()
1853 while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) { in cleanup_return_instances()
2177 bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, in arch_uretprobe_is_alive() argument
hw_breakpoint.c
524 if (irqs_disabled() && bp->ctx && bp->ctx->task == current) in modify_user_hw_breakpoint()
/kernel/cgroup/
cgroup-v1.c
402 struct cgroup_file_ctx *ctx = of->priv; in cgroup_pidlist_start() local
417 if (ctx->procs1.pidlist) in cgroup_pidlist_start()
418 ctx->procs1.pidlist = cgroup_pidlist_find(cgrp, type); in cgroup_pidlist_start()
424 if (!ctx->procs1.pidlist) { in cgroup_pidlist_start()
425 ret = pidlist_array_load(cgrp, type, &ctx->procs1.pidlist); in cgroup_pidlist_start()
429 l = ctx->procs1.pidlist; in cgroup_pidlist_start()
457 struct cgroup_file_ctx *ctx = of->priv; in cgroup_pidlist_stop() local
458 struct cgroup_pidlist *l = ctx->procs1.pidlist; in cgroup_pidlist_stop()
469 struct cgroup_file_ctx *ctx = of->priv; in cgroup_pidlist_next() local
470 struct cgroup_pidlist *l = ctx->procs1.pidlist; in cgroup_pidlist_next()
[all …]
cgroup.c
1901 struct cgroup_fs_context *ctx = cgroup_fc2context(fc); in cgroup2_parse_param() local
1911 ctx->flags |= CGRP_ROOT_NS_DELEGATE; in cgroup2_parse_param()
1914 ctx->flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS; in cgroup2_parse_param()
1917 ctx->flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT; in cgroup2_parse_param()
1956 struct cgroup_fs_context *ctx = cgroup_fc2context(fc); in cgroup_reconfigure() local
1958 apply_cgroup_root_flags(ctx->flags); in cgroup_reconfigure()
1987 void init_cgroup_root(struct cgroup_fs_context *ctx) in init_cgroup_root() argument
1989 struct cgroup_root *root = ctx->root; in init_cgroup_root()
1997 root->flags = ctx->flags; in init_cgroup_root()
1998 if (ctx->release_agent) in init_cgroup_root()
[all …]
cgroup-internal.h
236 void init_cgroup_root(struct cgroup_fs_context *ctx);
295 int cgroup1_reconfigure(struct fs_context *ctx);
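The cgroup.c hits above show the fs_context mount API at work: cgroup2_parse_param() resolves each mount option and ORs a CGRP_ROOT_* flag into the cgroup_fs_context, which init_cgroup_root() later copies into root->flags. Below is a minimal sketch of that parse_param shape for a hypothetical filesystem; myfs, Opt_verbose, MYFS_VERBOSE and struct my_fs_context are made-up names, and the private context is assumed to have been allocated in the filesystem's init_fs_context and stored in fc->fs_private.

#include <linux/bits.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

enum { Opt_verbose };

static const struct fs_parameter_spec myfs_fs_parameters[] = {
    fsparam_flag("verbose", Opt_verbose),
    {}
};

#define MYFS_VERBOSE    BIT(0)  /* hypothetical flag, stands in for CGRP_ROOT_* */

struct my_fs_context {
    unsigned int flags;         /* accumulated mount options */
};

static int myfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
    struct my_fs_context *ctx = fc->fs_private;  /* set up in init_fs_context */
    struct fs_parse_result result;
    int opt;

    opt = fs_parse(fc, myfs_fs_parameters, param, &result);
    if (opt < 0)
        return opt;

    switch (opt) {
    case Opt_verbose:
        ctx->flags |= MYFS_VERBOSE;  /* mirrors ctx->flags |= CGRP_ROOT_* above */
        return 0;
    }
    return -EINVAL;
}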
/kernel/
auditsc.c
160 static int audit_match_perm(struct audit_context *ctx, int mask) in audit_match_perm() argument
164 if (unlikely(!ctx)) in audit_match_perm()
166 n = ctx->major; in audit_match_perm()
168 switch (audit_classify_syscall(ctx->arch, n)) { in audit_match_perm()
192 return mask & ACC_MODE(ctx->argv[1]); in audit_match_perm()
194 return mask & ACC_MODE(ctx->argv[2]); in audit_match_perm()
196 return ((mask & AUDIT_PERM_WRITE) && ctx->argv[0] == SYS_BIND); in audit_match_perm()
204 static int audit_match_filetype(struct audit_context *ctx, int val) in audit_match_filetype() argument
209 if (unlikely(!ctx)) in audit_match_filetype()
212 list_for_each_entry(n, &ctx->names_list, list) { in audit_match_filetype()
[all …]
audit.c
201 struct audit_context *ctx; /* NULL or associated context */ member
1227 char *ctx = NULL; in audit_receive_msg() local
1480 err = security_secid_to_secctx(audit_sig_sid, &ctx, &len); in audit_receive_msg()
1487 security_release_secctx(ctx, len); in audit_receive_msg()
1493 memcpy(sig_data->ctx, ctx, len); in audit_receive_msg()
1494 security_release_secctx(ctx, len); in audit_receive_msg()
1790 static struct audit_buffer *audit_buffer_alloc(struct audit_context *ctx, in audit_buffer_alloc() argument
1805 ab->ctx = ctx; in audit_buffer_alloc()
1839 static inline void audit_get_stamp(struct audit_context *ctx, in audit_get_stamp() argument
1842 if (!ctx || !auditsc_get_stamp(ctx, t, serial)) { in audit_get_stamp()
[all …]
workqueue.c
3965 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx) in apply_wqattrs_cleanup() argument
3967 if (ctx) { in apply_wqattrs_cleanup()
3971 put_pwq_unlocked(ctx->pwq_tbl[node]); in apply_wqattrs_cleanup()
3972 put_pwq_unlocked(ctx->dfl_pwq); in apply_wqattrs_cleanup()
3974 free_workqueue_attrs(ctx->attrs); in apply_wqattrs_cleanup()
3976 kfree(ctx); in apply_wqattrs_cleanup()
3985 struct apply_wqattrs_ctx *ctx; in apply_wqattrs_prepare() local
3991 ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL); in apply_wqattrs_prepare()
3995 if (!ctx || !new_attrs || !tmp_attrs) in apply_wqattrs_prepare()
4020 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); in apply_wqattrs_prepare()
[all …]
audit.h
259 extern int auditsc_get_stamp(struct audit_context *ctx,
298 struct audit_context *ctx);
/kernel/bpf/
cgroup.c
1154 struct bpf_sock_addr_kern ctx = { in __cgroup_bpf_run_filter_sock_addr() local
1169 if (!ctx.uaddr) { in __cgroup_bpf_run_filter_sock_addr()
1171 ctx.uaddr = (struct sockaddr *)&unspec; in __cgroup_bpf_run_filter_sock_addr()
1175 ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(cgrp->bpf.effective[atype], &ctx, in __cgroup_bpf_run_filter_sock_addr()
1215 struct bpf_cgroup_dev_ctx ctx = { in __cgroup_bpf_check_dev_permission() local
1224 allow = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx, in __cgroup_bpf_check_dev_permission()
1318 struct bpf_sysctl_kern ctx = { in __cgroup_bpf_run_filter_sysctl() local
1333 ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL); in __cgroup_bpf_run_filter_sysctl()
1334 if (!ctx.cur_val || in __cgroup_bpf_run_filter_sysctl()
1335 table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) { in __cgroup_bpf_run_filter_sysctl()
[all …]
task_iter.c
93 struct bpf_iter__task ctx; in DEFINE_BPF_ITER_FUNC() local
102 ctx.meta = &meta; in DEFINE_BPF_ITER_FUNC()
103 ctx.task = task; in DEFINE_BPF_ITER_FUNC()
104 return bpf_iter_run_prog(prog, &ctx); in DEFINE_BPF_ITER_FUNC()
233 struct bpf_iter__task_file ctx; in DEFINE_BPF_ITER_FUNC() local
242 ctx.meta = &meta; in DEFINE_BPF_ITER_FUNC()
243 ctx.task = info->task; in DEFINE_BPF_ITER_FUNC()
244 ctx.fd = info->fd; in DEFINE_BPF_ITER_FUNC()
245 ctx.file = file; in DEFINE_BPF_ITER_FUNC()
246 return bpf_iter_run_prog(prog, &ctx); in DEFINE_BPF_ITER_FUNC()
[all …]
prog_iter.c
46 struct bpf_iter__bpf_prog ctx; in DEFINE_BPF_ITER_FUNC() local
51 ctx.meta = &meta; in DEFINE_BPF_ITER_FUNC()
52 ctx.prog = v; in DEFINE_BPF_ITER_FUNC()
56 ret = bpf_iter_run_prog(prog, &ctx); in DEFINE_BPF_ITER_FUNC()
map_iter.c
46 struct bpf_iter__bpf_map ctx; in DEFINE_BPF_ITER_FUNC() local
51 ctx.meta = &meta; in DEFINE_BPF_ITER_FUNC()
52 ctx.map = v; in DEFINE_BPF_ITER_FUNC()
56 ret = bpf_iter_run_prog(prog, &ctx); in DEFINE_BPF_ITER_FUNC()
stackmap.c
375 BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx, in BPF_CALL_3() argument
378 struct perf_event *event = ctx->event; in BPF_CALL_3()
386 return bpf_get_stackid((unsigned long)(ctx->regs), in BPF_CALL_3()
396 trace = ctx->data->callchain; in BPF_CALL_3()
552 BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx, in BPF_CALL_4() argument
555 struct pt_regs *regs = (struct pt_regs *)(ctx->regs); in BPF_CALL_4()
556 struct perf_event *event = ctx->event; in BPF_CALL_4()
573 trace = ctx->data->callchain; in BPF_CALL_4()
arraymap.c
559 struct bpf_iter__bpf_map_elem ctx = {}; in __bpf_array_map_seq_show() local
572 ctx.meta = &meta; in __bpf_array_map_seq_show()
573 ctx.map = info->map; in __bpf_array_map_seq_show()
575 ctx.key = &info->index; in __bpf_array_map_seq_show()
578 ctx.value = v; in __bpf_array_map_seq_show()
588 ctx.value = info->percpu_value_buf; in __bpf_array_map_seq_show()
592 return bpf_iter_run_prog(prog, &ctx); in __bpf_array_map_seq_show()
/kernel/locking/
test-ww_mutex.c
52 struct ww_acquire_ctx ctx; in __test_mutex() local
56 ww_acquire_init(&ctx, &ww_class); in __test_mutex()
67 ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL); in __test_mutex()
84 ww_acquire_fini(&ctx); in __test_mutex()
115 struct ww_acquire_ctx ctx; in test_aa() local
119 ww_acquire_init(&ctx, &ww_class); in test_aa()
121 ww_mutex_lock(&mutex, &ctx); in test_aa()
130 ret = ww_mutex_lock(&mutex, &ctx); in test_aa()
143 ww_acquire_fini(&ctx); in test_aa()
160 struct ww_acquire_ctx ctx; in test_abba_work() local
[all …]
ww_mutex.h
190 DEBUG_LOCKS_WARN_ON(ww->ctx); in ww_mutex_lock_acquired()
218 ww->ctx = ww_ctx; in ww_mutex_lock_acquired()
378 ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in ww_mutex_set_context_fastpath() argument
380 ww_mutex_lock_acquired(lock, ctx); in ww_mutex_set_context_fastpath()
408 __ww_mutex_check_waiters(&lock->base, ctx); in ww_mutex_set_context_fastpath()
442 struct ww_acquire_ctx *ctx) in __ww_mutex_check_kill() argument
445 struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx); in __ww_mutex_check_kill()
448 if (ctx->acquired == 0) in __ww_mutex_check_kill()
451 if (!ctx->is_wait_die) { in __ww_mutex_check_kill()
452 if (ctx->wounded) in __ww_mutex_check_kill()
[all …]
mutex.c
324 if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx)) in ww_mutex_spin_on_owner()
598 if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) in __mutex_lock_common()
808 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in ww_mutex_deadlock_injection() argument
813 if (ctx->deadlock_inject_countdown-- == 0) { in ww_mutex_deadlock_injection()
814 tmp = ctx->deadlock_inject_interval; in ww_mutex_deadlock_injection()
820 ctx->deadlock_inject_interval = tmp; in ww_mutex_deadlock_injection()
821 ctx->deadlock_inject_countdown = tmp; in ww_mutex_deadlock_injection()
822 ctx->contending_lock = lock; in ww_mutex_deadlock_injection()
834 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in ww_mutex_lock() argument
840 0, _RET_IP_, ctx); in ww_mutex_lock()
[all …]
ww_rt_mutex.c
23 if (unlikely(ww_ctx == READ_ONCE(lock->ctx))) in __ww_rt_mutex_lock()
54 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in ww_mutex_lock() argument
56 return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_); in ww_mutex_lock()
61 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in ww_mutex_lock_interruptible() argument
63 return __ww_rt_mutex_lock(lock, ctx, TASK_INTERRUPTIBLE, _RET_IP_); in ww_mutex_lock_interruptible()
locktorture.c
407 struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid]; in torture_ww_mutex_lock() local
418 ww_acquire_init(ctx, &torture_ww_class); in torture_ww_mutex_lock()
423 err = ww_mutex_lock(ll->lock, ctx); in torture_ww_mutex_lock()
434 ww_mutex_lock_slow(ll->lock, ctx); in torture_ww_mutex_lock()
446 struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid]; in torture_ww_mutex_unlock() local
451 ww_acquire_fini(ctx); in torture_ww_mutex_unlock()
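The locking hits above all show the same wait/wound mutex transaction: a ww_acquire_ctx is initialized once, passed to every ww_mutex_lock() in the transaction, and torn down with ww_acquire_fini(), with ww_mutex_lock_slow() used to back off after -EDEADLK (as torture_ww_mutex_lock() does). Below is a minimal out-of-tree sketch of that pattern; my_ww_class, struct buf and lock_pair() are hypothetical names, each buf->lock is assumed to have been ww_mutex_init()'ed with my_ww_class, and the full retry loop is omitted for brevity.

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(my_ww_class);

struct buf {
    struct ww_mutex lock;   /* hypothetical object protected by a ww_mutex */
};

static int lock_pair(struct buf *a, struct buf *b)
{
    struct ww_acquire_ctx ctx;
    int err;

    ww_acquire_init(&ctx, &my_ww_class);

    err = ww_mutex_lock(&a->lock, &ctx);
    if (err)
        goto out;

    err = ww_mutex_lock(&b->lock, &ctx);
    if (err == -EDEADLK) {
        /* lost the tie-break: drop a, sleep until b is free, retake a */
        ww_mutex_unlock(&a->lock);
        ww_mutex_lock_slow(&b->lock, &ctx);
        err = ww_mutex_lock(&a->lock, &ctx);
        if (err) {
            /* a full implementation would loop; give up here */
            ww_mutex_unlock(&b->lock);
            goto out;
        }
    }

    ww_acquire_done(&ctx);  /* no further locks will be taken in this ctx */
    /* ... use a and b ... */
    ww_mutex_unlock(&b->lock);
    ww_mutex_unlock(&a->lock);
out:
    ww_acquire_fini(&ctx);
    return err;
}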
/kernel/kcsan/
core.c
210 struct kcsan_ctx *ctx = get_ctx(); in kcsan_check_scoped_accesses() local
211 struct list_head *prev_save = ctx->scoped_accesses.prev; in kcsan_check_scoped_accesses()
214 ctx->scoped_accesses.prev = NULL; /* Avoid recursion. */ in kcsan_check_scoped_accesses()
215 list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) in kcsan_check_scoped_accesses()
217 ctx->scoped_accesses.prev = prev_save; in kcsan_check_scoped_accesses()
222 is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx) in is_atomic() argument
240 if (ctx->atomic_next > 0) { in is_atomic()
251 --ctx->atomic_next; /* in task, or outer interrupt */ in is_atomic()
255 return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic; in is_atomic()
259 should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx) in should_watch() argument
[all …]
/kernel/bpf/preload/iterators/
iterators.bpf.c
77 int dump_bpf_map(struct bpf_iter__bpf_map *ctx) in dump_bpf_map() argument
79 struct seq_file *seq = ctx->meta->seq; in dump_bpf_map()
80 __u64 seq_num = ctx->meta->seq_num; in dump_bpf_map()
81 struct bpf_map *map = ctx->map; in dump_bpf_map()
94 int dump_bpf_prog(struct bpf_iter__bpf_prog *ctx) in dump_bpf_prog() argument
96 struct seq_file *seq = ctx->meta->seq; in dump_bpf_prog()
97 __u64 seq_num = ctx->meta->seq_num; in dump_bpf_prog()
98 struct bpf_prog *prog = ctx->prog; in dump_bpf_prog()
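iterators.bpf.c above is the consumer side of the bpf_iter__bpf_map and bpf_iter__bpf_prog contexts that map_iter.c and prog_iter.c build before calling bpf_iter_run_prog(). Below is a minimal iterator program of the same shape, buildable with libbpf against a generated vmlinux.h; dump_map_ids is a hypothetical name.

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("iter/bpf_map")
int dump_map_ids(struct bpf_iter__bpf_map *ctx)
{
    struct seq_file *seq = ctx->meta->seq;  /* seq_file backing the iterator fd */
    struct bpf_map *map = ctx->map;

    if (!map)               /* final call: ctx->map is NULL, nothing to print */
        return 0;

    if (ctx->meta->seq_num == 0)            /* first object: emit a header */
        BPF_SEQ_PRINTF(seq, "  id name\n");

    BPF_SEQ_PRINTF(seq, "%4u %s\n", map->id, map->name);
    return 0;
}

Pinned with bpftool iter pin (or attached via a bpf_link from userspace), reading the pinned file runs the program once per loaded map and streams the seq_file output to the reader.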
/kernel/trace/
bpf_trace.c
95 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) in trace_call_bpf() argument
127 ret = BPF_PROG_RUN_ARRAY(call->prog_array, ctx, bpf_prog_run); in trace_call_bpf()
660 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) in bpf_event_output() argument
665 .data = ctx, in bpf_event_output()
981 BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx) in BPF_CALL_1() argument
984 return ((u64 *)ctx)[-1]; in BPF_CALL_1()
1008 BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx) in BPF_CALL_1() argument
1023 BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx) in BPF_CALL_1() argument
1025 return ctx->event->bpf_cookie; in BPF_CALL_1()
1311 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx, in BPF_CALL_3() argument
[all …]
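bpf_get_attach_cookie_trace() and bpf_get_attach_cookie_pe() above back the bpf_get_attach_cookie() helper, which lets one BPF program distinguish several attach points by a caller-chosen u64. Below is a minimal kprobe-side sketch assuming libbpf; the attach point and program name are hypothetical, and the cookie itself is supplied from userspace at attach time (e.g. via bpf_program__attach_kprobe_opts() with .bpf_cookie set).

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/do_sys_openat2")            /* hypothetical attach point */
int BPF_KPROBE(count_by_cookie)
{
    /* cookie value chosen by userspace when the link was created */
    __u64 cookie = bpf_get_attach_cookie(ctx);

    bpf_printk("hit from attach point with cookie %llu", cookie);
    return 0;
}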
