/kernel/
auditsc.c
  148  static int audit_match_perm(struct audit_context *ctx, int mask)   [in audit_match_perm(), argument]
  152  if (unlikely(!ctx))   [in audit_match_perm()]
  154  n = ctx->major;   [in audit_match_perm()]
  156  switch (audit_classify_syscall(ctx->arch, n)) {   [in audit_match_perm()]
  180  return mask & ACC_MODE(ctx->argv[1]);   [in audit_match_perm()]
  182  return mask & ACC_MODE(ctx->argv[2]);   [in audit_match_perm()]
  184  return ((mask & AUDIT_PERM_WRITE) && ctx->argv[0] == SYS_BIND);   [in audit_match_perm()]
  188  return mask & ACC_MODE((u32)ctx->openat2.flags);   [in audit_match_perm()]
  194  static int audit_match_filetype(struct audit_context *ctx, int val)   [in audit_match_filetype(), argument]
  199  if (unlikely(!ctx))   [in audit_match_filetype()]
  [all …]
audit.c
  201   struct audit_context *ctx; /* NULL or associated context */   [member]
  1225  char *ctx = NULL;   [in audit_receive_msg(), local]
  1478  err = security_secid_to_secctx(audit_sig_sid, &ctx, &len);   [in audit_receive_msg()]
  1482  sig_data = kmalloc(struct_size(sig_data, ctx, len), GFP_KERNEL);   [in audit_receive_msg()]
  1485  security_release_secctx(ctx, len);   [in audit_receive_msg()]
  1491  memcpy(sig_data->ctx, ctx, len);   [in audit_receive_msg()]
  1492  security_release_secctx(ctx, len);   [in audit_receive_msg()]
  1495  sig_data, struct_size(sig_data, ctx, len));   [in audit_receive_msg()]
  1788  static struct audit_buffer *audit_buffer_alloc(struct audit_context *ctx,   [in audit_buffer_alloc(), argument]
  1803  ab->ctx = ctx;   [in audit_buffer_alloc()]
  [all …]
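Several of the audit.c hits are one idiom: a buffer ending in a flexible array member (the security context bytes) is sized with struct_size() for kmalloc() and for the reply length, then filled with memcpy(). A minimal sketch of that idiom, using a hypothetical demo_sig_info struct rather than the real audit_sig_info layout:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical stand-in for the sig_data buffer built in audit_receive_msg(). */
struct demo_sig_info {
        u32 len;
        char ctx[];                     /* flexible array member */
};

static struct demo_sig_info *demo_sig_info_build(const char *secctx, u32 len)
{
        struct demo_sig_info *info;

        /*
         * struct_size(info, ctx, len) is sizeof(*info) plus len elements of
         * ctx[], with integer-overflow checking; the audit code uses the same
         * expression both for kmalloc() and for the netlink reply length.
         */
        info = kmalloc(struct_size(info, ctx, len), GFP_KERNEL);
        if (!info)
                return NULL;

        info->len = len;
        memcpy(info->ctx, secctx, len);
        return info;
}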
workqueue.c
  3947  static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)   [in apply_wqattrs_cleanup(), argument]
  3949  if (ctx) {   [in apply_wqattrs_cleanup()]
  3953  put_pwq_unlocked(ctx->pwq_tbl[node]);   [in apply_wqattrs_cleanup()]
  3954  put_pwq_unlocked(ctx->dfl_pwq);   [in apply_wqattrs_cleanup()]
  3956  free_workqueue_attrs(ctx->attrs);   [in apply_wqattrs_cleanup()]
  3958  kfree(ctx);   [in apply_wqattrs_cleanup()]
  3968  struct apply_wqattrs_ctx *ctx;   [in apply_wqattrs_prepare(), local]
  3974  ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);   [in apply_wqattrs_prepare()]
  3978  if (!ctx || !new_attrs || !tmp_attrs)   [in apply_wqattrs_prepare()]
  4004  ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);   [in apply_wqattrs_prepare()]
  [all …]
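The workqueue.c hits split the work into apply_wqattrs_prepare(), which gathers every allocation into an apply_wqattrs_ctx, and apply_wqattrs_cleanup(), which can dispose of a half-built context. A plain-C sketch of that prepare/cleanup idiom with hypothetical names (not the kernel's API), just to show why the cleanup helper must tolerate NULL and partially populated fields:

#include <stdlib.h>
#include <string.h>

/* Hypothetical context gathering every allocation the operation needs. */
struct apply_ctx {
        char *attrs;
        char **tbl;
        size_t tbl_len;
};

/* Like apply_wqattrs_cleanup(): safe on NULL and on half-built contexts. */
static void apply_cleanup(struct apply_ctx *ctx)
{
        size_t i;

        if (!ctx)
                return;
        for (i = 0; i < ctx->tbl_len; i++)
                free(ctx->tbl[i]);
        free(ctx->tbl);
        free(ctx->attrs);
        free(ctx);
}

/* Do every allocation that can fail up front; on failure undo all of it. */
static struct apply_ctx *apply_prepare(const char *attrs, size_t tbl_len)
{
        struct apply_ctx *ctx = calloc(1, sizeof(*ctx));
        size_t i, len = strlen(attrs) + 1;

        if (!ctx)
                return NULL;

        ctx->attrs = malloc(len);
        ctx->tbl = calloc(tbl_len, sizeof(*ctx->tbl));
        if (!ctx->attrs || !ctx->tbl)
                goto out_free;
        memcpy(ctx->attrs, attrs, len);

        ctx->tbl_len = tbl_len;         /* entries below may still be NULL */
        for (i = 0; i < tbl_len; i++) {
                ctx->tbl[i] = malloc(len);
                if (!ctx->tbl[i])
                        goto out_free;
                memcpy(ctx->tbl[i], attrs, len);
        }
        return ctx;

out_free:
        apply_cleanup(ctx);
        return NULL;
}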
kallsyms.c
  880  struct bpf_iter__ksym ctx;   [in ksym_prog_seq_show(), local]
  889  ctx.meta = &meta;   [in ksym_prog_seq_show()]
  890  ctx.ksym = m ? m->private : NULL;   [in ksym_prog_seq_show()]
  891  return bpf_iter_run_prog(prog, &ctx);   [in ksym_prog_seq_show()]
/kernel/events/
core.c
  159  __get_cpu_context(struct perf_event_context *ctx)   [in __get_cpu_context(), argument]
  161  return this_cpu_ptr(ctx->pmu->pmu_cpu_context);   [in __get_cpu_context()]
  165  struct perf_event_context *ctx)   [in perf_ctx_lock(), argument]
  167  raw_spin_lock(&cpuctx->ctx.lock);   [in perf_ctx_lock()]
  168  if (ctx)   [in perf_ctx_lock()]
  169  raw_spin_lock(&ctx->lock);   [in perf_ctx_lock()]
  173  struct perf_event_context *ctx)   [in perf_ctx_unlock(), argument]
  175  if (ctx)   [in perf_ctx_unlock()]
  176  raw_spin_unlock(&ctx->lock);   [in perf_ctx_unlock()]
  177  raw_spin_unlock(&cpuctx->ctx.lock);   [in perf_ctx_unlock()]
  [all …]
callchain.c
  184  struct perf_callchain_entry_ctx ctx;   [in get_perf_callchain(), local]
  191  ctx.entry = entry;   [in get_perf_callchain()]
  192  ctx.max_stack = max_stack;   [in get_perf_callchain()]
  193  ctx.nr = entry->nr = init_nr;   [in get_perf_callchain()]
  194  ctx.contexts = 0;   [in get_perf_callchain()]
  195  ctx.contexts_maxed = false;   [in get_perf_callchain()]
  199  perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);   [in get_perf_callchain()]
  200  perf_callchain_kernel(&ctx, regs);   [in get_perf_callchain()]
  216  perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);   [in get_perf_callchain()]
  218  perf_callchain_user(&ctx, regs);   [in get_perf_callchain()]
/kernel/cgroup/
cgroup-v1.c
  397  struct cgroup_file_ctx *ctx = of->priv;   [in cgroup_pidlist_start(), local]
  412  if (ctx->procs1.pidlist)   [in cgroup_pidlist_start()]
  413  ctx->procs1.pidlist = cgroup_pidlist_find(cgrp, type);   [in cgroup_pidlist_start()]
  419  if (!ctx->procs1.pidlist) {   [in cgroup_pidlist_start()]
  420  ret = pidlist_array_load(cgrp, type, &ctx->procs1.pidlist);   [in cgroup_pidlist_start()]
  424  l = ctx->procs1.pidlist;   [in cgroup_pidlist_start()]
  452  struct cgroup_file_ctx *ctx = of->priv;   [in cgroup_pidlist_stop(), local]
  453  struct cgroup_pidlist *l = ctx->procs1.pidlist;   [in cgroup_pidlist_stop()]
  464  struct cgroup_file_ctx *ctx = of->priv;   [in cgroup_pidlist_next(), local]
  465  struct cgroup_pidlist *l = ctx->procs1.pidlist;   [in cgroup_pidlist_next()]
  [all …]
cgroup.c
  1945  struct cgroup_fs_context *ctx = cgroup_fc2context(fc);   [in cgroup2_parse_param(), local]
  1955  ctx->flags |= CGRP_ROOT_NS_DELEGATE;   [in cgroup2_parse_param()]
  1958  ctx->flags |= CGRP_ROOT_FAVOR_DYNMODS;   [in cgroup2_parse_param()]
  1961  ctx->flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;   [in cgroup2_parse_param()]
  1964  ctx->flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT;   [in cgroup2_parse_param()]
  2008  struct cgroup_fs_context *ctx = cgroup_fc2context(fc);   [in cgroup_reconfigure(), local]
  2010  apply_cgroup_root_flags(ctx->flags);   [in cgroup_reconfigure()]
  2039  void init_cgroup_root(struct cgroup_fs_context *ctx)   [in init_cgroup_root(), argument]
  2041  struct cgroup_root *root = ctx->root;   [in init_cgroup_root()]
  2050  root->flags = ctx->flags & ~CGRP_ROOT_FAVOR_DYNMODS;   [in init_cgroup_root()]
  [all …]
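The cgroup.c hits trace the fs_context mount-option path: cgroup2_parse_param() turns each recognised option into a CGRP_ROOT_* bit on the cgroup_fs_context, and cgroup_reconfigure()/init_cgroup_root() later apply those flags. A hedged sketch of the same fs_parse() pattern for a hypothetical filesystem; the option names, Opt_* values and DEMO_* flags are made up, only the <linux/fs_parser.h> API is real:

#include <linux/errno.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

/* Hypothetical per-mount context and flags, standing in for
 * cgroup_fs_context / CGRP_ROOT_* in the hits above. */
struct demo_fs_context {
        unsigned int flags;
};
#define DEMO_ROOT_NSDELEGATE    (1 << 0)
#define DEMO_ROOT_FAVORDYNMODS  (1 << 1)

enum { Opt_nsdelegate, Opt_favordynmods };

static const struct fs_parameter_spec demo_fs_parameters[] = {
        fsparam_flag("nsdelegate",   Opt_nsdelegate),
        fsparam_flag("favordynmods", Opt_favordynmods),
        {}
};

static int demo_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
        struct demo_fs_context *ctx = fc->fs_private;
        struct fs_parse_result result;
        int opt;

        opt = fs_parse(fc, demo_fs_parameters, param, &result);
        if (opt < 0)
                return opt;

        switch (opt) {
        case Opt_nsdelegate:
                ctx->flags |= DEMO_ROOT_NSDELEGATE;
                return 0;
        case Opt_favordynmods:
                ctx->flags |= DEMO_ROOT_FAVORDYNMODS;
                return 0;
        }
        return -EINVAL;
}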
cgroup-internal.h
  236  void init_cgroup_root(struct cgroup_fs_context *ctx);
  297  int cgroup1_reconfigure(struct fs_context *ctx);
/kernel/locking/
test-ww_mutex.c
  61   struct ww_acquire_ctx ctx;   [in __test_mutex(), local]
  65   ww_acquire_init(&ctx, &ww_class);   [in __test_mutex()]
  76   ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);   [in __test_mutex()]
  93   ww_acquire_fini(&ctx);   [in __test_mutex()]
  124  struct ww_acquire_ctx ctx;   [in test_aa(), local]
  129  ww_acquire_init(&ctx, &ww_class);   [in test_aa()]
  132  ret = ww_mutex_lock(&mutex, &ctx);   [in test_aa()]
  138  ret = !ww_mutex_trylock(&mutex, &ctx);   [in test_aa()]
  152  if (ww_mutex_trylock(&mutex, &ctx)) {   [in test_aa()]
  159  ret = ww_mutex_lock(&mutex, &ctx);   [in test_aa()]
  [all …]
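test-ww_mutex.c, ww_mutex.h, mutex.c, ww_rt_mutex.c and locktorture.c below all revolve around the same acquire-context protocol: every lock taken in one operation is tagged with one ww_acquire_ctx, and a -EDEADLK from ww_mutex_lock() means back off, sleep on the contended lock with ww_mutex_lock_slow(), and retry. A hedged sketch for two locks, following the pattern described in Documentation/locking/ww-mutex-design.rst (demo_ww_class and demo_lock_both are hypothetical names):

#include <linux/errno.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);          /* hypothetical lock class */

static int demo_lock_both(struct ww_mutex *a, struct ww_mutex *b)
{
        struct ww_mutex *locks[2] = { a, b };
        struct ww_mutex *contended = NULL;
        struct ww_acquire_ctx ctx;
        int err = 0, i;

        ww_acquire_init(&ctx, &demo_ww_class);
retry:
        for (i = 0; i < 2; i++) {
                if (locks[i] == contended) {
                        contended = NULL;       /* already held via lock_slow below */
                        continue;
                }
                err = ww_mutex_lock(locks[i], &ctx);
                if (err)
                        goto unwind;
        }
        ww_acquire_done(&ctx);

        /* ... both locks held: do the work ... */

        ww_mutex_unlock(b);
        ww_mutex_unlock(a);
        ww_acquire_fini(&ctx);
        return 0;

unwind:
        if (contended)                          /* slow-locked earlier, not yet reached */
                ww_mutex_unlock(contended);
        contended = locks[i];
        while (i--)
                ww_mutex_unlock(locks[i]);
        if (err == -EDEADLK) {
                /* Lost the stamp race: wait for the holder, then start over. */
                ww_mutex_lock_slow(contended, &ctx);
                goto retry;
        }
        ww_acquire_fini(&ctx);
        return err;
}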
ww_mutex.h
  190  DEBUG_LOCKS_WARN_ON(ww->ctx);   [in ww_mutex_lock_acquired()]
  218  ww->ctx = ww_ctx;   [in ww_mutex_lock_acquired()]
  378  ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)   [in ww_mutex_set_context_fastpath(), argument]
  380  ww_mutex_lock_acquired(lock, ctx);   [in ww_mutex_set_context_fastpath()]
  408  __ww_mutex_check_waiters(&lock->base, ctx);   [in ww_mutex_set_context_fastpath()]
  442  struct ww_acquire_ctx *ctx)   [in __ww_mutex_check_kill(), argument]
  445  struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);   [in __ww_mutex_check_kill()]
  448  if (ctx->acquired == 0)   [in __ww_mutex_check_kill()]
  451  if (!ctx->is_wait_die) {   [in __ww_mutex_check_kill()]
  452  if (ctx->wounded)   [in __ww_mutex_check_kill()]
  [all …]
mutex.c
  331  if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))   [in ww_mutex_spin_on_owner()]
  608  if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))   [in __mutex_lock_common()]
  867  ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)   [in ww_mutex_deadlock_injection(), argument]
  872  if (ctx->deadlock_inject_countdown-- == 0) {   [in ww_mutex_deadlock_injection()]
  873  tmp = ctx->deadlock_inject_interval;   [in ww_mutex_deadlock_injection()]
  879  ctx->deadlock_inject_interval = tmp;   [in ww_mutex_deadlock_injection()]
  880  ctx->deadlock_inject_countdown = tmp;   [in ww_mutex_deadlock_injection()]
  881  ctx->contending_lock = lock;   [in ww_mutex_deadlock_injection()]
  893  ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)   [in ww_mutex_lock(), argument]
  899  0, _RET_IP_, ctx);   [in ww_mutex_lock()]
  [all …]
ww_rt_mutex.c
  48  if (unlikely(ww_ctx == READ_ONCE(lock->ctx)))   [in __ww_rt_mutex_lock()]
  79  ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)   [in ww_mutex_lock(), argument]
  81  return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_);   [in ww_mutex_lock()]
  86  ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)   [in ww_mutex_lock_interruptible(), argument]
  88  return __ww_rt_mutex_lock(lock, ctx, TASK_INTERRUPTIBLE, _RET_IP_);   [in ww_mutex_lock_interruptible()]
locktorture.c
  407  struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];   [in torture_ww_mutex_lock(), local]
  418  ww_acquire_init(ctx, &torture_ww_class);   [in torture_ww_mutex_lock()]
  423  err = ww_mutex_lock(ll->lock, ctx);   [in torture_ww_mutex_lock()]
  434  ww_mutex_lock_slow(ll->lock, ctx);   [in torture_ww_mutex_lock()]
  446  struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];   [in torture_ww_mutex_unlock(), local]
  451  ww_acquire_fini(ctx);   [in torture_ww_mutex_unlock()]
/kernel/bpf/
cgroup.c
  33   const void *ctx, bpf_prog_run_fn run_prog,   [in bpf_prog_run_array_cg(), argument]
  51   func_ret = run_prog(prog, ctx);   [in bpf_prog_run_array_cg()]
  66   unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,   [in __cgroup_bpf_run_lsm_sock(), argument]
  75   args = (u64 *)ctx;   [in __cgroup_bpf_run_lsm_sock()]
  84   ctx, bpf_prog_run, 0, NULL);   [in __cgroup_bpf_run_lsm_sock()]
  88   unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,   [in __cgroup_bpf_run_lsm_socket(), argument]
  97   args = (u64 *)ctx;   [in __cgroup_bpf_run_lsm_socket()]
  106  ctx, bpf_prog_run, 0, NULL);   [in __cgroup_bpf_run_lsm_socket()]
  110  unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,   [in __cgroup_bpf_run_lsm_current(), argument]
  125  ctx, bpf_prog_run, 0, NULL);   [in __cgroup_bpf_run_lsm_current()]
  [all …]
task_iter.c
  200  struct bpf_iter__task ctx;   [in DEFINE_BPF_ITER_FUNC(), local]
  208  ctx.meta = &meta;   [in DEFINE_BPF_ITER_FUNC()]
  209  ctx.task = task;   [in DEFINE_BPF_ITER_FUNC()]
  210  return bpf_iter_run_prog(prog, &ctx);   [in DEFINE_BPF_ITER_FUNC()]
  376  struct bpf_iter__task_file ctx;   [in DEFINE_BPF_ITER_FUNC(), local]
  385  ctx.meta = &meta;   [in DEFINE_BPF_ITER_FUNC()]
  386  ctx.task = info->task;   [in DEFINE_BPF_ITER_FUNC()]
  387  ctx.fd = info->fd;   [in DEFINE_BPF_ITER_FUNC()]
  388  ctx.file = file;   [in DEFINE_BPF_ITER_FUNC()]
  389  return bpf_iter_run_prog(prog, &ctx);   [in DEFINE_BPF_ITER_FUNC()]
  [all …]
link_iter.c
  46  struct bpf_iter__bpf_link ctx;   [in DEFINE_BPF_ITER_FUNC(), local]
  51  ctx.meta = &meta;   [in DEFINE_BPF_ITER_FUNC()]
  52  ctx.link = v;   [in DEFINE_BPF_ITER_FUNC()]
  56  ret = bpf_iter_run_prog(prog, &ctx);   [in DEFINE_BPF_ITER_FUNC()]
prog_iter.c
  46  struct bpf_iter__bpf_prog ctx;   [in DEFINE_BPF_ITER_FUNC(), local]
  51  ctx.meta = &meta;   [in DEFINE_BPF_ITER_FUNC()]
  52  ctx.prog = v;   [in DEFINE_BPF_ITER_FUNC()]
  56  ret = bpf_iter_run_prog(prog, &ctx);   [in DEFINE_BPF_ITER_FUNC()]
map_iter.c
  46  struct bpf_iter__bpf_map ctx;   [in DEFINE_BPF_ITER_FUNC(), local]
  51  ctx.meta = &meta;   [in DEFINE_BPF_ITER_FUNC()]
  52  ctx.map = v;   [in DEFINE_BPF_ITER_FUNC()]
  56  ret = bpf_iter_run_prog(prog, &ctx);   [in DEFINE_BPF_ITER_FUNC()]
cgroup_iter.c
  124  struct bpf_iter__cgroup ctx;   [in __cgroup_iter_seq_show(), local]
  133  ctx.meta = &meta;   [in __cgroup_iter_seq_show()]
  134  ctx.cgroup = css ? css->cgroup : NULL;   [in __cgroup_iter_seq_show()]
  138  ret = bpf_iter_run_prog(prog, &ctx);   [in __cgroup_iter_seq_show()]
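kallsyms.c, task_iter.c, link_iter.c, prog_iter.c, map_iter.c and cgroup_iter.c above all repeat the same seq_file show-callback shape. Reassembled from the fragments (map variant, mirroring map_iter.c; the bpf_iter__bpf_map declaration and bpf_iter_get_info()/bpf_iter_run_prog() come from the surrounding kernel code), the pattern looks roughly like this:

static int __bpf_map_seq_show(struct seq_file *seq, void *v, bool in_stop)
{
        struct bpf_iter__bpf_map ctx;
        struct bpf_iter_meta meta;
        struct bpf_prog *prog;
        int ret = 0;

        /* Per-element context handed to the attached BPF program. */
        ctx.meta = &meta;
        ctx.map = v;
        meta.seq = seq;

        /* in_stop selects the final "post-iteration" call of the program. */
        prog = bpf_iter_get_info(&meta, in_stop);
        if (prog)
                ret = bpf_iter_run_prog(prog, &ctx);

        return ret;
}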
stackmap.c
  331  BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,   [in BPF_CALL_3(), argument]
  334  struct perf_event *event = ctx->event;   [in BPF_CALL_3()]
  342  return bpf_get_stackid((unsigned long)(ctx->regs),   [in BPF_CALL_3()]
  352  trace = ctx->data->callchain;   [in BPF_CALL_3()]
  508  BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,   [in BPF_CALL_4(), argument]
  511  struct pt_regs *regs = (struct pt_regs *)(ctx->regs);   [in BPF_CALL_4()]
  512  struct perf_event *event = ctx->event;   [in BPF_CALL_4()]
  529  trace = ctx->data->callchain;   [in BPF_CALL_4()]
/kernel/kcsan/
core.c
  215  struct kcsan_ctx *ctx = get_ctx();   [in kcsan_check_scoped_accesses(), local]
  218  if (ctx->disable_scoped)   [in kcsan_check_scoped_accesses()]
  221  ctx->disable_scoped++;   [in kcsan_check_scoped_accesses()]
  222  list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) {   [in kcsan_check_scoped_accesses()]
  226  ctx->disable_scoped--;   [in kcsan_check_scoped_accesses()]
  231  is_atomic(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)   [in is_atomic(), argument]
  249  if (ctx->atomic_next > 0) {   [in is_atomic()]
  260  --ctx->atomic_next; /* in task, or outer interrupt */   [in is_atomic()]
  264  return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;   [in is_atomic()]
  268  should_watch(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)   [in should_watch(), argument]
  [all …]
/kernel/bpf/preload/iterators/
iterators.bpf.c
  77  int dump_bpf_map(struct bpf_iter__bpf_map *ctx)   [in dump_bpf_map(), argument]
  79  struct seq_file *seq = ctx->meta->seq;   [in dump_bpf_map()]
  80  __u64 seq_num = ctx->meta->seq_num;   [in dump_bpf_map()]
  81  struct bpf_map *map = ctx->map;   [in dump_bpf_map()]
  94  int dump_bpf_prog(struct bpf_iter__bpf_prog *ctx)   [in dump_bpf_prog(), argument]
  96  struct seq_file *seq = ctx->meta->seq;   [in dump_bpf_prog()]
  97  __u64 seq_num = ctx->meta->seq_num;   [in dump_bpf_prog()]
  98  struct bpf_prog *prog = ctx->prog;   [in dump_bpf_prog()]
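iterators.bpf.c is the BPF-program side of the bpf_iter__bpf_map / bpf_iter__bpf_prog contexts built by the kernel iterators above. A hedged sketch of a minimal map-iterator program in the same style; it assumes a CO-RE vmlinux.h and libbpf's BPF_SEQ_PRINTF() from <bpf/bpf_tracing.h>, and the fields printed are an illustrative choice, not the exact output of dump_bpf_map():

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("iter/bpf_map")
int dump_bpf_map_min(struct bpf_iter__bpf_map *ctx)
{
        struct seq_file *seq = ctx->meta->seq;
        __u64 seq_num = ctx->meta->seq_num;
        struct bpf_map *map = ctx->map;

        /* The program also runs once with map == NULL at the end of the walk. */
        if (!map)
                return 0;

        if (seq_num == 0)
                BPF_SEQ_PRINTF(seq, "  id  name\n");

        BPF_SEQ_PRINTF(seq, "%4u  %s\n", map->id, map->name);
        return 0;
}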
iterators.lskel.h
  9    struct bpf_loader_ctx ctx;   [member]
  83   skel->ctx.sz = (void *)&skel->links - (void *)skel;   [in iterators_bpf__open()]
  105  opts.ctx = (struct bpf_loader_ctx *)skel;   [in iterators_bpf__load()]
/kernel/trace/
bpf_trace.c
  85    static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
  86    static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
  102   unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)   [in trace_call_bpf(), argument]
  136   ctx, bpf_prog_run);   [in trace_call_bpf()]
  719   void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)   [in bpf_event_output(), argument]
  724   .data = ctx,   [in bpf_event_output()]
  1039  BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)   [in BPF_CALL_1(), argument]
  1042  return ((u64 *)ctx)[-2];   [in BPF_CALL_1()]
  1109  BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)   [in BPF_CALL_1(), argument]
  1124  BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)   [in BPF_CALL_1(), argument]
  [all …]