/kernel/cgroup/
debug.c
    46   int i, refcnt;    (in current_css_set_read(), local)
    54   refcnt = refcount_read(&cset->refcount);    (in current_css_set_read())
    55   seq_printf(seq, "css_set %pK %d", cset, refcnt);    (in current_css_set_read())
    56   if (refcnt > cset->nr_tasks)    (in current_css_set_read())
    57   seq_printf(seq, " +%d", refcnt - cset->nr_tasks);    (in current_css_set_read())
    126  int refcnt = refcount_read(&cset->refcount);    (in cgroup_css_links_read(), local)
    148  seq_printf(seq, " %d", refcnt);    (in cgroup_css_links_read())
    149  if (refcnt - cset->nr_tasks > 0) {    (in cgroup_css_links_read())
    150  int extra = refcnt - cset->nr_tasks;    (in cgroup_css_links_read())
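The debug controller only inspects the counter: refcount_read() yields a point-in-time snapshot that is fine for printing but must not drive any lifetime decision. A minimal sketch of the same reporting pattern, with a hypothetical struct my_set and my_set_show() standing in for css_set and the real read handlers:

    #include <linux/refcount.h>
    #include <linux/seq_file.h>

    /* Hypothetical stand-in for css_set: a refcount plus a task count. */
    struct my_set {
            refcount_t refcount;    /* all references, including internal ones */
            int nr_tasks;           /* references contributed by attached tasks */
    };

    /* Snapshot the count for debug output, as debug.c does for css_sets. */
    static void my_set_show(struct seq_file *seq, struct my_set *set)
    {
            int refcnt = refcount_read(&set->refcount);     /* read-only snapshot */

            seq_printf(seq, "my_set %pK %d", set, refcnt);
            if (refcnt > set->nr_tasks)
                    seq_printf(seq, " +%d", refcnt - set->nr_tasks);
            seq_putc(seq, '\n');
    }

The "+N" suffix simply reports how far the count exceeds the number of attached tasks, i.e. references held for internal bookkeeping rather than by tasks.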
cgroup.c
    2001  ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release,    (in cgroup_setup_root())
    2082  percpu_ref_exit(&root_cgrp->self.refcnt);    (in cgroup_setup_root())
    2214  !percpu_ref_is_dying(&root->cgrp.self.refcnt)) {    (in cgroup_kill_sb())
    2216  percpu_ref_kill(&root->cgrp.self.refcnt);    (in cgroup_kill_sb())
    3067  if (!css || !percpu_ref_is_dying(&css->refcnt))    (in cgroup_lock_and_drain_offline())
    3190  WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt));    (in cgroup_apply_control_enable())
    3230  WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt));    (in cgroup_apply_control_disable())
    5133  percpu_ref_exit(&css->refcnt);    (in css_free_rwork_fn())
    5234  container_of(ref, struct cgroup_subsys_state, refcnt);    (in css_release())
    5335  err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);    (in css_create())
    [all …]
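These hits trace the full percpu_ref life cycle: percpu_ref_init() registers a release callback (css_release), percpu_ref_kill() starts teardown, percpu_ref_is_dying() lets later code detect that, and percpu_ref_exit() frees the per-CPU counters at the end. A compressed sketch of that cycle, assuming a hypothetical struct my_obj; note that the real cgroup code defers the final exit and free to a workqueue rather than doing it straight from the release callback:

    #include <linux/kernel.h>
    #include <linux/percpu-refcount.h>
    #include <linux/slab.h>

    /* Hypothetical object whose lifetime is driven by a percpu_ref. */
    struct my_obj {
            struct percpu_ref refcnt;
    };

    /* Runs once the count reaches zero after percpu_ref_kill(). */
    static void my_obj_release(struct percpu_ref *ref)
    {
            struct my_obj *obj = container_of(ref, struct my_obj, refcnt);

            percpu_ref_exit(&obj->refcnt);  /* cgroup defers this step to a workqueue */
            kfree(obj);
    }

    static struct my_obj *my_obj_create(void)
    {
            struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

            if (!obj)
                    return NULL;
            /* Starts with one reference; my_obj_release() fires when it hits zero. */
            if (percpu_ref_init(&obj->refcnt, my_obj_release, 0, GFP_KERNEL)) {
                    kfree(obj);
                    return NULL;
            }
            return obj;
    }

    /* Begin teardown: switch to atomic mode and drop the initial reference. */
    static void my_obj_destroy(struct my_obj *obj)
    {
            if (!percpu_ref_is_dying(&obj->refcnt))
                    percpu_ref_kill(&obj->refcnt);
    }

The attraction of percpu_ref here is that gets and puts stay per-CPU and cheap until kill time, which suits objects that take many short-lived references.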
cgroup-v1.c
    1165  if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt))    (in cgroup1_root_to_use())
    1242  if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))    (in cgroup1_get_tree())
    1250  if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {    (in cgroup1_get_tree())
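cgroup1 mounts take their root reference with percpu_ref_tryget_live(), which succeeds only while the root has not been killed, and cgroup1_get_tree() re-checks percpu_ref_is_dying() afterwards because it may have slept in between. A small sketch of the tryget side, reusing the hypothetical struct my_obj from the sketch above:

    #include <linux/percpu-refcount.h>

    /*
     * Hand out a new reference only while the object is still live,
     * i.e. percpu_ref_kill() has not been called on it yet.
     */
    static struct my_obj *my_obj_tryget_live(struct my_obj *obj)
    {
            if (!percpu_ref_tryget_live(&obj->refcnt))
                    return NULL;            /* already being torn down */
            return obj;
    }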
/kernel/bpf/
bpf_struct_ops.c
    22   refcount_t refcnt; \
    266  refcount_set(&uvalue->refcnt, refcount_read(&kvalue->refcnt));    (in bpf_struct_ops_map_sys_lookup_elem())
    344  if (uvalue->state || refcount_read(&uvalue->refcnt))    (in bpf_struct_ops_map_update_elem())
    451  refcount_set(&kvalue->refcnt, 1);    (in bpf_struct_ops_map_update_elem())
    500  if (refcount_dec_and_test(&st_map->kvalue.refcnt))    (in bpf_struct_ops_map_delete_elem())
    643  return refcount_inc_not_zero(&kvalue->refcnt);    (in bpf_struct_ops_get())
    651  if (refcount_dec_and_test(&kvalue->refcnt)) {    (in bpf_struct_ops_put())
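struct_ops values use a plain refcount_t: refcount_set(..., 1) when the element is installed, refcount_inc_not_zero() for new users, and refcount_dec_and_test() to detect the final put. A generic sketch of that trio, with hypothetical names (struct my_ops, my_ops_get(), my_ops_put()):

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct my_ops {                         /* hypothetical refcounted object */
            refcount_t refcnt;
    };

    static void my_ops_init(struct my_ops *ops)
    {
            refcount_set(&ops->refcnt, 1);          /* creator holds the first reference */
    }

    /* Get: only succeeds while at least one reference is still held. */
    static bool my_ops_get(struct my_ops *ops)
    {
            return refcount_inc_not_zero(&ops->refcnt);
    }

    /* Put: the last reference frees the object. */
    static void my_ops_put(struct my_ops *ops)
    {
            if (refcount_dec_and_test(&ops->refcnt))
                    kfree(ops);
    }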
cgroup.c
    28   percpu_ref_kill(&cgrp->bpf.refcnt);    (in cgroup_bpf_offline())
    149  percpu_ref_exit(&cgrp->bpf.refcnt);    (in cgroup_bpf_release())
    160  struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);    (in cgroup_bpf_release_fn())
    297  ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,    (in cgroup_bpf_inherit())
    325  percpu_ref_exit(&cgrp->bpf.refcnt);    (in cgroup_bpf_inherit())
    340  if (percpu_ref_is_zero(&desc->bpf.refcnt))    (in update_effective_progs())
    352  if (percpu_ref_is_zero(&desc->bpf.refcnt)) {    (in update_effective_progs())
    537  if (percpu_ref_is_zero(&desc->bpf.refcnt))    (in replace_effective_prog())
    681  if (percpu_ref_is_zero(&desc->bpf.refcnt))    (in purge_effective_progs())
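Here the percpu_ref release callback does not free anything directly: cgroup_bpf_release_fn() recovers the cgroup with container_of() and queues a work item, and the heavy cleanup plus percpu_ref_exit() happen later in cgroup_bpf_release(). A sketch of that deferral, with a hypothetical struct my_node:

    #include <linux/percpu-refcount.h>
    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct my_node {                        /* hypothetical per-cgroup state */
            struct percpu_ref refcnt;
            struct work_struct release_work;
    };

    static void my_node_release_workfn(struct work_struct *work)
    {
            struct my_node *node = container_of(work, struct my_node, release_work);

            percpu_ref_exit(&node->refcnt); /* count already reached zero */
            kfree(node);
    }

    /* percpu_ref release callback: may run in atomic context, so defer. */
    static void my_node_release_fn(struct percpu_ref *ref)
    {
            struct my_node *node = container_of(ref, struct my_node, refcnt);

            INIT_WORK(&node->release_work, my_node_release_workfn);
            queue_work(system_wq, &node->release_work);
    }

The percpu_ref_is_zero() checks in the update paths simply skip descendants whose bpf state has already been fully released.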
trampoline.c
    72   refcount_inc(&tr->refcnt);    (in bpf_trampoline_lookup())
    83   refcount_set(&tr->refcnt, 1);    (in bpf_trampoline_lookup())
    481  if (!refcount_dec_and_test(&tr->refcnt))    (in bpf_trampoline_put())
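bpf_trampoline_lookup() shows the lookup-or-create idiom: under a mutex, bump the count of an existing entry or insert a new one with the count preset to 1, and bpf_trampoline_put() removes the entry when refcount_dec_and_test() fires. A self-contained sketch of the same idiom using a simple key and list in place of the real hash table and trampoline_mutex; all names here are hypothetical:

    #include <linux/types.h>
    #include <linux/refcount.h>
    #include <linux/mutex.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    static DEFINE_MUTEX(obj_mutex);         /* serializes lookup, create and final put */
    static LIST_HEAD(obj_list);

    struct my_tramp {                       /* hypothetical keyed, refcounted object */
            struct list_head list;
            u64 key;
            refcount_t refcnt;
    };

    /* Find an existing object for @key or create one; returns a reference. */
    static struct my_tramp *my_tramp_lookup(u64 key)
    {
            struct my_tramp *tr;

            mutex_lock(&obj_mutex);
            list_for_each_entry(tr, &obj_list, list) {
                    if (tr->key == key) {
                            refcount_inc(&tr->refcnt);      /* existing object */
                            goto out;
                    }
            }
            tr = kzalloc(sizeof(*tr), GFP_KERNEL);
            if (tr) {
                    tr->key = key;
                    refcount_set(&tr->refcnt, 1);           /* first reference */
                    list_add(&tr->list, &obj_list);
            }
    out:
            mutex_unlock(&obj_mutex);
            return tr;
    }

    static void my_tramp_put(struct my_tramp *tr)
    {
            mutex_lock(&obj_mutex);
            if (refcount_dec_and_test(&tr->refcnt)) {
                    list_del(&tr->list);
                    kfree(tr);
            }
            mutex_unlock(&obj_mutex);
    }

Holding the mutex across the final put is what makes the refcount_inc() on a found entry safe: the count cannot drop to zero while the object is still on the list and the lock is held.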
cpumap.c
    68   atomic_t refcnt; /* Control when this struct can be free'ed */    (member)
    139  atomic_inc(&rcpu->refcnt);    (in get_cpu_map_entry())
    217  if (atomic_dec_and_test(&rcpu->refcnt)) {    (in put_cpu_map_entry())
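cpumap still counts references with a raw atomic_t and an explicit get/put pair. A minimal sketch of that pair, with a hypothetical struct my_entry; new code would normally reach for refcount_t instead, since it saturates and warns rather than wrapping on overflow or underflow:

    #include <linux/atomic.h>
    #include <linux/slab.h>

    struct my_entry {
            atomic_t refcnt;        /* controls when this struct can be freed */
    };

    static void get_my_entry(struct my_entry *e)
    {
            atomic_inc(&e->refcnt);
    }

    static void put_my_entry(struct my_entry *e)
    {
            if (atomic_dec_and_test(&e->refcnt))
                    kfree(e);       /* last reference is gone */
    }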
syscall.c
    503   if (atomic64_dec_and_test(&map->refcnt)) {    (in __bpf_map_put())
    848   atomic64_set(&map->refcnt, 1);    (in map_create())
    932   atomic64_inc(&map->refcnt);    (in bpf_map_inc())
    938   atomic64_inc(&map->refcnt);    (in bpf_map_inc_with_uref())
    978   refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);    (in __bpf_map_inc_not_zero())
    1777  if (atomic64_dec_and_test(&prog->aux->refcnt)) {    (in __bpf_prog_put())
    1886  atomic64_add(i, &prog->aux->refcnt);    (in bpf_prog_add())
    1897  WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);    (in bpf_prog_sub())
    1903  atomic64_inc(&prog->aux->refcnt);    (in bpf_prog_inc())
    1912  refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);    (in bpf_prog_inc_not_zero())
    [all …]
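BPF maps and programs keep their reference count in an atomic64_t, and atomic64_fetch_add_unless(..., 1, 0) is the "increment unless already zero" step used when an object is looked up by id, so a dying map or program cannot be resurrected. A sketch of the three operations, assuming a hypothetical struct my_map:

    #include <linux/atomic.h>
    #include <linux/slab.h>

    struct my_map {                         /* hypothetical, mirrors how bpf_map is counted */
            atomic64_t refcnt;
    };

    /* Unconditional get: the caller already holds a reference that pins @map. */
    static void my_map_inc(struct my_map *map)
    {
            atomic64_inc(&map->refcnt);
    }

    /*
     * Conditional get for id lookups: succeed only if the count was still
     * non-zero, i.e. the map has not already started being destroyed.
     */
    static struct my_map *my_map_inc_not_zero(struct my_map *map)
    {
            s64 refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);

            return refold > 0 ? map : NULL;
    }

    static void my_map_put(struct my_map *map)
    {
            if (atomic64_dec_and_test(&map->refcnt))
                    kfree(map);             /* last reference: free the map */
    }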
btf.c
    210   refcount_t refcnt;    (member)
    1484  if (btf && refcount_dec_and_test(&btf->refcnt)) {    (in btf_put())
    4187  refcount_set(&btf->refcnt, 1);    (in btf_parse())
    4402  refcount_set(&btf->refcnt, 1);    (in BTF_ID())
    5498  refcount_inc(&btf->refcnt);    (in btf_get_by_fd())
    5543  if (!btf || !refcount_inc_not_zero(&btf->refcnt))    (in btf_get_fd_by_id())
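The BTF hits illustrate when each flavour of get is appropriate: btf_get_by_fd() can use a plain refcount_inc() because the open file descriptor already pins the object, while btf_get_fd_by_id() goes through an ID table and must use refcount_inc_not_zero(). A sketch of the distinction, with a hypothetical struct my_btf:

    #include <linux/refcount.h>

    struct my_btf {                         /* hypothetical stand-in for struct btf */
            refcount_t refcnt;
    };

    /* Caller already owns a reference (e.g. via an open fd): plain inc is safe. */
    static void my_btf_get(struct my_btf *btf)
    {
            refcount_inc(&btf->refcnt);
    }

    /* Lookup by ID: the object may be dying, so refuse to revive a zero count. */
    static struct my_btf *my_btf_get_by_id(struct my_btf *btf)
    {
            if (!btf || !refcount_inc_not_zero(&btf->refcnt))
                    return NULL;
            return btf;
    }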
/kernel/rcu/
refscale.c
    250  static atomic_t refcnt;    (variable)
    257  atomic_inc(&refcnt);    (in ref_refcnt_section())
    258  atomic_dec(&refcnt);    (in ref_refcnt_section())
    267  atomic_inc(&refcnt);    (in ref_refcnt_delay_section())
    269  atomic_dec(&refcnt);    (in ref_refcnt_delay_section())
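refscale is a microbenchmark rather than a user of reference counting: its refcnt variant measures the cost of an atomic_inc()/atomic_dec() pair on a shared counter so it can be compared against RCU, SRCU and the other read-side mechanisms. Roughly, and only as an assumed sketch of the measured section, each reader loop looks like:

    #include <linux/atomic.h>

    static atomic_t refcnt;                 /* shared counter the benchmark hammers */

    /* One measured pass: nloops get/put pairs on a global atomic_t. */
    static void ref_refcnt_section(const int nloops)
    {
            int i;

            for (i = nloops; i >= 0; i--) {
                    atomic_inc(&refcnt);
                    atomic_dec(&refcnt);
            }
    }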
/kernel/
padata.c
    214   refcount_inc(&pd->refcnt);    (in padata_do_parallel())
    388   if (refcount_sub_and_test(cnt, &pd->refcnt))    (in padata_serial_worker())
    601   refcount_set(&pd->refcnt, 1);    (in padata_alloc_pd())
    675   if (refcount_dec_and_test(&ps->opd->refcnt))    (in padata_replace())
    1118  if (refcount_dec_and_test(&pd->refcnt))    (in padata_free_shell())
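padata takes one reference per queued job and lets the serial worker retire a whole batch with refcount_sub_and_test(cnt, ...), dropping cnt references in a single atomic operation instead of cnt separate puts. A sketch of that batched put, with hypothetical names (struct my_pd, my_pd_put_many()):

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct my_pd {                          /* hypothetical parallel-data instance */
            refcount_t refcnt;
    };

    /* Each submitted job pins the instance... */
    static void my_pd_get(struct my_pd *pd)
    {
            refcount_inc(&pd->refcnt);
    }

    /* ...and a worker that completed @cnt jobs drops them all at once. */
    static void my_pd_put_many(struct my_pd *pd, int cnt)
    {
            if (refcount_sub_and_test(cnt, &pd->refcnt))
                    kfree(pd);
    }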
workqueue.c
    182   int refcnt; /* PL: refcnt for unbound pools */    (member)
    209   int refcnt; /* L: reference count */    (member)
    1110  WARN_ON_ONCE(pwq->refcnt <= 0);    (in get_pwq())
    1111  pwq->refcnt++;    (in get_pwq())
    1124  if (likely(--pwq->refcnt))    (in put_pwq())
    1485  if (unlikely(!pwq->refcnt)) {    (in __queue_work())
    3473  pool->refcnt = 1;    (in init_worker_pool())
    3574  if (--pool->refcnt)    (in put_unbound_pool())
    3645  pool->refcnt++;    (in get_unbound_pool())
    3807  pwq->refcnt = 1;    (in init_pwq())
    [all …]
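Worker pools and pool_workqueues keep refcnt as a plain int because every get and put already happens under a lock (the "PL:" and "L:" prefixes in the comments name the lock that protects each field), so no atomic is needed. A sketch of the locked variant, assuming a single hypothetical pool_mutex:

    #include <linux/mutex.h>
    #include <linux/bug.h>
    #include <linux/slab.h>

    static DEFINE_MUTEX(pool_mutex);        /* hypothetical lock covering the refcount */

    struct my_pool {
            int refcnt;                     /* protected by pool_mutex */
    };

    /* All gets and puts run with pool_mutex held, so a plain int is enough. */
    static void get_my_pool(struct my_pool *pool)
    {
            lockdep_assert_held(&pool_mutex);
            WARN_ON_ONCE(pool->refcnt <= 0);
            pool->refcnt++;
    }

    static void put_my_pool(struct my_pool *pool)
    {
            lockdep_assert_held(&pool_mutex);
            if (--pool->refcnt)
                    return;                 /* still referenced */
            kfree(pool);
    }

The WARN_ON_ONCE() mirrors get_pwq(): taking a reference on an object whose count has already reached zero is a bug.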
module.c
    834   atomic_set(&mod->refcnt, MODULE_REF_BASE);    (in module_unload_init())
    840   atomic_inc(&mod->refcnt);    (in module_unload_init())
    942   ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);    (in try_release_module_ref())
    946   ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);    (in try_release_module_ref())
    977   return atomic_read(&mod->refcnt) - MODULE_REF_BASE;    (in module_refcount())
    1126  __ATTR(refcnt, 0444, show_refcnt, NULL);
    1132  atomic_inc(&module->refcnt);    (in __module_get())
    1147  atomic_inc_not_zero(&module->refcnt) != 0))    (in try_module_get())
    1164  ret = atomic_dec_if_positive(&module->refcnt);    (in module_put())
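Module reference counting starts the atomic_t at MODULE_REF_BASE rather than zero, so "no outside users" stays distinguishable from a fully dead count: try_release_module_ref() subtracts the bias and only lets the unload proceed if nothing is left, try_module_get() uses atomic_inc_not_zero() so a dying module cannot be pinned again, and module_put() uses atomic_dec_if_positive(). A condensed sketch of the same trick, with hypothetical my_* names and an assumed bias of 1:

    #include <linux/atomic.h>
    #include <linux/types.h>

    #define MY_REF_BASE     1               /* bias, mirrors MODULE_REF_BASE */

    static atomic_t my_refcnt = ATOMIC_INIT(MY_REF_BASE);

    /* Number of outside users, excluding the base bias. */
    static int my_refcount(void)
    {
            return atomic_read(&my_refcnt) - MY_REF_BASE;
    }

    static bool my_try_get(void)
    {
            return atomic_inc_not_zero(&my_refcnt); /* fails once released */
    }

    static void my_put(void)
    {
            atomic_dec_if_positive(&my_refcnt);     /* never goes below zero */
    }

    /*
     * Try to drop the base reference.  A zero result means no other users
     * remain and the release may proceed; otherwise put the bias back,
     * unless racing puts already brought the count to zero.
     */
    static int my_try_release(void)
    {
            int ret = atomic_sub_return(MY_REF_BASE, &my_refcnt);

            if (ret)
                    ret = atomic_add_unless(&my_refcnt, MY_REF_BASE, 0);
            return ret;                     /* 0: released; non-zero: still in use */
    }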
auditsc.c
    1942  n->name->refcnt++;    (in __audit_reusename())
    1985  name->refcnt++;    (in __audit_getname())
    2119  name->refcnt++;    (in __audit_inode())
    2246  found_child->name->refcnt++;    (in __audit_inode_child())
audit_tree.c
    1065  BUG_ON(refcount_read(&mark->refcnt) < 1);    (in audit_tree_freeing_mark())