/kernel/cgroup/
D | debug.c
      46  int i, refcnt;   in current_css_set_read() (local)
      54  refcnt = refcount_read(&cset->refcount);   in current_css_set_read()
      55  seq_printf(seq, "css_set %pK %d", cset, refcnt);   in current_css_set_read()
      56  if (refcnt > cset->nr_tasks)   in current_css_set_read()
      57  seq_printf(seq, " +%d", refcnt - cset->nr_tasks);   in current_css_set_read()
     126  int refcnt = refcount_read(&cset->refcount);   in cgroup_css_links_read() (local)
     148  seq_printf(seq, " %d", refcnt);   in cgroup_css_links_read()
     149  if (refcnt - cset->nr_tasks > 0) {   in cgroup_css_links_read()
     150  int extra = refcnt - cset->nr_tasks;   in cgroup_css_links_read()
D | cgroup.c
    2069  ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release,   in cgroup_setup_root()
    2156  percpu_ref_exit(&root_cgrp->self.refcnt);   in cgroup_setup_root()
    2291  !percpu_ref_is_dying(&root->cgrp.self.refcnt)) {   in cgroup_kill_sb()
    2293  percpu_ref_kill(&root->cgrp.self.refcnt);   in cgroup_kill_sb()
    3144  if (!css || !percpu_ref_is_dying(&css->refcnt))   in cgroup_lock_and_drain_offline()
    3267  WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt));   in cgroup_apply_control_enable()
    3307  WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt));   in cgroup_apply_control_disable()
    5374  percpu_ref_exit(&css->refcnt);   in css_free_rwork_fn()
    5473  container_of(ref, struct cgroup_subsys_state, refcnt);   in css_release()
    5574  err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);   in css_create()
          [all …]
D | cgroup-v1.c
    1168  if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt))   in cgroup1_root_to_use()
    1248  if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))   in cgroup1_get_tree()
    1256  if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {   in cgroup1_get_tree()
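
Taken together, the cgroup call sites above trace out the standard percpu_ref lifecycle: percpu_ref_init() with a release callback at creation (css_create(), cgroup_setup_root()), percpu_ref_tryget_live() on lookup (cgroup1_root_to_use(), cgroup1_get_tree()), percpu_ref_kill() at teardown (cgroup_kill_sb()), and percpu_ref_exit() once the release callback has run (css_free_rwork_fn()). A minimal sketch of that pattern follows; my_obj, my_obj_release(), my_obj_create(), and my_obj_teardown() are hypothetical names, not kernel code:

    #include <linux/container_of.h>
    #include <linux/percpu-refcount.h>
    #include <linux/slab.h>

    struct my_obj {                         /* hypothetical object */
            struct percpu_ref refcnt;
    };

    static void my_obj_release(struct percpu_ref *ref)
    {
            struct my_obj *obj = container_of(ref, struct my_obj, refcnt);

            percpu_ref_exit(ref);           /* allowed from the release callback */
            kfree(obj);
    }

    static struct my_obj *my_obj_create(void)
    {
            struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

            if (!obj)
                    return NULL;
            /* fast per-CPU gets/puts until percpu_ref_kill() switches the
             * counter to atomic mode and fails further tryget_live() calls */
            if (percpu_ref_init(&obj->refcnt, my_obj_release, 0, GFP_KERNEL)) {
                    kfree(obj);
                    return NULL;
            }
            return obj;
    }

    static void my_obj_teardown(struct my_obj *obj)
    {
            if (!percpu_ref_is_dying(&obj->refcnt))
                    percpu_ref_kill(&obj->refcnt);  /* release runs at zero */
    }

cgroup_kill_sb() guards its kill with percpu_ref_is_dying() in just this way, so a repeated teardown cannot kill the same ref twice.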
/kernel/trace/
D | trace_dynevent.c
      33  atomic_inc(&dyn_call->refcnt);   in trace_event_dyn_try_get_ref()
      46  if (WARN_ON_ONCE(atomic_read(&call->refcnt) <= 0)) {   in trace_event_dyn_put_ref()
      47  atomic_set(&call->refcnt, 0);   in trace_event_dyn_put_ref()
      51  atomic_dec(&call->refcnt);   in trace_event_dyn_put_ref()
      56  return atomic_read(&call->refcnt) != 0;   in trace_event_dyn_busy()
D | trace_events_user.c
     113  refcount_t refcnt;   (member)
     293  return refcount_read(&user->refcnt) == 1;   in user_event_last_ref()
     858  refcount_inc(&user->refcnt);   in find_user_event()
    1077  refcount_inc(&user->refcnt);   in user_event_reg()
    1082  refcount_dec(&user->refcnt);   in user_event_reg()
    1116  refcount_dec(&user->refcnt);   in user_event_create()
    1349  refcount_set(&user->refcnt, 2);   in user_event_parse()
    1381  refcount_dec(&user->refcnt);   in delete_user_event()
    1543  refcount_inc(&user->refcnt);   in user_events_ref_add()
    1614  refcount_dec(&user->refcnt);   in user_events_ioctl_reg()
          [all …]
D | trace_events.c
    2567  atomic_set(&call->refcnt, 0);   in __register_event()
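
trace_events_user.c shows the plain refcount_t idiom: user_event_parse() starts the count at 2 (presumably one reference for the registering caller and one held by the lookup table, since user_event_last_ref() treats a count of exactly 1 as "only the table's reference remains"). A sketch of that shape, with hypothetical my_event names:

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct my_event {                       /* hypothetical */
            refcount_t refcnt;
    };

    static struct my_event *my_event_create(void)
    {
            struct my_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

            if (!ev)
                    return NULL;
            /* two initial references, mirroring refcount_set(&user->refcnt, 2):
             * one for the creator, one held by a lookup structure */
            refcount_set(&ev->refcnt, 2);
            return ev;
    }

    static bool my_event_last_ref(struct my_event *ev)
    {
            /* only the lookup structure's own reference is left */
            return refcount_read(&ev->refcnt) == 1;
    }

Unlike atomic_t, refcount_t WARNs and saturates on overflow/underflow instead of wrapping, which is why the atomic_t variant in trace_dynevent.c has to open-code its own underflow check (lines 46-47 above).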
/kernel/bpf/
D | bpf_struct_ops.c
      22  refcount_t refcnt; \
     270  refcount_set(&uvalue->refcnt, refcount_read(&kvalue->refcnt));   in bpf_struct_ops_map_sys_lookup_elem()
     381  if (uvalue->state || refcount_read(&uvalue->refcnt))   in bpf_struct_ops_map_update_elem()
     494  refcount_set(&kvalue->refcnt, 1);   in bpf_struct_ops_map_update_elem()
     540  if (refcount_dec_and_test(&st_map->kvalue.refcnt))   in bpf_struct_ops_map_delete_elem()
     667  return refcount_inc_not_zero(&kvalue->refcnt);   in bpf_struct_ops_get()
     683  if (refcount_dec_and_test(&kvalue->refcnt)) {   in bpf_struct_ops_put()
D | cgroup.c
     132  int refcnt;   (member)
     169  cgroup_lsm_atype[i].refcnt++;   in bpf_cgroup_atype_get()
     177  if (--cgroup_lsm_atype[i].refcnt <= 0)   in bpf_cgroup_atype_put()
     179  WARN_ON_ONCE(cgroup_lsm_atype[i].refcnt < 0);   in bpf_cgroup_atype_put()
     195  percpu_ref_kill(&cgrp->bpf.refcnt);   in cgroup_bpf_offline()
     323  percpu_ref_exit(&cgrp->bpf.refcnt);   in cgroup_bpf_release()
     334  struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);   in cgroup_bpf_release_fn()
     471  ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,   in cgroup_bpf_inherit()
     499  percpu_ref_exit(&cgrp->bpf.refcnt);   in cgroup_bpf_inherit()
     514  if (percpu_ref_is_zero(&desc->bpf.refcnt))   in update_effective_progs()
          [all …]
D | cpumap.c
      71  atomic_t refcnt; /* Control when this struct can be free'ed */   (member)
     129  atomic_inc(&rcpu->refcnt);   in get_cpu_map_entry()
     154  if (atomic_dec_and_test(&rcpu->refcnt)) {   in put_cpu_map_entry()
D | syscall.c
     638  if (atomic64_dec_and_test(&map->refcnt)) {   in __bpf_map_put()
    1117  atomic64_set(&map->refcnt, 1);   in map_create()
    1214  atomic64_inc(&map->refcnt);   in bpf_map_inc()
    1220  atomic64_inc(&map->refcnt);   in bpf_map_inc_with_uref()
    1261  refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);   in __bpf_map_inc_not_zero()
    2072  if (atomic64_dec_and_test(&aux->refcnt)) {   in __bpf_prog_put()
    2206  atomic64_add(i, &prog->aux->refcnt);   in bpf_prog_add()
    2217  WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);   in bpf_prog_sub()
    2223  atomic64_inc(&prog->aux->refcnt);   in bpf_prog_inc()
    2232  refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);   in bpf_prog_inc_not_zero()
          [all …]
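
BPF maps and programs count references with a plain atomic64_t rather than refcount_t: a 64-bit counter cannot realistically overflow, so increments never need to fail even when bpf_prog_add() bumps the count by an arbitrary batch. The "take a reference only if still live" step uses atomic64_fetch_add_unless(). A sketch with a hypothetical my_map type:

    #include <linux/atomic.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    struct my_map {                         /* hypothetical */
            atomic64_t refcnt;
    };

    /* Take a reference only while the object is still live, mirroring
     * __bpf_map_inc_not_zero()/bpf_prog_inc_not_zero() above. */
    static int my_map_inc_not_zero(struct my_map *map)
    {
            s64 refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);

            return refold ? 0 : -ENOENT;    /* 0: already on its way out */
    }

    static void my_map_put(struct my_map *map)
    {
            if (atomic64_dec_and_test(&map->refcnt))
                    kfree(map);             /* last ref: free (or defer) */
    }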
D | trampoline.c
     145  refcount_inc(&tr->refcnt);   in bpf_trampoline_lookup()
     166  refcount_set(&tr->refcnt, 1);   in bpf_trampoline_lookup()
     830  if (!refcount_dec_and_test(&tr->refcnt))   in bpf_trampoline_put()
D | btf.c
     236  refcount_t refcnt;   (member)
    1675  refcount_inc(&btf->refcnt);   in btf_get()
    1680  if (btf && refcount_dec_and_test(&btf->refcnt)) {   in btf_put()
    5061  refcount_set(&btf->refcnt, 1);   in btf_parse()
    5239  refcount_set(&btf->refcnt, 1);   in BTF_ID()
    5319  refcount_set(&btf->refcnt, 1);   in btf_parse_module()
    6960  refcount_inc(&btf->refcnt);   in btf_get_by_fd()
    7033  if (!btf || !refcount_inc_not_zero(&btf->refcnt))   in btf_get_fd_by_id()
/kernel/sched/
D | core_sched.c
       8  refcount_t refcnt;   (member)
      17  refcount_set(&ck->refcnt, 1);   in sched_core_alloc_cookie()
      27  if (ptr && refcount_dec_and_test(&ptr->refcnt)) {   in sched_core_put_cookie()
      38  refcount_inc(&ptr->refcnt);   in sched_core_get_cookie()
/kernel/rcu/
D | refscale.c
     285  static atomic_t refcnt;   (variable)
     292  atomic_inc(&refcnt);   in ref_refcnt_section()
     293  atomic_dec(&refcnt);   in ref_refcnt_section()
     302  atomic_inc(&refcnt);   in ref_refcnt_delay_section()
     304  atomic_dec(&refcnt);   in ref_refcnt_delay_section()
/kernel/
D | padata.c
     201  refcount_inc(&pd->refcnt);   in padata_do_parallel()
     375  if (refcount_sub_and_test(cnt, &pd->refcnt))   in padata_serial_worker()
     588  refcount_set(&pd->refcnt, 1);   in padata_alloc_pd()
     662  if (refcount_dec_and_test(&ps->opd->refcnt))   in padata_replace()
    1105  if (refcount_dec_and_test(&pd->refcnt))   in padata_free_shell()
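
padata takes one reference per submitted job in padata_do_parallel() but retires a whole batch with a single refcount_sub_and_test(cnt, ...) in padata_serial_worker(), one atomic op instead of one per item. A sketch of that batching, with hypothetical my_pd/my_item names:

    #include <linux/list.h>
    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct my_item {
            struct list_head node;
    };

    struct my_pd {                          /* hypothetical parallel-data object */
            refcount_t refcnt;              /* one reference per in-flight item */
    };

    /* Complete a batch of items, then drop all of their references with
     * one atomic op, as padata_serial_worker() does above. */
    static void my_serial_worker(struct my_pd *pd, struct list_head *done)
    {
            struct my_item *item, *tmp;
            int cnt = 0;

            list_for_each_entry_safe(item, tmp, done, node) {
                    list_del(&item->node);
                    kfree(item);            /* item fully processed */
                    cnt++;
            }

            if (cnt && refcount_sub_and_test(cnt, &pd->refcnt))
                    kfree(pd);              /* last reference gone */
    }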
D | workqueue.c
     190  int refcnt; /* PL: refcnt for unbound pools */   (member)
     210  int refcnt; /* L: reference count */   (member)
    1123  WARN_ON_ONCE(pwq->refcnt <= 0);   in get_pwq()
    1124  pwq->refcnt++;   in get_pwq()
    1137  if (likely(--pwq->refcnt))   in put_pwq()
    1495  if (unlikely(!pwq->refcnt)) {   in __queue_work()
    3482  pool->refcnt = 1;   in init_worker_pool()
    3583  if (--pool->refcnt)   in put_unbound_pool()
    3654  pool->refcnt++;   in get_unbound_pool()
    3816  pwq->refcnt = 1;   in init_pwq()
          [all …]
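
Both workqueue counters are plain ints, not atomics: the "PL:" and "L:" annotations follow workqueue.c's locking legend (wq_pool_mutex and pool->lock respectively), and every touch happens with the named lock held, so an ordinary increment or decrement is safe. A sketch of that lock-protected idiom, with a hypothetical my_pool type and lock:

    #include <linux/bug.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_pool_lock);   /* hypothetical stand-in for the
                                             * "PL:"/"L:" locks noted above */

    struct my_pool {
            int refcnt;                     /* protected by my_pool_lock */
    };

    static void my_pool_get(struct my_pool *pool)
    {
            lockdep_assert_held(&my_pool_lock);
            WARN_ON_ONCE(pool->refcnt <= 0);        /* as get_pwq() does */
            pool->refcnt++;                         /* no atomics needed */
    }

    static bool my_pool_put(struct my_pool *pool)
    {
            lockdep_assert_held(&my_pool_lock);
            return --pool->refcnt == 0;     /* true: caller tears pool down */
    }

The auditsc.c matches below follow the same pattern: name->refcnt++ under the audit context's serialization rather than any atomic op.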
D | auditsc.c
    2211  n->name->refcnt++;   in __audit_reusename()
    2240  name->refcnt++;   in __audit_getname()
    2372  name->refcnt++;   in __audit_inode()
    2499  found_child->name->refcnt++;   in __audit_inode_child()
D | audit_tree.c
    1062  BUG_ON(refcount_read(&mark->refcnt) < 1);   in audit_tree_freeing_mark()
/kernel/module/
D | main.c
     551  atomic_set(&mod->refcnt, MODULE_REF_BASE);   in module_unload_init()
     557  atomic_inc(&mod->refcnt);   in module_unload_init()
     659  ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);   in try_release_module_ref()
     663  ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);   in try_release_module_ref()
     693  return atomic_read(&mod->refcnt) - MODULE_REF_BASE;   in module_refcount()
     820  __ATTR(refcnt, 0444, show_refcnt, NULL);
     826  atomic_inc(&module->refcnt);   in __module_get()
     841  atomic_inc_not_zero(&module->refcnt) != 0))   in try_module_get()
     858  ret = atomic_dec_if_positive(&module->refcnt);   in module_put()
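
The module counter is a biased refcount: module_unload_init() seeds it with MODULE_REF_BASE, module_refcount() subtracts the base back out when reporting, and try_release_module_ref() retires the base, restoring it if users still hold references. A sketch of that trick; MY_REF_BASE and my_try_release() are hypothetical stand-ins:

    #include <linux/atomic.h>
    #include <linux/bug.h>

    #define MY_REF_BASE     1       /* plays the role of MODULE_REF_BASE */

    /* Try to retire the base reference; put it back if other holders
     * remain. Mirrors the try_release_module_ref() logic above. */
    static int my_try_release(atomic_t *refcnt)
    {
            int ret = atomic_sub_return(MY_REF_BASE, refcnt);

            BUG_ON(ret < 0);
            if (ret)        /* users still hold references: restore the
                             * base, unless the count raced down to zero */
                    ret = atomic_add_unless(refcnt, MY_REF_BASE, 0);

            return ret;     /* 0 means the object may now go away */
    }

The bias is what lets try_module_get() use a bare atomic_inc_not_zero(): once the base has been retired the count reads zero and new gets fail, with no separate "dying" flag required.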