/kernel/cgroup/
freezer.c
    15   static void cgroup_propagate_frozen(struct cgroup *cgrp, bool frozen)   in cgroup_propagate_frozen() argument
    25   while ((cgrp = cgroup_parent(cgrp))) {   in cgroup_propagate_frozen()
    27   cgrp->freezer.nr_frozen_descendants += desc;   in cgroup_propagate_frozen()
    28   if (!test_bit(CGRP_FROZEN, &cgrp->flags) &&   in cgroup_propagate_frozen()
    29   test_bit(CGRP_FREEZE, &cgrp->flags) &&   in cgroup_propagate_frozen()
    30   cgrp->freezer.nr_frozen_descendants ==   in cgroup_propagate_frozen()
    31   cgrp->nr_descendants) {   in cgroup_propagate_frozen()
    32   set_bit(CGRP_FROZEN, &cgrp->flags);   in cgroup_propagate_frozen()
    33   cgroup_file_notify(&cgrp->events_file);   in cgroup_propagate_frozen()
    34   TRACE_CGROUP_PATH(notify_frozen, cgrp, 1);   in cgroup_propagate_frozen()
    [all …]
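The CGRP_FROZEN propagation above is what userspace observes through the cgroup v2 freezer interface: writing to cgroup.freeze and watching cgroup.events, the events_file notified by cgroup_file_notify(). A minimal userspace sketch, assuming cgroup2 is mounted at /sys/fs/cgroup and a child cgroup named "demo" already exists (both paths are assumptions, not taken from the listing):

    /* Ask the kernel to freeze the "demo" cgroup, then read cgroup.events. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[256];
        ssize_t n;
        int fd;

        fd = open("/sys/fs/cgroup/demo/cgroup.freeze", O_WRONLY);
        if (fd < 0 || write(fd, "1", 1) != 1)
            return 1;
        close(fd);

        fd = open("/sys/fs/cgroup/demo/cgroup.events", O_RDONLY);
        if (fd < 0)
            return 1;
        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
            buf[n] = '\0';
            fputs(buf, stdout);  /* "populated ..." and "frozen ..." lines */
        }
        close(fd);
        return 0;
    }

Freezing is asynchronous: "frozen 1" only shows up once every descendant is frozen, which is exactly the nr_frozen_descendants == nr_descendants condition checked in cgroup_propagate_frozen().
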
cgroup.c
    171  struct cgroup_root cgrp_dfl_root = { .cgrp.rstat_cpu = &cgrp_dfl_root_rstat_cpu };
    243  static int cgroup_apply_control(struct cgroup *cgrp);
    244  static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
    247  static int cgroup_destroy_locked(struct cgroup *cgrp);
    248  static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
    253  struct cgroup *cgrp, struct cftype cfts[],
    317  bool cgroup_on_dfl(const struct cgroup *cgrp)   in cgroup_on_dfl() argument
    319  return cgrp->root == &cgrp_dfl_root;   in cgroup_on_dfl()
    353  static bool cgroup_has_tasks(struct cgroup *cgrp)   in cgroup_has_tasks() argument
    355  return cgrp->nr_populated_csets;   in cgroup_has_tasks()
    [all …]
rstat.c
    13   static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
    15   static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)   in cgroup_rstat_cpu() argument
    17   return per_cpu_ptr(cgrp->rstat_cpu, cpu);   in cgroup_rstat_cpu()
    29   void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)   in cgroup_rstat_updated() argument
    42   if (data_race(cgroup_rstat_cpu(cgrp, cpu)->updated_next))   in cgroup_rstat_updated()
    49   struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);   in cgroup_rstat_updated()
    50   struct cgroup *parent = cgroup_parent(cgrp);   in cgroup_rstat_updated()
    62   rstatc->updated_next = cgrp;   in cgroup_rstat_updated()
    68   prstatc->updated_children = cgrp;   in cgroup_rstat_updated()
    70   cgrp = parent;   in cgroup_rstat_updated()
    [all …]
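cgroup_rstat_updated() only links the cgroup into a per-cpu updated tree; the expensive aggregation is deferred until something flushes, for example when userspace reads a stat file backed by rstat such as cpu.stat. A small reader sketch, again assuming cgroup2 at /sys/fs/cgroup and an existing "demo" cgroup (assumed paths):

    /* Reading cpu.stat makes the kernel flush the per-cpu rstat state that
     * cgroup_rstat_updated() queued, then prints the aggregated counters. */
    #include <stdio.h>

    int main(void)
    {
        char line[256];
        FILE *f = fopen("/sys/fs/cgroup/demo/cpu.stat", "r");

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);  /* usage_usec, user_usec, system_usec, ... */
        fclose(f);
        return 0;
    }
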
cgroup-v1.c
    197  void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)   in cgroup1_pidlist_destroy_all() argument
    201  mutex_lock(&cgrp->pidlist_mutex);   in cgroup1_pidlist_destroy_all()
    202  list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)   in cgroup1_pidlist_destroy_all()
    204  mutex_unlock(&cgrp->pidlist_mutex);   in cgroup1_pidlist_destroy_all()
    207  BUG_ON(!list_empty(&cgrp->pidlists));   in cgroup1_pidlist_destroy_all()
    278  static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,   in cgroup_pidlist_find() argument
    285  lockdep_assert_held(&cgrp->pidlist_mutex);   in cgroup_pidlist_find()
    287  list_for_each_entry(l, &cgrp->pidlists, links)   in cgroup_pidlist_find()
    299  static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,   in cgroup_pidlist_find_create() argument
    304  lockdep_assert_held(&cgrp->pidlist_mutex);   in cgroup_pidlist_find_create()
    [all …]
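These pidlists exist only on cgroup v1 hierarchies, where they back the "tasks" and "cgroup.procs" files. A tiny reader sketch; the v1 mount point, controller, and cgroup name are assumptions:

    #include <stdio.h>

    int main(void)
    {
        char line[64];
        /* Assumed v1 layout: the memory controller mounted at
         * /sys/fs/cgroup/memory with a child group "demo". */
        FILE *f = fopen("/sys/fs/cgroup/memory/demo/cgroup.procs", "r");

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);  /* one PID per line, sorted by the pidlist code */
        fclose(f);
        return 0;
    }
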
cgroup-internal.h
    27   #define TRACE_CGROUP_PATH(type, cgrp, ...) \   argument
    33   cgroup_path(cgrp, trace_cgroup_path, \
    35   trace_cgroup_##type(cgrp, trace_cgroup_path, \
    96   struct cgroup *cgrp;   member
    184  static inline bool cgroup_is_dead(const struct cgroup *cgrp)   in cgroup_is_dead() argument
    186  return !(cgrp->self.flags & CSS_ONLINE);   in cgroup_is_dead()
    189  static inline bool notify_on_release(const struct cgroup *cgrp)   in notify_on_release() argument
    191  return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);   in notify_on_release()
    222  bool cgroup_on_dfl(const struct cgroup *cgrp);
    223  bool cgroup_is_thread_root(struct cgroup *cgrp);
    [all …]
debug.c
    101  struct cgroup *c = link->cgrp;   in current_css_set_cg_links_read()
    206  struct cgroup *cgrp;   in cgroup_subsys_states_read() local
    212  cgrp = cgroup_kn_lock_live(of->kn, false);   in cgroup_subsys_states_read()
    213  if (!cgrp)   in cgroup_subsys_states_read()
    217  css = rcu_dereference_check(cgrp->subsys[ss->id], true);   in cgroup_subsys_states_read()
    258  struct cgroup *cgrp;   in cgroup_masks_read() local
    260  cgrp = cgroup_kn_lock_live(of->kn, false);   in cgroup_masks_read()
    261  if (!cgrp)   in cgroup_masks_read()
    264  cgroup_masks_read_one(seq, "subtree_control", cgrp->subtree_control);   in cgroup_masks_read()
    265  cgroup_masks_read_one(seq, "subtree_ss_mask", cgrp->subtree_ss_mask);   in cgroup_masks_read()
cpuset.c
    4179  struct cgroup *cgrp;   in cpuset_print_current_mems_allowed() local
    4183  cgrp = task_cs(current)->css.cgroup;   in cpuset_print_current_mems_allowed()
    4185  pr_cont_cgroup_name(cgrp);   in cpuset_print_current_mems_allowed()
/kernel/bpf/
cgroup.c
    31   bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,   in bpf_prog_run_array_cg() argument
    46   array = rcu_dereference(cgrp->effective[atype]);   in bpf_prog_run_array_cg()
    71   struct cgroup *cgrp;   in __cgroup_bpf_run_lsm_sock() local
    80   cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);   in __cgroup_bpf_run_lsm_sock()
    81   if (likely(cgrp))   in __cgroup_bpf_run_lsm_sock()
    82   ret = bpf_prog_run_array_cg(&cgrp->bpf,   in __cgroup_bpf_run_lsm_sock()
    93   struct cgroup *cgrp;   in __cgroup_bpf_run_lsm_socket() local
    102  cgrp = sock_cgroup_ptr(&sock->sk->sk_cgrp_data);   in __cgroup_bpf_run_lsm_socket()
    103  if (likely(cgrp))   in __cgroup_bpf_run_lsm_socket()
    104  ret = bpf_prog_run_array_cg(&cgrp->bpf,   in __cgroup_bpf_run_lsm_socket()
    [all …]
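bpf_prog_run_array_cg() above is the common runner for BPF programs attached to a cgroup: it walks the effective[] program array and combines their return codes. A minimal sketch of one such program on the BPF side, written against libbpf conventions (the section name, program name, and allow-everything policy are illustrative assumptions):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Egress filter: the verdict returned here is what the effective[]
     * array run by bpf_prog_run_array_cg() collects for each packet. */
    SEC("cgroup_skb/egress")
    int allow_all_egress(struct __sk_buff *skb)
    {
        return 1;  /* 1 = allow the packet, 0 = drop it */
    }

    char _license[] SEC("license") = "GPL";

Once loaded, it can be attached to an open cgroup directory fd with bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0), or with bpftool cgroup attach.
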
cgroup_iter.c
    165  struct cgroup *cgrp = aux->cgroup.start;   in BTF_ID_LIST_SINGLE() local
    172  p->start_css = &cgrp->self;   in BTF_ID_LIST_SINGLE()
    201  struct cgroup *cgrp;   in bpf_iter_attach_cgroup() local
    213  cgrp = cgroup_v1v2_get_from_fd(fd);   in bpf_iter_attach_cgroup()
    215  cgrp = cgroup_get_from_id(id);   in bpf_iter_attach_cgroup()
    217  cgrp = cgroup_get_from_path("/");   in bpf_iter_attach_cgroup()
    219  if (IS_ERR(cgrp))   in bpf_iter_attach_cgroup()
    220  return PTR_ERR(cgrp);   in bpf_iter_attach_cgroup()
    222  aux->cgroup.start = cgrp;   in bpf_iter_attach_cgroup()
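bpf_iter_attach_cgroup() above picks the starting cgroup from an fd, an id, or the root path, depending on what userspace put into bpf_iter_link_info. A hedged userspace sketch using libbpf; the cgroup path, the pre-order walk, and the already-loaded iter/cgroup program passed in by the caller are all assumptions:

    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* prog: a loaded iter/cgroup BPF program provided by the caller. */
    static struct bpf_link *attach_cgroup_iter(struct bpf_program *prog)
    {
        union bpf_iter_link_info linfo = {};
        DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
        int cg_fd = open("/sys/fs/cgroup/demo", O_RDONLY);

        if (cg_fd < 0)
            return NULL;

        /* Start from the cgroup behind cg_fd and walk its subtree pre-order;
         * cgroup_v1v2_get_from_fd() in the listing above consumes this fd. */
        linfo.cgroup.cgroup_fd = cg_fd;
        linfo.cgroup.order = BPF_CGROUP_ITER_DESCENDANTS_PRE;
        opts.link_info = &linfo;
        opts.link_info_len = sizeof(linfo);

        return bpf_program__attach_iter(prog, &opts);
    }

The returned link is then turned into a readable iterator with bpf_iter_create(bpf_link__fd(link)) and consumed with read().
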
helpers.c
    396  struct cgroup *cgrp;   in BPF_CALL_0() local
    400  cgrp = task_dfl_cgroup(current);   in BPF_CALL_0()
    401  cgrp_id = cgroup_id(cgrp);   in BPF_CALL_0()
    415  struct cgroup *cgrp;   in BPF_CALL_1() local
    420  cgrp = task_dfl_cgroup(current);   in BPF_CALL_1()
    421  ancestor = cgroup_ancestor(cgrp, ancestor_level);   in BPF_CALL_1()
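These two BPF_CALL bodies implement the bpf_get_current_cgroup_id() and bpf_get_current_ancestor_cgroup_id() helpers. A minimal sketch of a tracing program calling the first one; the tracepoint chosen is an arbitrary assumption:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("tracepoint/syscalls/sys_enter_openat")
    int trace_openat(void *ctx)
    {
        /* Id of the current task's cgroup on the default hierarchy,
         * i.e. cgroup_id(task_dfl_cgroup(current)) in the listing above. */
        __u64 cgid = bpf_get_current_cgroup_id();

        bpf_printk("openat from cgroup id %llu", cgid);
        return 0;
    }

    char _license[] SEC("license") = "GPL";
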
/kernel/events/
core.c
    703  if (!event->cgrp)   in perf_cgroup_match()
    707  if (!cpuctx->cgrp)   in perf_cgroup_match()
    716  return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,   in perf_cgroup_match()
    717  event->cgrp->css.cgroup);   in perf_cgroup_match()
    722  css_put(&event->cgrp->css);   in perf_detach_cgroup()
    723  event->cgrp = NULL;   in perf_detach_cgroup()
    728  return event->cgrp != NULL;   in is_cgroup_event()
    735  t = per_cpu_ptr(event->cgrp->info, event->cpu);   in perf_cgroup_event_time()
    743  t = per_cpu_ptr(event->cgrp->info, event->cpu);   in perf_cgroup_event_time_now()
    763  struct perf_cgroup *cgrp = cpuctx->cgrp;   in update_cgrp_time_from_cpuctx() local
    [all …]
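event->cgrp is only set for cgroup-scoped perf events, which userspace creates by passing an open cgroup directory fd as the pid argument of perf_event_open() together with PERF_FLAG_PID_CGROUP. A sketch, with the cgroup path and the choice of counting cycles on CPU 0 as assumptions:

    #include <fcntl.h>
    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        struct perf_event_attr attr;
        int cg_fd, ev_fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;

        cg_fd = open("/sys/fs/cgroup/demo", O_RDONLY);
        if (cg_fd < 0)
            return 1;

        /* pid = cgroup fd; cpu must be >= 0 for cgroup events, so this
         * counts cycles for the cgroup's tasks while they run on CPU 0. */
        ev_fd = syscall(SYS_perf_event_open, &attr, cg_fd, 0, -1,
                        PERF_FLAG_PID_CGROUP);
        return ev_fd < 0 ? 1 : 0;
    }
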
/kernel/trace/
bpf_trace.c
    805  struct cgroup *cgrp;   in BPF_CALL_2() local
    810  cgrp = READ_ONCE(array->ptrs[idx]);   in BPF_CALL_2()
    811  if (unlikely(!cgrp))   in BPF_CALL_2()
    814  return task_under_cgroup_hierarchy(current, cgrp);   in BPF_CALL_2()
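This BPF_CALL_2 body is the bpf_current_task_under_cgroup() helper: it looks up a cgroup pointer in a BPF_MAP_TYPE_CGROUP_ARRAY and checks whether current sits in that hierarchy. A BPF-side sketch; the map name, the tracepoint, and the use of slot 0 are assumptions:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
        __uint(max_entries, 1);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
    } cgroup_map SEC(".maps");

    SEC("tracepoint/syscalls/sys_enter_unlinkat")
    int trace_unlinkat(void *ctx)
    {
        /* Non-zero when current is in (or below) the cgroup stored in
         * slot 0, mirroring task_under_cgroup_hierarchy() above. */
        if (bpf_current_task_under_cgroup(&cgroup_map, 0))
            bpf_printk("unlinkat from watched cgroup");
        return 0;
    }

    char _license[] SEC("license") = "GPL";

Userspace fills slot 0 by opening the cgroup directory and storing its fd with bpf_map_update_elem(map_fd, &key, &cgroup_fd, BPF_ANY).
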