| /tools/testing/selftests/bpf/prog_tests/ |
| D | cgroup_hierarchical_stats.c |
|     54  } cgroups[] = {                                                   variable
|     64  #define N_CGROUPS ARRAY_SIZE(cgroups)
|    133  fd = create_and_get_cgroup(cgroups[i].path);                      in setup_cgroups()
|    137  cgroups[i].fd = fd;                                               in setup_cgroups()
|    138  cgroups[i].id = get_cgroup_id(cgroups[i].path);                   in setup_cgroups()
|    147  close(cgroups[i].fd);                                             in cleanup_cgroups()
|    175  if (join_parent_cgroup(cgroups[i].path))                          in attach_processes()
|    220  attach_counters[i] = get_attach_counter(cgroups[i].id,            in check_attach_counters()
|    221                                          cgroups[i].name);         in check_attach_counters()
|    288  err = setup_cgroup_iter(*skel, cgroups[i].fd, cgroups[i].name);   in setup_progs()
|    [all …]
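The helpers called here (create_and_get_cgroup(), get_cgroup_id(), join_parent_cgroup()) come from the selftests' cgroup_helpers.h. A minimal sketch of the setup/teardown pattern, assuming those helpers; the "/test" path and the wrapper function are illustrative, not the test's own layout:

	#include <unistd.h>
	#include "cgroup_helpers.h"

	static int setup_one_cgroup(unsigned long long *id)
	{
		int fd;

		if (setup_cgroup_environment())		/* mount cgroup2, create workdir */
			return -1;

		fd = create_and_get_cgroup("/test");	/* mkdir + open an fd to it */
		if (fd < 0)
			goto cleanup;

		*id = get_cgroup_id("/test");		/* kernfs id, usable as a map key */
		if (*id)
			return fd;

		close(fd);
	cleanup:
		cleanup_cgroup_environment();
		return -1;
	}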
|
| /tools/cgroup/ |
| D | memcg_shrinker.py |
|    11  cgroups = {}
|    17  cgroups[ino] = path
|    20  return cgroups
|    44  cgroups = scan_cgroups("/sys/fs/cgroup/")
|    58  cg = cgroups[ino]
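The script builds a dict from cgroup inode numbers to paths, relying on the fact that on cgroup v2 a cgroup's ID is the inode number of its directory. The same scan sketched in C; the fixed-size table and limits are illustrative:

	#define _XOPEN_SOURCE 500
	#include <ftw.h>
	#include <stdio.h>

	#define MAX_CGROUPS 4096

	static struct { unsigned long ino; char path[256]; } cgroups[MAX_CGROUPS];
	static int n_cgroups;

	/* record every directory under the cgroup mount: ino doubles as cgroup ID */
	static int visit(const char *path, const struct stat *st, int type,
			 struct FTW *ftw)
	{
		if (type == FTW_D && n_cgroups < MAX_CGROUPS) {
			cgroups[n_cgroups].ino = (unsigned long)st->st_ino;
			snprintf(cgroups[n_cgroups].path, sizeof(cgroups[0].path),
				 "%s", path);
			n_cgroups++;
		}
		return 0;
	}

	int main(void)
	{
		if (nftw("/sys/fs/cgroup", visit, 16, FTW_PHYS))
			return 1;
		for (int i = 0; i < n_cgroups; i++)
			printf("%lu %s\n", cgroups[i].ino, cgroups[i].path);
		return 0;
	}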
|
| /tools/testing/selftests/bpf/progs/ |
| D | percpu_alloc_cgrp_local_storage.c |
|    30  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0,        in BPF_PROG()
|    56  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);    in BPF_PROG()
|    89  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);    in BPF_PROG()
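bpf_cgrp_storage_get() hands out per-cgroup storage from a BPF_MAP_TYPE_CGRP_STORAGE map; the last argument is the create flag, passed as 0 in the calls above. A minimal sketch with an illustrative map name and attach point (on kernels that tag task->cgroups as RCU-protected, the access may additionally need bpf_rcu_read_lock(), as in rcu_read_lock.c below):

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	struct {
		__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
		__uint(map_flags, BPF_F_NO_PREALLOC);
		__type(key, int);
		__type(value, long);
	} events SEC(".maps");

	SEC("tp_btf/sys_enter")
	int BPF_PROG(count_enter, struct pt_regs *regs, long id)
	{
		struct task_struct *task = bpf_get_current_task_btf();
		long *val;

		/* dfl_cgrp is the task's cgroup on the default (v2) hierarchy */
		val = bpf_cgrp_storage_get(&events, task->cgroups->dfl_cgrp, 0,
					   BPF_LOCAL_STORAGE_GET_F_CREATE);
		if (val)
			__sync_fetch_and_add(val, 1);
		return 0;
	}

	char _license[] SEC("license") = "GPL";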
|
| D | cgrp_ls_recursion.c |
|    59  __on_update(task->cgroups->dfl_cgrp);           in BPF_PROG()
|    92  __on_enter(regs, id, task->cgroups->dfl_cgrp);  in BPF_PROG()
|
| D | rcu_read_lock.c |
|    33  struct css_set *cgroups;                in get_cgroup_id() local
|    41  cgroups = task->cgroups;                in get_cgroup_id()
|    42  if (!cgroups)                           in get_cgroup_id()
|    44  cgroup_id = cgroups->dfl_cgrp->kn->id;  in get_cgroup_id()
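This snippet brackets the task->cgroups dereference with BPF's RCU kfuncs so the verifier accepts the chain. A standalone sketch of that shape; the attach point is illustrative and x86-64-specific:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	void bpf_rcu_read_lock(void) __ksym;
	void bpf_rcu_read_unlock(void) __ksym;

	u64 cgroup_id;

	SEC("fentry.s/__x64_sys_getpgid")	/* sleepable fentry; arch-specific name */
	int BPF_PROG(get_cgroup_id)
	{
		struct task_struct *task = bpf_get_current_task_btf();
		struct css_set *cgroups;

		bpf_rcu_read_lock();
		cgroups = task->cgroups;	/* RCU-tagged: valid only inside the region */
		if (cgroups)
			cgroup_id = cgroups->dfl_cgrp->kn->id;
		bpf_rcu_read_unlock();
		return 0;
	}

	char _license[] SEC("license") = "GPL";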
|
| D | cgrp_ls_sleepable.c |
|     87  __no_rcu_lock(task->cgroups->dfl_cgrp);  in no_rcu_lock()
|    119  cgrp = task->cgroups->dfl_cgrp;          in yes_rcu_lock()
|
| D | cgrp_ls_tp_btf.c |
|     86  __on_enter(regs, id, task->cgroups->dfl_cgrp);  in BPF_PROG()
|    124  __on_exit(regs, id, task->cgroups->dfl_cgrp);   in BPF_PROG()
|
| D | profiler.inc.h |
|    255  struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);  in populate_cgroup_info()
|    266  BPF_CORE_READ(task, cgroups, subsys[i]);                                       in populate_cgroup_info()
|    627  struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);  in raw_tracepoint__sched_process_exit()
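BPF_CORE_READ() is shorthand for a chain of CO-RE-relocated probe reads. Roughly what line 255's one-liner expands to, written out with bpf_core_read() from bpf/bpf_core_read.h; the wrapper function is illustrative:

	#include <bpf/bpf_core_read.h>

	static __always_inline struct kernfs_node *
	task_dfl_kernfs(struct task_struct *task)
	{
		struct css_set *cset;
		struct cgroup *dfl_cgrp;
		struct kernfs_node *kn;

		/* each step: probe-read one pointer, offsets fixed up via BTF */
		if (bpf_core_read(&cset, sizeof(cset), &task->cgroups))
			return NULL;
		if (bpf_core_read(&dfl_cgrp, sizeof(dfl_cgrp), &cset->dfl_cgrp))
			return NULL;
		if (bpf_core_read(&kn, sizeof(kn), &dfl_cgrp->kn))
			return NULL;
		return kn;	/* == BPF_CORE_READ(task, cgroups, dfl_cgrp, kn) */
	}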
|
| /tools/perf/util/ |
| D | cgroup.c |
|    569  down_write(&env->cgroups.lock);                                 in cgroup__findnew()
|    570  cgrp = __cgroup__findnew(&env->cgroups.tree, id, true, path);   in cgroup__findnew()
|    571  up_write(&env->cgroups.lock);                                   in cgroup__findnew()
|    584  down_read(&env->cgroups.lock);                                  in cgroup__find()
|    585  cgrp = __cgroup__findnew(&env->cgroups.tree, id, false, NULL);  in cgroup__find()
|    586  up_read(&env->cgroups.lock);                                    in cgroup__find()
|    595  down_write(&env->cgroups.lock);                                 in perf_env__purge_cgroups()
|    596  while (!RB_EMPTY_ROOT(&env->cgroups.tree)) {                    in perf_env__purge_cgroups()
|    597  node = rb_first(&env->cgroups.tree);                            in perf_env__purge_cgroups()
|    600  rb_erase(node, &env->cgroups.tree);                             in perf_env__purge_cgroups()
|    [all …]
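cgroup__find() takes env->cgroups.lock for reading while cgroup__findnew() takes it for writing, since only the latter may insert into the tree. The same discipline sketched with a pthread rwlock; the tree type, __lookup(), and struct names are stand-ins, not perf's code:

	#include <pthread.h>
	#include <stdint.h>

	struct cgroup;				/* stand-in for perf's struct cgroup */
	struct cgroup_tree {
		pthread_rwlock_t lock;
		void *root;			/* stand-in for a struct rb_root */
	};

	/* hypothetical lookup: walks root, optionally inserting a new node */
	struct cgroup *__lookup(void *root, uint64_t id, int create, const char *path);

	struct cgroup *cgroup_find(struct cgroup_tree *t, uint64_t id)
	{
		struct cgroup *cgrp;

		pthread_rwlock_rdlock(&t->lock);	/* read-only: lookups can share */
		cgrp = __lookup(t->root, id, 0, NULL);
		pthread_rwlock_unlock(&t->lock);
		return cgrp;
	}

	struct cgroup *cgroup_findnew(struct cgroup_tree *t, uint64_t id,
				      const char *path)
	{
		struct cgroup *cgrp;

		pthread_rwlock_wrlock(&t->lock);	/* may insert: writer is exclusive */
		cgrp = __lookup(t->root, id, 1, path);
		pthread_rwlock_unlock(&t->lock);
		return cgrp;
	}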
|
| D | cgroup.h | 31 int evlist__expand_cgroup(struct evlist *evlist, const char *cgroups,
|
| D | bpf_lock_contention.c |
|    177  read_all_cgroups(&con->cgroups);                               in lock_contention_prepare()
|    373  struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id);  in lock_contention_get_name()
|    534  while (!RB_EMPTY_ROOT(&con->cgroups)) {                        in lock_contention_finish()
|    535  struct rb_node *node = rb_first(&con->cgroups);                in lock_contention_finish()
|    538  rb_erase(node, &con->cgroups);                                 in lock_contention_finish()
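lock_contention_finish() drains the tree with the usual rb_first()/rb_erase() loop. In isolation, assuming the tools/include rbtree and a hypothetical node member embedded in struct cgroup:

	#include <stdlib.h>
	#include <linux/rbtree.h>

	struct cgroup {
		struct rb_node node;	/* hypothetical embedding for this sketch */
		char *name;
	};

	static void purge_cgroups(struct rb_root *root)
	{
		while (!RB_EMPTY_ROOT(root)) {	/* take leftmost node until empty */
			struct rb_node *node = rb_first(root);
			struct cgroup *cgrp = rb_entry(node, struct cgroup, node);

			rb_erase(node, root);
			free(cgrp->name);
			free(cgrp);
		}
	}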
|
| D | lock-contention.h | 141 struct rb_root cgroups; member
|
| D | env.h | 125 } cgroups; member
|
| /tools/perf/Documentation/ |
| D | perf-bench.txt |
|    128  --cgroups=::
|    129  Names of cgroups for sender and receiver, separated by a comma.
|    131  Note that perf doesn't create nor delete the cgroups, so users should
|    132  make sure that the cgroups exist and are accessible before use.
|    154  (executing 1000000 pipe operations between cgroups)
|
| D | perf-lock.txt | 216 Show lock contention only in the given cgroups (comma separated list).
|
| D | perf-trace.txt |
|    75  Look for cgroups to set at the /sys/fs/cgroup/perf_event directory, then
|    89  Multiple cgroups:
|
| D | perf-top.txt |
|    326  container "name" are monitored when they run on the monitored CPUs. Multiple cgroups
|    334  --all-cgroups::
|
| /tools/perf/util/bpf_skel/ |
| D | off_cpu.bpf.c |
|    126  return BPF_CORE_READ(t, cgroups, dfl_cgrp, kn, id);                in get_cgroup_id()
|    137  cgrp = BPF_CORE_READ(t, cgroups, subsys[perf_subsys_id], cgroup);  in get_cgroup_id()
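get_cgroup_id() picks between the two hierarchies: on v2 the id lives on the default cgroup's kernfs node, while v1 goes through the perf_event subsystem's css slot. A condensed sketch; how use_cgroup_v2 and perf_subsys_id get resolved is elided here, and they appear as plain parameters:

	static inline __u64 task_cgroup_id(struct task_struct *t,
					   bool use_cgroup_v2, int perf_subsys_id)
	{
		struct cgroup *cgrp;

		if (use_cgroup_v2)	/* v2: id of the default-hierarchy cgroup */
			return BPF_CORE_READ(t, cgroups, dfl_cgrp, kn, id);

		/* v1: cgroup attached to the perf_event subsystem's css slot */
		cgrp = BPF_CORE_READ(t, cgroups, subsys[perf_subsys_id], cgroup);
		return BPF_CORE_READ(cgrp, kn, id);
	}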
|
| D | bperf_cgroup.bpf.c | 97 cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_subsys_id], cgroup); in get_cgroup_v1_idx()
|
| D | lock_contention.bpf.c | 167 cgrp = BPF_CORE_READ(task, cgroups, subsys[perf_subsys_id], cgroup); in get_current_cgroup_id()
|
| /tools/perf/tests/shell/ |
| D | record_bpf_filter.sh | 154 -a --all-cgroups --synth=cgroup -o "${perfdata}" true 2> /dev/null
|
| D | record.sh | 211 if ! perf record -aB --synth=cgroup --all-cgroups -o "${perfdata}" ${testprog} 2> /dev/null
|
| /tools/perf/util/bpf_skel/vmlinux/ |
| D | vmlinux.h | 107 struct css_set *cgroups; member
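This vmlinux.h is perf's hand-trimmed stand-in for a generated one: each struct carries only the members its BPF skeletons read, and preserve_access_index lets CO-RE relocate those offsets against the running kernel's BTF. The style, illustrated with the cgroup chain used above:

	struct kernfs_node {
		u64 id;
	} __attribute__((preserve_access_index));

	struct cgroup {
		struct kernfs_node *kn;
	} __attribute__((preserve_access_index));

	struct css_set {
		struct cgroup *dfl_cgrp;
	} __attribute__((preserve_access_index));

	struct task_struct {
		struct css_set *cgroups;
	} __attribute__((preserve_access_index));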
|
| /tools/testing/selftests/mm/ |
| D | charge_reserved_hugetlb.sh |
|    506  echo Test normal case, multiple cgroups.
|    552  echo Test normal case with write, multiple cgroups.
|
| /tools/bpf/bpftool/Documentation/ |
| D | bpftool-cgroup.rst | 63 Iterate over all cgroups in *CGROUP_ROOT* and list all attached programs.
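Per cgroup, that listing boils down to a bpf_prog_query() on the cgroup directory's file descriptor; bpftool walks the hierarchy and repeats the query for each attach type. A sketch for a single cgroup and one illustrative attach type:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <bpf/bpf.h>

	static void list_cgroup_progs(const char *cgroup_path)
	{
		__u32 prog_ids[64], prog_cnt = 64;
		int fd = open(cgroup_path, O_RDONLY);

		if (fd < 0)
			return;
		/* query one attach type; bpftool iterates over all of them */
		if (!bpf_prog_query(fd, BPF_CGROUP_INET_INGRESS, 0, NULL,
				    prog_ids, &prog_cnt))
			for (__u32 i = 0; i < prog_cnt; i++)
				printf("%s: prog id %u\n", cgroup_path, prog_ids[i]);
		close(fd);
	}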
|