| /tools/testing/selftests/bpf/progs/ |
| D | task_kfunc_failure.c |
      19  static struct __tasks_kfunc_map_value *insert_lookup_task(struct task_struct *task)  [in insert_lookup_task(), argument]
      23  status = tasks_kfunc_map_insert(task);  [in insert_lookup_task()]
      27  return tasks_kfunc_map_value_lookup(task);  [in insert_lookup_task()]
      32  int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_flags)  [in BPF_PROG(), argument]
      37  v = insert_lookup_task(task);  [in BPF_PROG()]
      42  acquired = bpf_task_acquire(v->task);  [in BPF_PROG()]
      53  int BPF_PROG(task_kfunc_acquire_fp, struct task_struct *task, u64 clone_flags)  [in BPF_PROG(), argument]
      69  int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe, struct task_struct *task, u64 clone_flags)  [in BPF_PROG(), argument]
      74  acquired = bpf_task_acquire(task);  [in BPF_PROG()]
      84  int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe_rcu, struct task_struct *task, u64 clone_flags)  [in BPF_PROG(), argument]
      [all …]
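These failure programs hand bpf_task_acquire() pointers the verifier must treat as untrusted: a task read back out of a map value (v->task), a frame pointer, or a kretprobe argument. For contrast, a minimal sketch of the pattern the verifier accepts; the program name is hypothetical and the kfunc declarations follow the selftests' convention:

      #include "vmlinux.h"
      #include <bpf/bpf_helpers.h>
      #include <bpf/bpf_tracing.h>

      struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
      void bpf_task_release(struct task_struct *p) __ksym;

      SEC("tp_btf/task_newtask")
      int BPF_PROG(task_acquire_release_ok, struct task_struct *task, u64 clone_flags)
      {
              struct task_struct *acquired;

              /* 'task' is a trusted BTF pointer in a tp_btf program, so
               * acquiring it is legal ... */
              acquired = bpf_task_acquire(task);
              if (!acquired)
                      return 0;
              /* ... provided every acquired reference is released. */
              bpf_task_release(acquired);
              return 0;
      }

      char _license[] SEC("license") = "GPL";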
|
| D | rcu_read_lock.c |
      32  struct task_struct *task;  [in get_cgroup_id(), local]
      35  task = bpf_get_current_task_btf();  [in get_cgroup_id()]
      36  if (task->pid != target_pid)  [in get_cgroup_id()]
      41  cgroups = task->cgroups;  [in get_cgroup_id()]
      53  struct task_struct *task, *real_parent;  [in task_succ(), local]
      57  task = bpf_get_current_task_btf();  [in task_succ()]
      58  if (task->pid != target_pid)  [in task_succ()]
      63  real_parent = task->real_parent;  [in task_succ()]
      82  struct task_struct *task, *real_parent;  [in no_lock(), local]
      85  task = bpf_get_current_task_btf();  [in no_lock()]
      [all …]
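task->real_parent is RCU-protected, which is why this file pairs task_succ() with a no_lock() counterpart the verifier rejects. A sketch of the locked pattern, assuming a sleepable attach point (fentry.s/do_nanosleep is an arbitrary illustration; sleepable programs get no implicit RCU section; includes and license boilerplate as in the first sketch above):

      void bpf_rcu_read_lock(void) __ksym;
      void bpf_rcu_read_unlock(void) __ksym;

      SEC("fentry.s/do_nanosleep")
      int rcu_walk_parent(void *ctx)
      {
              struct task_struct *task, *real_parent;

              task = bpf_get_current_task_btf();
              bpf_rcu_read_lock();
              /* the RCU-tagged pointer is only valid inside the section */
              real_parent = task->real_parent;
              bpf_printk("parent pid: %d", real_parent->pid);
              bpf_rcu_read_unlock();
              return 0;
      }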
|
| D | task_kfunc_success.c |
      23  struct task_struct *bpf_task_acquire___one(struct task_struct *task) __ksym __weak;
      39  static int test_acquire_release(struct task_struct *task)  [in test_acquire_release(), argument]
      57  acquired = bpf_task_acquire(task);  [in test_acquire_release()]
      67  int BPF_PROG(test_task_kfunc_flavor_relo, struct task_struct *task, u64 clone_flags)  [in BPF_PROG(), argument]
      73  acquired = bpf_task_acquire___one(task);  [in BPF_PROG()]
      79  acquired = bpf_task_acquire___two(task, &fake_ctx);  [in BPF_PROG()]
      97  int BPF_PROG(test_task_kfunc_flavor_relo_not_found, struct task_struct *task, u64 clone_flags)  [in BPF_PROG(), argument]
      111  int BPF_PROG(test_task_acquire_release_argument, struct task_struct *task, u64 clone_flags)  [in BPF_PROG(), argument]
      116  return test_acquire_release(task);  [in BPF_PROG()]
      120  int BPF_PROG(test_task_acquire_release_current, struct task_struct *task, u64 clone_flags)  [in BPF_PROG(), argument]
      [all …]
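The ___one/___two suffixes are CO-RE "flavors": libbpf strips everything from the triple underscore onward when resolving a __ksym against kernel BTF, so one object can carry two candidate signatures for the same kfunc and probe which one the running kernel exposes. A condensed sketch of that probing, hedged against the real test's details (boilerplate as above):

      struct task_struct *bpf_task_acquire___one(struct task_struct *task) __ksym __weak;
      struct task_struct *bpf_task_acquire___two(struct task_struct *p, void *ctx) __ksym __weak;
      void bpf_task_release(struct task_struct *p) __ksym;

      SEC("tp_btf/task_newtask")
      int BPF_PROG(flavor_relo, struct task_struct *task, u64 clone_flags)
      {
              struct task_struct *acquired = NULL;
              int fake_ctx = 42;

              /* weak ksyms resolve to NULL when the kernel lacks them */
              if (bpf_ksym_exists(bpf_task_acquire___one))
                      acquired = bpf_task_acquire___one(task);
              else if (bpf_ksym_exists(bpf_task_acquire___two))
                      acquired = bpf_task_acquire___two(task, &fake_ctx);
              if (acquired)
                      bpf_task_release(acquired);
              return 0;
      }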
|
| D | verifier_global_ptr_args.c |
      15  __weak int subprog_trusted_task_nullable(struct task_struct *task __arg_trusted __arg_nullable)  [in subprog_trusted_task_nullable()]
      17  if (!task)  [in subprog_trusted_task_nullable()]
      19  return task->pid + task->tgid;  [in subprog_trusted_task_nullable()]
      22  __weak int subprog_trusted_task_nullable_extra_layer(struct task_struct *task __arg_trusted __arg_n…  [in subprog_trusted_task_nullable_extra_layer()]
      24  return subprog_trusted_task_nullable(task) + subprog_trusted_task_nullable(NULL);  [in subprog_trusted_task_nullable_extra_layer()]
      59  __weak int subprog_trusted_task_nonnull(struct task_struct *task __arg_trusted)  [in subprog_trusted_task_nonnull()]
      61  return task->pid + task->tgid;  [in subprog_trusted_task_nonnull()]
      108  struct task_struct___local *task __arg_trusted __arg_nullable)  [in subprog_nullable_task_flavor()]
      112  if (!task)  [in subprog_nullable_task_flavor()]
      115  return bpf_copy_from_user_task(&buf, sizeof(buf), NULL, (void *)task, 0);  [in subprog_nullable_task_flavor()]
      [all …]
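__arg_trusted and __arg_nullable are BTF decl tags (exposed by bpf_helpers.h) on global-subprog arguments: the caller must pass a trusted pointer or NULL, and the verifier forces the NULL check inside the subprog before any dereference. A minimal sketch with hypothetical names (boilerplate as above):

      __weak int subprog_task_sum(struct task_struct *task __arg_trusted __arg_nullable)
      {
              if (!task)      /* required: the argument is nullable */
                      return 0;
              return task->pid + task->tgid;
      }

      int sum;

      SEC("tp_btf/task_newtask")
      int BPF_PROG(call_subprog, struct task_struct *task, u64 clone_flags)
      {
              /* both a trusted pointer and a literal NULL are accepted */
              sum = subprog_task_sum(task) + subprog_task_sum(NULL);
              return 0;
      }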
|
| D | bpf_iter_tasks.c |
      17  struct task_struct *task = ctx->task;  [in dump_task(), local]
      20  if (task == (void *)0) {  [in dump_task()]
      25  if (task->pid != (pid_t)tid)  [in dump_task()]
      33  BPF_SEQ_PRINTF(seq, "%8d %8d\n", task->tgid, task->pid);  [in dump_task()]
      44  struct task_struct *task = ctx->task;  [in dump_task_sleepable(), local]
      51  if (task == (void *)0) {  [in dump_task_sleepable()]
      58  ret = bpf_copy_from_user_task(&user_data, sizeof(uint32_t), ptr, task, 0);  [in dump_task_sleepable()]
      69  regs = (struct pt_regs *)bpf_task_pt_regs(task);  [in dump_task_sleepable()]
      76  ret = bpf_copy_from_user_task(&user_data, sizeof(uint32_t), ptr, task, 0);  [in dump_task_sleepable()]
      86  BPF_SEQ_PRINTF(seq, "%8d %8d %8d\n", task->tgid, task->pid, user_data);  [in dump_task_sleepable()]
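The task == (void *)0 checks are not paranoia: a task iterator program is invoked once per task and then one final time with ctx->task == NULL so it can emit a footer. The minimal shape, per the lines above (boilerplate as in the first sketch):

      SEC("iter/task")
      int dump_task(struct bpf_iter__task *ctx)
      {
              struct seq_file *seq = ctx->meta->seq;
              struct task_struct *task = ctx->task;

              if (task == (void *)0)
                      return 0;       /* end-of-iteration callback */

              BPF_SEQ_PRINTF(seq, "%8d %8d\n", task->tgid, task->pid);
              return 0;
      }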
|
| D | bpf_iter_task_file.c |
      17  struct task_struct *task = ctx->task;  [in dump_task_file(), local]
      21  if (task == (void *)0 || file == (void *)0)  [in dump_task_file()]
      29  if (tgid == task->tgid && task->tgid != task->pid)  [in dump_task_file()]
      32  if (last_tgid != task->tgid) {  [in dump_task_file()]
      33  last_tgid = task->tgid;  [in dump_task_file()]
      37  BPF_SEQ_PRINTF(seq, "%8d %8d %8d %lx\n", task->tgid, task->pid, fd,  [in dump_task_file()]
|
| D | cgrp_ls_sleepable.c |
      23  struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
      60  struct task_struct *task;  [in cgrp1_no_rcu_lock(), local]
      63  task = bpf_get_current_task_btf();  [in cgrp1_no_rcu_lock()]
      64  if (task->pid != target_pid)  [in cgrp1_no_rcu_lock()]
      68  cgrp = bpf_task_get_cgroup1(task, target_hid);  [in cgrp1_no_rcu_lock()]
      80  struct task_struct *task;  [in no_rcu_lock(), local]
      82  task = bpf_get_current_task_btf();  [in no_rcu_lock()]
      83  if (task->pid != target_pid)  [in no_rcu_lock()]
      87  __no_rcu_lock(task->cgroups->dfl_cgrp);  [in no_rcu_lock()]
      94  struct task_struct *task;  [in yes_rcu_lock(), local]
      [all …]
|
| D | task_ls_recursion.c |
      33  struct task_struct *task = bpf_get_current_task_btf();  [in BPF_PROG(), local]
      36  if (!test_pid || task->pid != test_pid)  [in BPF_PROG()]
      39  ptr = bpf_task_storage_get(&map_a, task, 0,  [in BPF_PROG()]
      51  err = bpf_task_storage_delete(&map_a, task);  [in BPF_PROG()]
      62  ptr = bpf_task_storage_get(&map_b, task, 0,  [in BPF_PROG()]
      73  struct task_struct *task;  [in BPF_PROG(), local]
      76  task = bpf_get_current_task_btf();  [in BPF_PROG()]
      77  if (!test_pid || task->pid != test_pid)  [in BPF_PROG()]
      80  ptr = bpf_task_storage_get(&map_a, task, 0,  [in BPF_PROG()]
      85  ptr = bpf_task_storage_get(&map_b, task, 0,  [in BPF_PROG()]
|
| D | find_vma.c |
      23  static long check_vma(struct task_struct *task, struct vm_area_struct *vma,  [in check_vma(), argument]
      40  struct task_struct *task = bpf_get_current_task_btf();  [in handle_getpid(), local]
      43  if (task->pid != target_pid)  [in handle_getpid()]
      46  find_addr_ret = bpf_find_vma(task, addr, check_vma, &data, 0);  [in handle_getpid()]
      49  find_zero_ret = bpf_find_vma(task, 0, check_vma, &data, 0);  [in handle_getpid()]
      56  struct task_struct *task = bpf_get_current_task_btf();  [in handle_pe(), local]
      59  if (task->pid != target_pid)  [in handle_pe()]
      62  find_addr_ret = bpf_find_vma(task, addr, check_vma, &data, 0);  [in handle_pe()]
      67  find_zero_ret = bpf_find_vma(task, 0, check_vma, &data, 0);  [in handle_pe()]
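bpf_find_vma() looks up the vma covering addr in the task's address space and, under mmap lock, runs the given callback on it; the addr-0 probe above exercises the not-found path. A sketch with an arbitrary attach point and hypothetical names (boilerplate as above):

      struct callback_ctx {
              unsigned long vm_flags;
      };

      /* callback signature bpf_find_vma() expects: task, vma, opaque ctx */
      static long check_vma(struct task_struct *task, struct vm_area_struct *vma,
                            struct callback_ctx *data)
      {
              data->vm_flags = vma->vm_flags;
              return 0;
      }

      SEC("fentry/do_nanosleep")
      int find_vma_example(void *ctx)
      {
              struct task_struct *task = bpf_get_current_task_btf();
              struct callback_ctx data = {};

              /* a negative return means no vma covers the address */
              if (!bpf_find_vma(task, 0x400000, check_vma, &data, 0))
                      bpf_printk("vm_flags: %lx", data.vm_flags);
              return 0;
      }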
|
| D | bpf_iter_task_stack.c |
      16  struct task_struct *task = ctx->task;  [in dump_task_stack(), local]
      19  if (task == (void *)0)  [in dump_task_stack()]
      22  retlen = bpf_get_task_stack(task, entries,  [in dump_task_stack()]
      27  BPF_SEQ_PRINTF(seq, "pid: %8u num_entries: %8u\n", task->pid,  [in dump_task_stack()]
      44  struct task_struct *task = ctx->task;  [in get_task_user_stacks(), local]
      48  if (task == (void *)0)  [in get_task_user_stacks()]
      51  res = bpf_get_task_stack(task, entries,  [in get_task_user_stacks()]
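bpf_get_task_stack() writes the target task's kernel stack (or its user stack with BPF_F_USER_STACK) into a plain buffer and returns the number of bytes copied, so the entry count is the return value divided by sizeof(unsigned long). Sketch (boilerplate as above):

      #define MAX_STACK_TRACE_DEPTH 64
      unsigned long entries[MAX_STACK_TRACE_DEPTH];

      SEC("iter/task")
      int dump_task_stack(struct bpf_iter__task *ctx)
      {
              struct seq_file *seq = ctx->meta->seq;
              struct task_struct *task = ctx->task;
              long retlen;

              if (task == (void *)0)
                      return 0;

              retlen = bpf_get_task_stack(task, entries,
                                          MAX_STACK_TRACE_DEPTH * sizeof(unsigned long), 0);
              if (retlen < 0)
                      return 0;

              BPF_SEQ_PRINTF(seq, "pid: %8u num_entries: %8u\n", task->pid,
                             (u32)(retlen / sizeof(unsigned long)));
              return 0;
      }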
|
| D | test_core_reloc_kernel.c |
      53  struct task_struct *task = (void *)bpf_get_current_task();  [in test_core_kernel(), local]
      62  if (CORE_READ(&pid, &task->pid) ||  [in test_core_kernel()]
      63  CORE_READ(&tgid, &task->tgid))  [in test_core_kernel()]
      70  out->valid[1] = BPF_CORE_READ(task,  [in test_core_kernel()]
      72  out->valid[2] = BPF_CORE_READ(task,  [in test_core_kernel()]
      75  out->valid[3] = BPF_CORE_READ(task,  [in test_core_kernel()]
      78  out->valid[4] = BPF_CORE_READ(task,  [in test_core_kernel()]
      81  out->valid[5] = BPF_CORE_READ(task,  [in test_core_kernel()]
      85  out->valid[6] = BPF_CORE_READ(task,  [in test_core_kernel()]
      89  out->valid[7] = BPF_CORE_READ(task,  [in test_core_kernel()]
      [all …]
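BPF_CORE_READ() records each field access as a CO-RE relocation that libbpf fixes up against the running kernel's BTF at load time, and it can chain through pointers. A minimal sketch (boilerplate as above, plus bpf_core_read.h; CORE_READ in the listing is the test file's local wrapper):

      #include <bpf/bpf_core_read.h>

      SEC("raw_tracepoint/sys_enter")
      int core_read_example(void *ctx)
      {
              struct task_struct *task = (void *)bpf_get_current_task();
              pid_t pid, parent_pid;

              pid = BPF_CORE_READ(task, pid);
              /* chained dereference: task->real_parent->pid, relocated field by field */
              parent_pid = BPF_CORE_READ(task, real_parent, pid);
              bpf_printk("pid=%d parent=%d", pid, parent_pid);
              return 0;
      }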
|
| D | task_local_storage.c |
      27  struct task_struct *task;  [in BPF_PROG(), local]
      30  task = bpf_get_current_task_btf();  [in BPF_PROG()]
      31  if (task->pid != target_pid)  [in BPF_PROG()]
      34  ptr = bpf_task_storage_get(&enter_id, task, 0,  [in BPF_PROG()]
      48  struct task_struct *task;  [in BPF_PROG(), local]
      51  task = bpf_get_current_task_btf();  [in BPF_PROG()]
      52  if (task->pid != target_pid)  [in BPF_PROG()]
      55  ptr = bpf_task_storage_get(&enter_id, task, 0,  [in BPF_PROG()]
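bpf_task_storage_get() keys a BPF_MAP_TYPE_TASK_STORAGE map by the task itself; passing BPF_LOCAL_STORAGE_GET_F_CREATE allocates the per-task slot on first access. A sketch of the syscall-entry counter pattern used above (boilerplate as in the first sketch):

      struct {
              __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
              __uint(map_flags, BPF_F_NO_PREALLOC);
              __type(key, int);
              __type(value, long);
      } enter_id SEC(".maps");

      SEC("tp_btf/sys_enter")
      int BPF_PROG(on_enter, struct pt_regs *regs, long id)
      {
              struct task_struct *task = bpf_get_current_task_btf();
              long *ptr;

              ptr = bpf_task_storage_get(&enter_id, task, 0,
                                         BPF_LOCAL_STORAGE_GET_F_CREATE);
              if (ptr)
                      *ptr += 1;      /* per-task syscall counter */
              return 0;
      }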
|
| D | cgrp_ls_tp_btf.c |
      33  struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
      69  struct task_struct *task;  [in BPF_PROG(), local]
      72  task = bpf_get_current_task_btf();  [in BPF_PROG()]
      73  if (task->pid != target_pid)  [in BPF_PROG()]
      77  cgrp = bpf_task_get_cgroup1(task, target_hid);  [in BPF_PROG()]
      86  __on_enter(regs, id, task->cgroups->dfl_cgrp);  [in BPF_PROG()]
      107  struct task_struct *task;  [in BPF_PROG(), local]
      110  task = bpf_get_current_task_btf();  [in BPF_PROG()]
      111  if (task->pid != target_pid)  [in BPF_PROG()]
      115  cgrp = bpf_task_get_cgroup1(task, target_hid);  [in BPF_PROG()]
      [all …]
|
| D | percpu_alloc_cgrp_local_storage.c |
      25  struct task_struct *task;  [in BPF_PROG(), local]
      29  task = bpf_get_current_task_btf();  [in BPF_PROG()]
      30  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0,  [in BPF_PROG()]
      50  struct task_struct *task;  [in BPF_PROG(), local]
      55  task = bpf_get_current_task_btf();  [in BPF_PROG()]
      56  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);  [in BPF_PROG()]
      79  struct task_struct *task;  [in BPF_PROG(), local]
      88  task = bpf_get_current_task_btf();  [in BPF_PROG()]
      89  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);  [in BPF_PROG()]
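bpf_cgrp_storage_get() is the cgroup analogue of task storage: the slot hangs off a cgroup, here the current task's cgroup on the default hierarchy (task->cgroups->dfl_cgrp). Sketch (boilerplate as above; the real test stores a per-CPU allocation, a plain long here for brevity, and the attach point is arbitrary):

      struct {
              __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
              __uint(map_flags, BPF_F_NO_PREALLOC);
              __type(key, int);
              __type(value, long);
      } cgrp SEC(".maps");

      SEC("fentry/bpf_fentry_test1")
      int BPF_PROG(on_cgrp_update, int x)
      {
              struct task_struct *task = bpf_get_current_task_btf();
              long *e;

              e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0,
                                       BPF_LOCAL_STORAGE_GET_F_CREATE);
              if (e)
                      __sync_fetch_and_add(e, 1);
              return 0;
      }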
|
| D | bpf_iter_vma_offset.c |
      20  struct task_struct *task = ctx->task;  [in get_vma_offset(), local]
      22  if (task == NULL || vma == NULL)  [in get_vma_offset()]
      25  if (last_tgid != task->tgid)  [in get_vma_offset()]
      27  last_tgid = task->tgid;  [in get_vma_offset()]
      29  if (task->tgid != pid)  [in get_vma_offset()]
|
| D | cpumask_failure.c |
      36  int BPF_PROG(test_alloc_no_release, struct task_struct *task, u64 clone_flags)  [in BPF_PROG(), argument]
      49  int BPF_PROG(test_alloc_double_release, struct task_struct *task, u64 clone_flags)  [in BPF_PROG(), argument]
      64  int BPF_PROG(test_acquire_wrong_cpumask, struct task_struct *task, u64 clone_flags)  [in BPF_PROG(), argument]
      69  cpumask = bpf_cpumask_acquire((struct bpf_cpumask *)task->cpus_ptr);  [in BPF_PROG()]
      77  int BPF_PROG(test_mutate_cpumask, struct task_struct *task, u64 clone_flags)  [in BPF_PROG(), argument]
      80  bpf_cpumask_set_cpu(0, (struct bpf_cpumask *)task->cpus_ptr);  [in BPF_PROG()]
      87  int BPF_PROG(test_insert_remove_no_release, struct task_struct *task, u64 clone_flags)  [in BPF_PROG(), argument]
      111  int BPF_PROG(test_cpumask_null, struct task_struct *task, u64 clone_flags)  [in BPF_PROG(), argument]
      121  int BPF_PROG(test_global_mask_out_of_rcu, struct task_struct *task, u64 clone_flags)  [in BPF_PROG(), argument]
      155  int BPF_PROG(test_global_mask_no_null_check, struct task_struct *task, u64 clone_flags)  [in BPF_PROG(), argument]
      [all …]
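bpf_cpumask objects follow the same ownership discipline as the task kfuncs: create/acquire must be paired with release, and only a mask the program owns may be mutated. Casting the kernel's read-only task->cpus_ptr to struct bpf_cpumask *, as the failure cases above do, is exactly what the verifier rejects. The accepted shape (boilerplate as above):

      struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
      void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
      void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;

      SEC("tp_btf/task_newtask")
      int BPF_PROG(cpumask_ok, struct task_struct *task, u64 clone_flags)
      {
              struct bpf_cpumask *mask;

              mask = bpf_cpumask_create();
              if (!mask)
                      return 0;
              bpf_cpumask_set_cpu(0, mask);   /* mutation allowed: we own it */
              bpf_cpumask_release(mask);      /* mandatory release */
              return 0;
      }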
|
| D | test_task_under_cgroup.c |
      11  long bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) __ksym;
      21  int BPF_PROG(tp_btf_run, struct task_struct *task, u64 clone_flags)  [in BPF_PROG(), argument]
      29  acquired = bpf_task_acquire(task);  [in BPF_PROG()]
      55  struct task_struct *task;  [in BPF_PROG(), local]
      58  task = bpf_get_current_task_btf();  [in BPF_PROG()]
      59  if (local_pid != task->pid)  [in BPF_PROG()]
      69  if (!bpf_task_under_cgroup(task, cgrp))  [in BPF_PROG()]
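bpf_task_under_cgroup() answers whether a task sits underneath a given ancestor cgroup. A sketch pairing it with bpf_cgroup_from_id()/bpf_cgroup_release() to obtain and drop the ancestor reference (boilerplate as above; cgid would be filled in by userspace and the program name is hypothetical):

      long bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) __ksym;
      struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
      void bpf_cgroup_release(struct cgroup *cgrp) __ksym;

      u64 cgid;

      SEC("tp_btf/task_newtask")
      int BPF_PROG(check_ancestry, struct task_struct *task, u64 clone_flags)
      {
              struct cgroup *cgrp;

              cgrp = bpf_cgroup_from_id(cgid);
              if (!cgrp)
                      return 0;
              if (bpf_task_under_cgroup(task, cgrp))
                      bpf_printk("pid %d is under cgroup %llu", task->pid, cgid);
              bpf_cgroup_release(cgrp);
              return 0;
      }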
|
| D | iters_css_task.c |
      27  struct task_struct *task;  [in BPF_PROG(), local]
      41  bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS)  [in BPF_PROG()]
      42  if (task->pid == target_pid)  [in BPF_PROG()]
      61  struct task_struct *task;  [in cgroup_id_printer(), local]
      77  bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) {  [in cgroup_id_printer()]
      78  if (task->pid == target_pid)  [in cgroup_id_printer()]
      91  struct task_struct *task;  [in BPF_PROG(), local]
      97  bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) {  [in BPF_PROG()]
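bpf_for_each(css_task, ...) is the open-coded iterator macro from the selftests' bpf_experimental.h: it walks the tasks attached to a cgroup_subsys_state, with CSS_TASK_ITER_PROCS restricting the walk to thread-group leaders. A sketch in the iter/cgroup context that cgroup_id_printer() above uses (boilerplate as in the first sketch; the css_task iterator is only allowed in a limited set of program types):

      #include "bpf_experimental.h"

      SEC("iter/cgroup")
      int count_cgroup_procs(struct bpf_iter__cgroup *ctx)
      {
              struct cgroup *cgrp = ctx->cgroup;
              struct cgroup_subsys_state *css;
              struct task_struct *task;
              int n = 0;

              if (!cgrp)
                      return 0;       /* end-of-iteration callback */

              css = &cgrp->self;
              bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS)
                      n++;

              BPF_SEQ_PRINTF(ctx->meta->seq, "%d\n", n);
              return 0;
      }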
|
| D | find_vma_fail2.c |
      12  static long write_task(struct task_struct *task, struct vm_area_struct *vma,  [in write_task(), argument]
      16  task->mm = NULL;  [in write_task()]
      24  struct task_struct *task = bpf_get_current_task_btf();  [in handle_getpid(), local]
      27  bpf_find_vma(task, 0, write_task, &data, 0);  [in handle_getpid()]
|
| D | cgrp_ls_recursion.c |
      27  struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
      46  struct task_struct *task = bpf_get_current_task_btf();  [in BPF_PROG(), local]
      50  cgrp = bpf_task_get_cgroup1(task, target_hid);  [in BPF_PROG()]
      59  __on_update(task->cgroups->dfl_cgrp);  [in BPF_PROG()]
      79  struct task_struct *task = bpf_get_current_task_btf();  [in BPF_PROG(), local]
      83  cgrp = bpf_task_get_cgroup1(task, target_hid);  [in BPF_PROG()]
      92  __on_enter(regs, id, task->cgroups->dfl_cgrp);  [in BPF_PROG()]
|
| /tools/perf/util/bpf_skel/ |
| D | kwork_top.bpf.c |
      105  static __always_inline void update_task_info(struct task_struct *task, __u32 cpu)  [in update_task_info(), argument]
      108  .pid = task->pid,  [in update_task_info()]
      114  .tgid = task->tgid,  [in update_task_info()]
      115  .is_kthread = task->flags & PF_KTHREAD ? 1 : 0,  [in update_task_info()]
      117  BPF_CORE_READ_STR_INTO(&data.comm, task, comm);  [in update_task_info()]
      139  static void on_sched_out(struct task_struct *task, __u64 ts, __u32 cpu)  [in on_sched_out(), argument]
      144  pelem = bpf_task_storage_get(&kwork_top_task_time, task, NULL, 0);  [in on_sched_out()]
      152  .pid = task->pid,  [in on_sched_out()]
      153  .task_p = (__u64)task,  [in on_sched_out()]
      157  update_task_info(task, cpu);  [in on_sched_out()]
      [all …]
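kwork_top stamps each task's scheduling times in task-local storage and folds the on-CPU interval into its report maps on sched_switch. A condensed sketch of that handler (map name taken from the listing; bpf_printk stands in for the real accounting; the raw-args casts follow the tp_btf/sched_switch layout of preempt, prev, next):

      struct time_data {
              __u64 timestamp;
      };

      struct {
              __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
              __uint(map_flags, BPF_F_NO_PREALLOC);
              __type(key, int);
              __type(value, struct time_data);
      } kwork_top_task_time SEC(".maps");

      SEC("tp_btf/sched_switch")
      int on_switch(u64 *ctx)
      {
              struct task_struct *prev = (struct task_struct *)ctx[1];
              struct task_struct *next = (struct task_struct *)ctx[2];
              struct time_data *pelem;
              __u64 ts = bpf_ktime_get_ns();

              /* stamp the incoming task, creating its slot on first sight */
              pelem = bpf_task_storage_get(&kwork_top_task_time, next, NULL,
                                           BPF_LOCAL_STORAGE_GET_F_CREATE);
              if (pelem)
                      pelem->timestamp = ts;

              /* charge the outgoing task for its on-CPU interval */
              pelem = bpf_task_storage_get(&kwork_top_task_time, prev, NULL, 0);
              if (pelem && pelem->timestamp)
                      bpf_printk("pid %d ran %llu ns", prev->pid,
                                 ts - pelem->timestamp);
              return 0;
      }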
|
| /tools/perf/tests/shell/ |
| D | pipe_test.sh |
      40  task="perf"
      41  if ! perf record -e task-clock:u -o - ${prog} | perf report -i - --task | grep -q ${task}
      48  if ! perf record -g -e task-clock:u -o - ${prog} | perf report -i - --task | grep -q ${task}
      55  perf record -g -e task-clock:u -o - ${prog} > ${data}
      56  if ! perf report -i ${data} --task | grep -q ${task}
      72  …if ! perf record -e task-clock:u -o - ${prog} | perf inject ${inject_opt}| perf report -i - | grep…
      79  …if ! perf record -g -e task-clock:u -o - ${prog} | perf inject ${inject_opt} | perf report -i - | …
      86  perf record -e task-clock:u -o - ${prog} | perf inject ${inject_opt} -o ${data}
      93  perf record -e task-clock:u -o ${data} ${prog}
      100  perf record -e task-clock:u -o - ${prog} > ${data}
      [all …]
|
| D | record_bpf_filter.sh |
      25  if ! perf record -e task-clock --filter 'period > 1' \
      44  if ! perf record -e task-clock -c 10000 --filter 'ip < 0xffffffff00000000' \
      70  if ! perf record -e task-clock --filter 'cpu > 0' \
      78  if ! perf record --sample-cpu -e task-clock --filter 'cpu > 0' \
      92  if ! perf record -e task-clock --filter 'period > 1000, ip > 0' \
      100  if ! perf record -e task-clock --filter 'period > 1000 , cpu > 0 || ip > 0' \
      108  if ! perf record -e task-clock --filter 'period > 0 || code_pgsz > 4096' \
      122  if ! perf record -e task-clock --filter 'period > 100000' \
      131  if ! perf script -i "${perfdata}" -F period,event | grep task-clock | \
      153  if ! perf record -e task-clock --filter 'cgroup == /' \
      [all …]
|
| D | test_task_analyzer.sh |
      5  tmpdir=$(mktemp -d /tmp/perf-script-task-analyzer-XXXXX)
      75  perf script report task-analyzer > "$out"
      82  perf script report task-analyzer --ns --rename-comms-by-tids 0:random > "$out"
      89  perf script report task-analyzer --ms --filter-tasks perf --highlight-tasks perf \
      97  perf script report task-analyzer --extended-times --time-limit :99999 \
      105  perf script report task-analyzer --summary > "$out"
      112  perf script report task-analyzer --summary-extended > "$out"
      119  perf script report task-analyzer --summary-only > "$out"
      126  perf script report task-analyzer --extended-times --summary --ns > "$out"
      133  perf script report task-analyzer --csv csv > /dev/null
      [all …]
|
| /tools/perf/scripts/python/ |
| D | task-analyzer.py |
      256  def feed(self, task):  [argument]
      262  self._last_start = task.time_in(time_unit)
      263  self._last_finish = task.time_out(time_unit)
      265  self._time_in = task.time_in()
      266  time_in = task.time_in(time_unit)
      267  time_out = task.time_out(time_unit)
      274  self._last_finish = task.time_out(time_unit)
      275  self._last_start = task.time_in(time_unit)
      412  for task in db["tid"][tid]:
      413  pid = task.pid
      [all …]
|