| /tools/testing/selftests/bpf/progs/ |
| D | task_kfunc_failure.c |
|   19 | static struct __tasks_kfunc_map_value *insert_lookup_task(struct task_struct *task) | in insert_lookup_task() |
|   32 | int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|   34 | struct task_struct *acquired; | in BPF_PROG() |
|   53 | int BPF_PROG(task_kfunc_acquire_fp, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|   55 | struct task_struct *acquired, *stack_task = (struct task_struct *)&clone_flags; | in BPF_PROG() |
|   58 | acquired = bpf_task_acquire((struct task_struct *)&stack_task); | in BPF_PROG() |
|   69 | int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|   71 | struct task_struct *acquired; | in BPF_PROG() |
|   84 | int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe_rcu, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|   86 | struct task_struct *acquired; | in BPF_PROG() |
| [all …] |
|
| D | task_kfunc_success.c |
|   21 | struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak; |
|   23 | struct task_struct *bpf_task_acquire___one(struct task_struct *task) __ksym __weak; |
|   25 | struct task_struct *bpf_task_acquire___two(struct task_struct *p, void *ctx) __ksym __weak; |
|   27 | struct task_struct *bpf_task_acquire___three(void *ctx) __ksym __weak; |
|   39 | static int test_acquire_release(struct task_struct *task) | in test_acquire_release() |
|   41 | struct task_struct *acquired = NULL; | in test_acquire_release() |
|   67 | int BPF_PROG(test_task_kfunc_flavor_relo, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|   69 | struct task_struct *acquired = NULL; | in BPF_PROG() |
|   97 | int BPF_PROG(test_task_kfunc_flavor_relo_not_found, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|  111 | int BPF_PROG(test_task_acquire_release_argument, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
| [all …] |
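The failure and success tests above exercise one contract: every pointer returned by bpf_task_acquire() must be released exactly once on every program path, and only trusted pointers may be acquired. A minimal sketch of the legal pattern, using the kfunc declarations shown in task_kfunc_common.h below (the program and section names here are illustrative):

```c
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;

char _license[] SEC("license") = "GPL";

/* Take a reference on the newly cloned task, then drop it. Leaking the
 * reference on any path makes the verifier reject the program. */
SEC("tp_btf/task_newtask")
int BPF_PROG(acquire_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	acquired = bpf_task_acquire(task);
	if (!acquired)
		return 0;

	bpf_task_release(acquired);
	return 0;
}
```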
|
| D | verifier_global_ptr_args.c |
|   12 | extern struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak; |
|   13 | extern void bpf_task_release(struct task_struct *p) __ksym __weak; |
|   15 | __weak int subprog_trusted_task_nullable(struct task_struct *task __arg_trusted __arg_nullable) | in subprog_trusted_task_nullable() |
|   22 | __weak int subprog_trusted_task_nullable_extra_layer(struct task_struct *task __arg_trusted __arg_n… | in subprog_trusted_task_nullable_extra_layer() |
|   33 | struct task_struct *t1 = bpf_get_current_task_btf(); | in trusted_task_arg_nullable() |
|   34 | struct task_struct *t2 = bpf_task_acquire(t1); | in trusted_task_arg_nullable() |
|   59 | __weak int subprog_trusted_task_nonnull(struct task_struct *task __arg_trusted) | in subprog_trusted_task_nonnull() |
|   79 | struct task_struct *t = bpf_get_current_task_btf(); | in trusted_task_arg_nonnull_fail2() |
|   80 | struct task_struct *nullable; | in trusted_task_arg_nonnull_fail2() |
|  100 | struct task_struct *t = bpf_get_current_task_btf(); | in trusted_task_arg_nonnull() |
| [all …] |
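The __arg_trusted and __arg_nullable tags let a global (non-inlined) subprog accept a trusted task pointer directly rather than a scalar. A sketch of the nullable variant, assuming the decl-tag macros that libbpf's bpf_helpers.h provides:

```c
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

/* In current libbpf these expand to BTF decl tags the verifier reads:
 *   __arg_trusted  -> btf_decl_tag("arg:trusted")
 *   __arg_nullable -> btf_decl_tag("arg:nullable") */
__weak int subprog_trusted_task_nullable(struct task_struct *task
					 __arg_trusted __arg_nullable)
{
	/* Nullable argument: must be checked before any dereference. */
	if (!task)
		return 0;
	return task->pid + task->tgid;
}
```

Without __arg_nullable, callers may only pass known-non-NULL trusted pointers, which is exactly what the trusted_task_arg_nonnull_fail* programs above probe.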
|
| D | task_kfunc_common.h |
|   13 | struct task_struct __kptr * task; |
|   23 | struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym; |
|   24 | void bpf_task_release(struct task_struct *p) __ksym; |
|   25 | struct task_struct *bpf_task_from_pid(s32 pid) __ksym; |
|   29 | static inline struct __tasks_kfunc_map_value *tasks_kfunc_map_value_lookup(struct task_struct *p) | in tasks_kfunc_map_value_lookup() |
|   41 | static inline int tasks_kfunc_map_insert(struct task_struct *p) | in tasks_kfunc_map_insert() |
|   45 | struct task_struct *acquired, *old; | in tasks_kfunc_map_insert() |
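tasks_kfunc_map_insert() demonstrates the canonical way to park an acquired task reference in a map: a __kptr field can only be written through bpf_kptr_xchg(), which hands back the previous owner so it can be released. A condensed sketch of that helper; the return codes and map sizing are illustrative:

```c
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;

struct __tasks_kfunc_map_value {
	struct task_struct __kptr *task;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, int);
	__type(value, struct __tasks_kfunc_map_value);
	__uint(max_entries, 16);
} __tasks_kfunc_map SEC(".maps");

static inline int tasks_kfunc_map_insert(struct task_struct *p)
{
	struct __tasks_kfunc_map_value local, *v;
	struct task_struct *acquired, *old;
	s32 pid = p->pid;

	local.task = NULL;
	if (bpf_map_update_elem(&__tasks_kfunc_map, &pid, &local, BPF_NOEXIST))
		return -1;

	v = bpf_map_lookup_elem(&__tasks_kfunc_map, &pid);
	if (!v)
		return -1;

	acquired = bpf_task_acquire(p);
	if (!acquired)
		return -1;

	/* Transfer ownership of the reference into the map; release
	 * whatever kptr (if any) was stored there before. */
	old = bpf_kptr_xchg(&v->task, acquired);
	if (old)
		bpf_task_release(old);

	return 0;
}
```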
|
| D | test_task_under_cgroup.c |
|   11 | long bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) __ksym; |
|   13 | struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym; |
|   14 | void bpf_task_release(struct task_struct *p) __ksym; |
|   21 | int BPF_PROG(tp_btf_run, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|   24 | struct task_struct *acquired; | in BPF_PROG() |
|   55 | struct task_struct *task; | in BPF_PROG() |
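bpf_task_under_cgroup() reports whether a task sits at or below an ancestor in the cgroup hierarchy. A sketch of how a program might use it; the bpf_cgroup_from_id()/bpf_cgroup_release() kfuncs and the loader-set cgid global are assumptions of this example, not taken from the file above:

```c
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

long bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) __ksym;
struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;

char _license[] SEC("license") = "GPL";

const volatile u64 cgid; /* filled in by the loader before attach */
int remote_pid;

SEC("tp_btf/task_newtask")
int BPF_PROG(tp_btf_run, struct task_struct *task, u64 clone_flags)
{
	struct cgroup *ancestor;

	ancestor = bpf_cgroup_from_id(cgid);
	if (!ancestor)
		return 0;

	if (bpf_task_under_cgroup(task, ancestor))
		remote_pid = task->pid;

	/* bpf_cgroup_from_id() is acquiring, so the cgroup must be released. */
	bpf_cgroup_release(ancestor);
	return 0;
}
```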
|
| D | iters_testmod.c |
|   15 | struct task_struct *cur_task = bpf_get_current_task_btf(); | in iter_next_trusted() |
|   35 | struct task_struct *cur_task = bpf_get_current_task_btf(); | in iter_next_trusted_or_null() |
|   53 | struct task_struct *cur_task = bpf_get_current_task_btf(); | in iter_next_rcu() |
|   55 | struct task_struct *task_ptr; | in iter_next_rcu() |
|   73 | struct task_struct *cur_task = bpf_get_current_task_btf(); | in iter_next_rcu_or_null() |
|   75 | struct task_struct *task_ptr; | in iter_next_rcu_or_null() |
|   91 | struct task_struct *cur_task = bpf_get_current_task_btf(); | in iter_next_rcu_not_trusted() |
|   93 | struct task_struct *task_ptr; | in iter_next_rcu_not_trusted() |
|
| D | cpumask_failure.c |
|   36 | int BPF_PROG(test_alloc_no_release, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|   49 | int BPF_PROG(test_alloc_double_release, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|   64 | int BPF_PROG(test_acquire_wrong_cpumask, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|   77 | int BPF_PROG(test_mutate_cpumask, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|   87 | int BPF_PROG(test_insert_remove_no_release, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|  111 | int BPF_PROG(test_cpumask_null, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|  121 | int BPF_PROG(test_global_mask_out_of_rcu, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|  155 | int BPF_PROG(test_global_mask_no_null_check, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|  183 | int BPF_PROG(test_global_mask_rcu_no_null_check, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|  208 | int BPF_PROG(test_invalid_nested_array, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|
| D | rcu_read_lock.c |
|   26 | struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym; |
|   27 | void bpf_task_release(struct task_struct *p) __ksym; |
|   32 | struct task_struct *task; | in get_cgroup_id() |
|   53 | struct task_struct *task, *real_parent; | in task_succ() |
|   82 | struct task_struct *task, *real_parent; | in no_lock() |
|   94 | struct task_struct *task, *real_parent; | in two_regions() |
|  113 | struct task_struct *task, *real_parent; | in non_sleepable_1() |
|  129 | struct task_struct *task, *real_parent; | in non_sleepable_2() |
|  148 | struct task_struct *task, *real_parent, *gparent; | in task_acquire() |
|  176 | struct task_struct *task; | in miss_lock() |
| [all …] |
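These programs exercise the bpf_rcu_read_lock()/bpf_rcu_read_unlock() kfuncs: in sleepable programs, RCU-protected fields such as task->real_parent may only be dereferenced inside an explicit RCU read-side section. A minimal sketch; the x86-64 syscall symbol in the SEC name is an assumption of this example:

```c
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;

char _license[] SEC("license") = "GPL";

int parent_pid;

SEC("fentry.s/__x64_sys_getpgid")
int sleepable_parent_pid(void *ctx)
{
	struct task_struct *task, *real_parent;

	task = bpf_get_current_task_btf();

	/* real_parent is __rcu-tagged; outside this section the verifier
	 * would reject the dereference in a sleepable program. */
	bpf_rcu_read_lock();
	real_parent = task->real_parent;
	if (real_parent)
		parent_pid = real_parent->pid;
	bpf_rcu_read_unlock();

	return 0;
}
```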
|
| D | test_overhead.c |
|   10 | struct task_struct; |
|   13 | int BPF_KPROBE(prog1, struct task_struct *tsk, const char *buf, bool exec) | in BPF_KPROBE() |
|   31 | int BPF_PROG(prog4, struct task_struct *tsk, const char *buf, bool exec) | in BPF_PROG() |
|   37 | int BPF_PROG(prog5, struct task_struct *tsk, const char *buf, bool exec) | in BPF_PROG() |
|
| D | test_subprogs.c |
|   57 | return BPF_CORE_READ((struct task_struct *)(void *)t, tgid); | in get_task_tgid() |
|   71 | struct task_struct *t = (void *)bpf_get_current_task(); | in prog1() |
|   83 | struct task_struct *t = (void *)bpf_get_current_task(); | in prog2() |
|  101 | struct task_struct *t = (void *)bpf_get_current_task(); | in prog3() |
|  117 | struct task_struct *t = (void *)bpf_get_current_task(); | in prog4() |
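test_subprogs.c passes the task pointer between subprograms as a plain scalar, so field access must go through a CO-RE probe read rather than a direct dereference. A sketch of that pattern; the global names are illustrative:

```c
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

char _license[] SEC("license") = "GPL";

int tgid_out;

/* The pointer travels as an integer, so it carries no BTF type info;
 * BPF_CORE_READ() fetches the field via a relocatable probe read. */
static __noinline int get_task_tgid(unsigned long t)
{
	return BPF_CORE_READ((struct task_struct *)(void *)t, tgid);
}

SEC("raw_tp/sys_enter")
int prog1(void *ctx)
{
	struct task_struct *t = (void *)bpf_get_current_task();

	tgid_out = get_task_tgid((unsigned long)t);
	return 0;
}
```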
|
| D | cpumask_success.c |
|  139 | int BPF_PROG(test_alloc_free_cpumask, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|  155 | int BPF_PROG(test_set_clear_cpu, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|  184 | int BPF_PROG(test_setall_clear_cpu, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|  213 | int BPF_PROG(test_first_firstzero_cpu, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|  252 | int BPF_PROG(test_firstand_nocpu, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|  284 | int BPF_PROG(test_test_and_set_clear, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|  316 | int BPF_PROG(test_and_or_xor, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|  363 | int BPF_PROG(test_intersects_subset, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|  405 | int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
|  459 | int BPF_PROG(test_insert_leave, struct task_struct *task, u64 clone_flags) | in BPF_PROG() |
| [all …] |
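The cpumask suite is built on the bpf_cpumask_* kfuncs, which follow the same acquire/release discipline as the task kfuncs. A sketch of the create/set/test/release flow these tests repeat, with illustrative program and global names:

```c
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;

char _license[] SEC("license") = "GPL";

int err;

SEC("tp_btf/task_newtask")
int BPF_PROG(set_test_cpu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	cpumask = bpf_cpumask_create();
	if (!cpumask) {
		err = 1;
		return 0;
	}

	bpf_cpumask_set_cpu(0, cpumask);
	if (!bpf_cpumask_test_cpu(0, (const struct cpumask *)cpumask))
		err = 2;

	/* Omitting this release is exactly what test_alloc_no_release
	 * in cpumask_failure.c checks for. */
	bpf_cpumask_release(cpumask);
	return 0;
}
```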
|
| D | verifier_vfs_reject.c |
|   35 | struct task_struct *task; | in BPF_PROG() |
|   37 | task = (struct task_struct *)&x; | in BPF_PROG() |
|   52 | struct task_struct *parent; | in BPF_PROG() |
|  101 | int BPF_PROG(path_d_path_kfunc_untrusted_from_argument, struct task_struct *task) | in BPF_PROG() |
|  118 | struct task_struct *current; | in BPF_PROG() |
|
| /tools/testing/selftests/sched_ext/ |
| D | maximal.bpf.c |
|   17 | s32 BPF_STRUCT_OPS(maximal_select_cpu, struct task_struct *p, s32 prev_cpu, | in BPF_STRUCT_OPS() |
|   23 | void BPF_STRUCT_OPS(maximal_enqueue, struct task_struct *p, u64 enq_flags) | in BPF_STRUCT_OPS() |
|   28 | void BPF_STRUCT_OPS(maximal_dequeue, struct task_struct *p, u64 deq_flags) | in BPF_STRUCT_OPS() |
|   31 | void BPF_STRUCT_OPS(maximal_dispatch, s32 cpu, struct task_struct *prev) | in BPF_STRUCT_OPS() |
|   36 | void BPF_STRUCT_OPS(maximal_runnable, struct task_struct *p, u64 enq_flags) | in BPF_STRUCT_OPS() |
|   39 | void BPF_STRUCT_OPS(maximal_running, struct task_struct *p) | in BPF_STRUCT_OPS() |
|   42 | void BPF_STRUCT_OPS(maximal_stopping, struct task_struct *p, bool runnable) | in BPF_STRUCT_OPS() |
|   45 | void BPF_STRUCT_OPS(maximal_quiescent, struct task_struct *p, u64 deq_flags) | in BPF_STRUCT_OPS() |
|   48 | bool BPF_STRUCT_OPS(maximal_yield, struct task_struct *from, | in BPF_STRUCT_OPS() |
|   49 | struct task_struct *to) | in BPF_STRUCT_OPS() |
| [all …] |
|
| D | maybe_null.bpf.c |
|   12 | void BPF_STRUCT_OPS(maybe_null_running, struct task_struct *p) | in BPF_STRUCT_OPS() |
|   15 | void BPF_STRUCT_OPS(maybe_null_success_dispatch, s32 cpu, struct task_struct *p) | in BPF_STRUCT_OPS() |
|   21 | bool BPF_STRUCT_OPS(maybe_null_success_yield, struct task_struct *from, | in BPF_STRUCT_OPS() |
|   22 | struct task_struct *to) | in BPF_STRUCT_OPS() |
|
| D | select_cpu_vtime.bpf.c |
|   28 | static inline u64 task_vtime(const struct task_struct *p) | in task_vtime() |
|   38 | s32 BPF_STRUCT_OPS(select_cpu_vtime_select_cpu, struct task_struct *p, | in BPF_STRUCT_OPS() |
|   54 | void BPF_STRUCT_OPS(select_cpu_vtime_dispatch, s32 cpu, struct task_struct *p) | in BPF_STRUCT_OPS() |
|   60 | void BPF_STRUCT_OPS(select_cpu_vtime_running, struct task_struct *p) | in BPF_STRUCT_OPS() |
|   66 | void BPF_STRUCT_OPS(select_cpu_vtime_stopping, struct task_struct *p, | in BPF_STRUCT_OPS() |
|   72 | void BPF_STRUCT_OPS(select_cpu_vtime_enable, struct task_struct *p) | in BPF_STRUCT_OPS() |
|
| D | init_enable_count.bpf.c |
|   18 | s32 BPF_STRUCT_OPS_SLEEPABLE(cnt_init_task, struct task_struct *p, | in BPF_STRUCT_OPS_SLEEPABLE() |
|   31 | void BPF_STRUCT_OPS(cnt_exit_task, struct task_struct *p) | in BPF_STRUCT_OPS() |
|   36 | void BPF_STRUCT_OPS(cnt_enable, struct task_struct *p) | in BPF_STRUCT_OPS() |
|   41 | void BPF_STRUCT_OPS(cnt_disable, struct task_struct *p) | in BPF_STRUCT_OPS() |
|
| D | exit.bpf.c |
|   20 | s32 BPF_STRUCT_OPS(exit_select_cpu, struct task_struct *p, | in BPF_STRUCT_OPS() |
|   31 | void BPF_STRUCT_OPS(exit_enqueue, struct task_struct *p, u64 enq_flags) | in BPF_STRUCT_OPS() |
|   39 | void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p) | in BPF_STRUCT_OPS() |
|   47 | void BPF_STRUCT_OPS(exit_enable, struct task_struct *p) | in BPF_STRUCT_OPS() |
|   53 | s32 BPF_STRUCT_OPS(exit_init_task, struct task_struct *p, | in BPF_STRUCT_OPS() |
|
| D | maybe_null_fail_yld.bpf.c |
|   12 | void BPF_STRUCT_OPS(maybe_null_running, struct task_struct *p) | in BPF_STRUCT_OPS() |
|   15 | bool BPF_STRUCT_OPS(maybe_null_fail_yield, struct task_struct *from, | in BPF_STRUCT_OPS() |
|   16 | struct task_struct *to) | in BPF_STRUCT_OPS() |
|
| D | dsp_local_on.bpf.c |
|   19 | s32 BPF_STRUCT_OPS(dsp_local_on_select_cpu, struct task_struct *p, | in BPF_STRUCT_OPS() |
|   25 | void BPF_STRUCT_OPS(dsp_local_on_enqueue, struct task_struct *p, | in BPF_STRUCT_OPS() |
|   34 | void BPF_STRUCT_OPS(dsp_local_on_dispatch, s32 cpu, struct task_struct *prev) | in BPF_STRUCT_OPS() |
|   37 | struct task_struct *p; | in BPF_STRUCT_OPS() |
|
| D | enq_select_cpu_fails.bpf.c |
|   13 | s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, |
|   16 | s32 BPF_STRUCT_OPS(enq_select_cpu_fails_select_cpu, struct task_struct *p, | in BPF_STRUCT_OPS() |
|   22 | void BPF_STRUCT_OPS(enq_select_cpu_fails_enqueue, struct task_struct *p, | in BPF_STRUCT_OPS() |
|
| D | select_cpu_dfl_nodispatch.bpf.c |
|   31 | s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, |
|   34 | s32 BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_select_cpu, struct task_struct *p, | in BPF_STRUCT_OPS() |
|   52 | void BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_enqueue, struct task_struct *p, | in BPF_STRUCT_OPS() |
|   74 | struct task_struct *p, struct scx_init_task_args *args) | in BPF_STRUCT_OPS() |
|
| /tools/bpf/runqslower/ |
| D | runqslower.bpf.c |
|   28 | static int trace_enqueue(struct task_struct *t) | in trace_enqueue() |
|   49 | struct task_struct *p = (void *)ctx[0]; | in handle__sched_wakeup() |
|   58 | struct task_struct *p = (void *)ctx[0]; | in handle__sched_wakeup_new() |
|   69 | struct task_struct *prev = (struct task_struct *)ctx[1]; | in handle__sched_switch() |
|   70 | struct task_struct *next = (struct task_struct *)ctx[2]; | in handle__sched_switch() |
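runqslower's handlers show the classic raw-tracepoint shape: arguments arrive as an array of u64 and are cast to task_struct pointers. The casts carry no trusted type information, so fields are fetched with CO-RE probe reads. A condensed sketch of a handle__sched_switch()-style handler:

```c
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

char LICENSE[] SEC("license") = "GPL";

int last_prev_pid, last_next_pid;

SEC("raw_tp/sched_switch")
int handle__sched_switch(u64 *ctx)
{
	/* TP_PROTO(bool preempt, struct task_struct *prev,
	 *          struct task_struct *next) */
	struct task_struct *prev = (struct task_struct *)ctx[1];
	struct task_struct *next = (struct task_struct *)ctx[2];

	/* Untrusted casts: dereference via BPF_CORE_READ(), not directly. */
	last_prev_pid = BPF_CORE_READ(prev, pid);
	last_next_pid = BPF_CORE_READ(next, pid);
	return 0;
}
```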
|
| /tools/perf/util/bpf_skel/ |
| D | kwork_top.bpf.c |
|  105 | static __always_inline void update_task_info(struct task_struct *task, __u32 cpu) | in update_task_info() |
|  139 | static void on_sched_out(struct task_struct *task, __u64 ts, __u32 cpu) | in on_sched_out() |
|  160 | static void on_sched_in(struct task_struct *task, __u64 ts) | in on_sched_in() |
|  173 | struct task_struct *prev, *next; | in on_switch() |
|  175 | prev = (struct task_struct *)ctx[1]; | in on_switch() |
|  176 | next = (struct task_struct *)ctx[2]; | in on_switch() |
|  197 | struct task_struct *task; | in on_irq_handler_entry() |
|  209 | task = (struct task_struct *)bpf_get_current_task(); | in on_irq_handler_entry() |
|  232 | struct task_struct *task; | in on_irq_handler_exit() |
|  245 | task = (struct task_struct *)bpf_get_current_task(); | in on_irq_handler_exit() |
| [all …] |
|
| D | off_cpu.bpf.c |
|  106 | static inline int get_task_state(struct task_struct *t) | in get_task_state() |
|  121 | static inline __u64 get_cgroup_id(struct task_struct *t) | in get_cgroup_id() |
|  141 | static inline int can_record(struct task_struct *t, int state) | in can_record() |
|  186 | static int off_cpu_stat(u64 *ctx, struct task_struct *prev, | in off_cpu_stat() |
|  187 | struct task_struct *next, int state) | in off_cpu_stat() |
|  240 | struct task_struct *task; | in on_newtask() |
|  248 | task = (struct task_struct *)bpf_get_current_task(); | in on_newtask() |
|  254 | task = (struct task_struct *)ctx[0]; | in on_newtask() |
|  267 | struct task_struct *prev, *next; | in on_switch() |
|  273 | prev = (struct task_struct *)ctx[1]; | in on_switch() |
| [all …] |
|
| /tools/sched_ext/ |
| D | scx_simple.bpf.c |
|   60 | s32 BPF_STRUCT_OPS(simple_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags) | in BPF_STRUCT_OPS() |
|   74 | void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags) | in BPF_STRUCT_OPS() |
|   95 | void BPF_STRUCT_OPS(simple_dispatch, s32 cpu, struct task_struct *prev) | in BPF_STRUCT_OPS() |
|  100 | void BPF_STRUCT_OPS(simple_running, struct task_struct *p) | in BPF_STRUCT_OPS() |
|  115 | void BPF_STRUCT_OPS(simple_stopping, struct task_struct *p, bool runnable) | in BPF_STRUCT_OPS() |
|  132 | void BPF_STRUCT_OPS(simple_enable, struct task_struct *p) | in BPF_STRUCT_OPS() |
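The BPF_STRUCT_OPS() callbacks above only take effect once they are wired into a struct_ops map that user space attaches. A sketch of that registration block written as a plain struct_ops map; the in-tree schedulers typically go through a convenience macro such as SCX_OPS_DEFINE() from the scx headers, which expands to roughly this shape:

```c
/* Register the callbacks with sched_ext. Each entry must match a
 * member of struct sched_ext_ops; unset members fall back to the
 * kernel's defaults. */
SEC(".struct_ops.link")
struct sched_ext_ops simple_ops = {
	.select_cpu	= (void *)simple_select_cpu,
	.enqueue	= (void *)simple_enqueue,
	.dispatch	= (void *)simple_dispatch,
	.running	= (void *)simple_running,
	.stopping	= (void *)simple_stopping,
	.enable		= (void *)simple_enable,
	.name		= "simple",
};
```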
|