| /tools/testing/selftests/ftrace/test.d/instances/ |
| D | instance-event.tc |
|     7: rmdir foo 2>/dev/null
|    29: mkdir foo 2> /dev/null
|    30: rmdir foo 2> /dev/null
|    36: cat foo/trace 1> /dev/null 2>&1
|    42: echo 1 > foo/events/sched/sched_switch/enable
|    69: mkdir foo
|    70: ls foo > /dev/null
|    71: rmdir foo
|    72: if [ -d foo ]; then
|    76: mkdir foo
|    [all …]
|
| /tools/perf/tests/shell/ |
| D | test_uprobe_from_different_cu.sh |
|    27: perf probe -x ${temp_dir}/testfile -d foo || true
|    41: cat > ${temp_dir}/testfile-foo.h << EOF
|    48: extern int foo (int i, struct t *t);
|    51: cat > ${temp_dir}/testfile-foo.c << EOF
|    55: foo (int i, struct t *t)
|    79: return foo (3, &g);
|    83: gcc -g -Og -flto -c ${temp_dir}/testfile-foo.c -o ${temp_dir}/testfile-foo.o
|    85: gcc -g -Og -o ${temp_dir}/testfile ${temp_dir}/testfile-foo.o ${temp_dir}/testfile-main.o
|    87: perf probe -x ${temp_dir}/testfile --funcs foo | grep "foo"
|    88: perf probe -x ${temp_dir}/testfile foo
|
| /tools/testing/selftests/bpf/prog_tests/ |
| D | cgroup_attach_override.c |  (all matches in serial_test_cgroup_attach_override())
|    28: int drop_prog = -1, allow_prog = -1, foo = -1, bar = -1;  (local)
|    41: foo = test__join_cgroup(FOO);
|    42: if (CHECK(foo < 0, "cgroup_join_foo", "cgroup setup failed\n"))
|    45: if (CHECK(bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS,
|    87: if (CHECK(bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS),
|   111: if (CHECK(!bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS),
|   116: if (CHECK(bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 0),
|   132: if (CHECK(!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS,
|   138: if (CHECK(bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 0),
|   144: close(foo);
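
The matches above all exercise cgroup program override semantics: a program attached to the "foo" cgroup with BPF_F_ALLOW_OVERRIDE may be replaced by a descendant cgroup's own program. As a rough userspace sketch of those semantics (hypothetical names: drop_fd/allow_fd are fds of loaded egress programs, parent_fd/child_fd are open cgroup directory fds; this is not the test's exact code):

    #include <bpf/bpf.h>    /* libbpf: bpf_prog_attach(), bpf_prog_detach() */

    static int attach_with_override(int drop_fd, int allow_fd,
                                    int parent_fd, int child_fd)
    {
            /* Parent cgroup: drop egress traffic, but mark the
             * attachment as overridable by descendants. */
            if (bpf_prog_attach(drop_fd, parent_fd,
                                BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_OVERRIDE))
                    return -1;

            /* Child cgroup: install its own program, overriding the
             * inherited drop program for the child's traffic. */
            if (bpf_prog_attach(allow_fd, child_fd,
                                BPF_CGROUP_INET_EGRESS, 0))
                    return -1;

            /* Detaching from the child falls back to the parent's program. */
            return bpf_prog_detach(child_fd, BPF_CGROUP_INET_EGRESS);
    }

In the test itself, FOO and BAR name nested cgroups, and the CHECK(!...) calls verify that the operations which must fail really do fail.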
|
| D | task_under_cgroup.c |  (all matches in test_task_under_cgroup())
|    14: int ret, foo;  (local)
|    17: foo = test__join_cgroup(FOO);
|    18: if (!ASSERT_OK(foo < 0, "cgroup_join_foo"))
|    59: close(foo);
|
| /tools/testing/selftests/bpf/progs/ |
| D | verifier_helper_value_access.c |
|     9: long long foo;  (member)
|    24: int foo[MAX_ENTRIES];  (member)
|   227: __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo)),  (in via_const_imm_full_range())
|   228: __imm_const(test_val_foo, offsetof(struct test_val, foo))  (in via_const_imm_full_range())
|   255: __imm_const(test_val_foo, offsetof(struct test_val, foo))  (in via_const_imm_partial_range())
|   281: __imm_const(test_val_foo, offsetof(struct test_val, foo))  (in via_const_imm_empty_range())
|   308: __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo) + 8),  (in imm_out_of_bound_range())
|   309: __imm_const(test_val_foo, offsetof(struct test_val, foo))  (in imm_out_of_bound_range())
|   336: __imm_const(test_val_foo, offsetof(struct test_val, foo))  (in const_imm_negative_range_adjustment_1())
|   363: __imm_const(test_val_foo, offsetof(struct test_val, foo))  (in const_imm_negative_range_adjustment_2())
|   [all …]
|
| D | verifier_array_access.c |
|    12: int foo[MAX_ENTRIES];  (member)
|    58: __imm_const(test_val_foo, offsetof(struct test_val, foo))  (in an_array_with_a_constant_1())
|    85: __imm_const(test_val_foo, offsetof(struct test_val, foo))  (in __flag())
|   114: __imm_const(test_val_foo, offsetof(struct test_val, foo))  (in __flag())
|   147: __imm_const(test_val_foo, offsetof(struct test_val, foo))  (in __flag())
|   172: __imm_const(test_val_foo, offsetof(struct test_val, foo))  (in an_array_with_a_constant_2())
|   201: __imm_const(test_val_foo, offsetof(struct test_val, foo))  (in __flag())
|   230: __imm_const(test_val_foo, offsetof(struct test_val, foo))  (in __flag())
|   262: __imm_const(test_val_foo, offsetof(struct test_val, foo))  (in __flag())
|   294: __imm_const(test_val_foo, offsetof(struct test_val, foo))  (in __flag())
|   [all …]
|
| D | linked_list.c |
|    13: struct bpf_list_head head __contains(foo, node2);
|    22: private(C) struct bpf_list_head ghead_array[2] __contains(foo, node2);
|    23: private(C) struct bpf_list_head ghead_array_one[1] __contains(foo, node2);
|    31: struct foo *f;  (in list_push_pop())
|    41: bpf_obj_drop(container_of(n, struct foo, node2));  (in list_push_pop())
|    50: bpf_obj_drop(container_of(n, struct foo, node2));  (in list_push_pop())
|    67: f = container_of(n, struct foo, node2);  (in list_push_pop())
|    82: f = container_of(n, struct foo, node2);  (in list_push_pop())
|    93: bpf_obj_drop(container_of(n, struct foo, node2));  (in list_push_pop())
|   101: bpf_obj_drop(container_of(n, struct foo, node2));  (in list_push_pop())
|   [all …]
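
The linked_list.c matches all follow one kfunc pattern: allocate a node with bpf_obj_new(), push and pop it under the spin lock that guards the list head, and free it with bpf_obj_drop(container_of(...)). A minimal sketch of that pattern, assuming the kfunc declarations and the private()/__contains() macros from the selftests' bpf_experimental.h (not the test's exact code):

    struct foo {
            int data;
            struct bpf_list_node node2;
    };

    private(A) struct bpf_spin_lock glock;
    private(A) struct bpf_list_head ghead __contains(foo, node2);

    SEC("tc")
    int list_push_pop_sketch(void *ctx)
    {
            struct bpf_list_node *n;
            struct foo *f;

            f = bpf_obj_new(typeof(*f));    /* allocate a list node object */
            if (!f)
                    return 0;

            bpf_spin_lock(&glock);
            bpf_list_push_front(&ghead, &f->node2); /* list takes ownership */
            n = bpf_list_pop_front(&ghead);         /* NULL if list is empty */
            bpf_spin_unlock(&glock);

            if (n)  /* recover the containing object and free it */
                    bpf_obj_drop(container_of(n, struct foo, node2));
            return 0;
    }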
|
| D | test_global_data.c |
|    24: struct foo {  (struct)
|    34: __type(value, struct foo);  (argument)
|    52: static const struct foo struct0 = {
|    57: static struct foo struct1;
|    58: static const struct foo struct2;
|    59: static struct foo struct3 = {
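
The `__type(value, struct foo)` match is the usual BTF-typed map declaration: the test stores global and static struct initializers and checks them against a map whose value type is struct foo. A hedged sketch of that declaration shape (field layout and map name are illustrative, not necessarily the test's):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct foo {            /* illustrative layout */
            __u8  a;
            __u32 b;
            __u64 c;
    };

    /* Array map whose value is a whole struct, matching the
     * __type(value, struct foo) line above. */
    struct {
            __uint(type, BPF_MAP_TYPE_ARRAY);
            __uint(max_entries, 4);
            __type(key, __u32);
            __type(value, struct foo);
    } result_struct SEC(".maps");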
|
| D | test_global_func9.c |
|    30: __noinline int foo(const struct S *s)  (in foo(), function)
|    86: result |= foo(&s);  (in global_func9())
|    93: result |= foo(s);  (in global_func9())
|    99: result |= foo((const struct S *)&c);  (in global_func9())
|   103: result |= foo(NULL);  (in global_func9())
|
| D | verifier_const.c |
|     9: const volatile long foo = 42;  (variable)
|    19: bpf_strtol(buff, sizeof(buff), 0, (long *)&foo);  (in tcx1())
|    48: bpf_check_mtu(skb, skb->ifindex, (__u32 *)&foo, 0, 0);  (in tcx4())
|    85: write_fixed((void *)&foo, skb->mark);  (in tcx7())
|    94: write_dyn((void *)&foo, &bart, bpf_get_prandom_u32() & 3);  (in BPF_PROG())
|
| D | linked_list_fail.c |
|    12: struct foo *f, *f1, *f2; \
|   239: f = bpf_obj_new(struct foo);  (in obj_drop_non_zero_off())
|   249: return bpf_obj_new(struct foo)->data;  (in new_null_ret())
|   255: bpf_obj_new(struct foo);  (in obj_new_acq())
|   262: struct foo *f;  (in use_after_drop())
|   290: struct foo *f;  (in direct_read_lock())
|   301: struct foo *f;  (in direct_write_lock())
|   313: struct foo *f;  (in direct_read_head())
|   324: struct foo *f;  (in direct_write_head())
|   336: struct foo *f;  (in direct_read_node())
|   [all …]
|
| D | test_ringbuf_write.c |  (all matches in test_ringbuf_write())
|    23: int *foo, cur_pid = bpf_get_current_pid_tgid() >> 32;  (local)
|    41: foo = sample2 + 4084;
|    42: *foo = 256;
|
| D | test_spin_lock_fail.c |
|     7: struct foo {  (struct)
|    15: __type(value, struct foo);  (argument)
|    37: struct foo *f;  (in lock_id_kptr_preserve())
|    56: struct foo *f;  (in lock_id_mapval_preserve())
|    69: struct foo *f;  (in lock_id_innermapval_preserve())
|    87: struct foo *f1, *f2, *v, *iv; \
|   126: struct foo *f1, *f2;  (in lock_id_mismatch_mapval_mapval())
|   150: struct foo *f1, *f2;  (in lock_id_mismatch_innermapval_innermapval1())
|   174: struct foo *f1, *f2;  (in lock_id_mismatch_innermapval_innermapval2())
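
These _fail tests revolve around lock identity: a bpf_spin_lock embedded in a map value only protects fields of that same value, and pairing the lock from one value with data from another must be rejected. For contrast, a sketch of the accepted shape (names illustrative, not the test's code):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct foo {
            struct bpf_spin_lock lock;
            int data;
    };

    struct {
            __uint(type, BPF_MAP_TYPE_ARRAY);
            __uint(max_entries, 1);
            __type(key, int);
            __type(value, struct foo);
    } lock_map SEC(".maps");

    SEC("tc")
    int lock_same_value(void *ctx)
    {
            int key = 0;
            struct foo *f = bpf_map_lookup_elem(&lock_map, &key);

            if (!f)
                    return 0;
            /* Lock and data live in the same map value, so the verifier
             * accepts this; the *_mismatch_* tests above pair a lock and
             * data from different values and expect rejection. */
            bpf_spin_lock(&f->lock);
            f->data = 1;
            bpf_spin_unlock(&f->lock);
            return 0;
    }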
|
| D | test_global_func8.c |
|     8: __noinline int foo(struct __sk_buff *skb)  (in foo(), function)
|    17: if (!foo(skb))  (in global_func8())
|
| D | test_global_func16.c |
|     7: __noinline int foo(int (*arr)[10])  (in foo(), function)
|    21: const int rv = foo(&array);  (in global_func16())
|
| D | test_global_func7.c |
|     9: void foo(struct __sk_buff *skb)  (in foo(), function)
|    18: foo(skb);  (in global_func7())
|
| D | test_global_func14.c |
|     9: __noinline int foo(const struct S *s)  (in foo(), function)
|    22: return foo(NULL);  (in global_func14())
|
| D | test_global_func17.c |
|     6: __noinline int foo(int *p)  (in foo(), function)
|    18: return foo((int *)&i);  (in global_func17())
|
| D | linked_list.h |
|    14: struct foo {  (struct)
|    25: struct bpf_list_head head __contains(foo, node2);  (argument)
|    53: private(A) struct bpf_list_head ghead __contains(foo, node2);
|
| D | test_global_func11.c |
|    11: __noinline int foo(const struct S *s)  (in foo(), function)
|    20: return foo((const void *)skb);  (in global_func11())
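
The test_global_func* rows above all probe the same construct: a __noinline, non-static helper that the verifier checks once as a "global function", with its own rules for pointer arguments (several of the tests, such as the foo(NULL) and foo((const void *)skb) calls, exist precisely to hit the rejected cases). A minimal sketch of the accepted shape (struct and names illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct S { int x; };

    /* Global (non-static) function: verified independently of its
     * callers, so its arguments carry extra constraints at call sites. */
    __noinline int foo(const struct S *s)
    {
            return s ? s->x : 0;
    }

    SEC("tc")
    int caller(struct __sk_buff *skb)
    {
            struct S s = { .x = 1 };

            return foo(&s);
    }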
|
| /tools/perf/Documentation/ |
| D | callchain-overhead-calculation.txt |
|    24: void foo(void) {
|    30: foo();
|    39: In this case 'foo' is a child of 'bar', and 'bar' is an immediate
|    40: child of 'main' so 'foo' also is a child of 'main'. In other words,
|    41: 'main' is a parent of 'foo' and 'bar', and 'bar' is a parent of 'foo'.
|    43: Suppose all samples are recorded in 'foo' and 'bar' only. When it's
|    50: 60.00% foo
|    52: --- foo
|    65: child functions (i.e. 'foo' and 'bar') are added to the parents to
|    87: 60.00% 60.00% foo
|   [all …]
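
The fragments sketch the example this document builds on: a three-function chain where main calls bar and bar calls foo, with all samples landing in foo (60%) and bar (40%). Reconstructed approximately from the fragments:

    void foo(void) {
            /* burn cycles: all 'foo' samples land here */
    }

    void bar(void) {
            /* burn cycles, then call the child */
            foo();
    }

    int main(void) {
            bar();
            return 0;
    }

With --children enabled, each function's self overhead is also added to its callers' "children" column, so the leaf 'foo' reports the same value in both columns (the "60.00% 60.00% foo" line) while 'bar' and 'main' report children percentages larger than their self overhead.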
|
| /tools/testing/selftests/damon/ |
| D | debugfs_duplicate_context_creation.sh |
|     9: if ! echo foo > "$DBGFS/mk_contexts"
|    15: if echo foo > "$DBGFS/mk_contexts"
|    21: if ! echo foo > "$DBGFS/rm_contexts"
|
| /tools/memory-model/Documentation/ |
| D | access-marking.txt |
|   243: For example, ASSERT_EXCLUSIVE_ACCESS(foo) tells KCSAN that any
|   244: concurrent access to variable foo by any other CPU is an error, even
|   246: ASSERT_EXCLUSIVE_WRITER(foo) tells KCSAN that although it is OK for there
|   247: to be concurrent reads from foo from other CPUs, it is an error for some
|   248: other CPU to be concurrently writing to foo, even if that concurrent
|   269: For example, suppose a shared variable "foo" is read only while a
|   274: int foo;
|   280: foo = newval;
|   291: ret = foo;
|   298: pr_info("Current value of foo: %d\n", data_race(foo));
|   [all …]
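
The second half of the fragments (lines 269-298) belongs to the document's lock-plus-lockless-diagnostic example: "foo" is read and written only under a reader-writer lock, except for one deliberately racy diagnostic read that is therefore wrapped in data_race(). Reconstructed approximately from the fragments:

    int foo;
    DEFINE_RWLOCK(foo_rwlock);

    void update_foo(int newval)
    {
            write_lock(&foo_rwlock);
            foo = newval;
            write_unlock(&foo_rwlock);
    }

    int read_foo(void)
    {
            int ret;

            read_lock(&foo_rwlock);
            ret = foo;
            read_unlock(&foo_rwlock);
            return ret;
    }

    void read_foo_diagnostic(void)
    {
            pr_info("Current value of foo: %d\n", data_race(foo));
    }

data_race() tells KCSAN the racy read is intentional without adding any ordering; the ASSERT_EXCLUSIVE_*() calls described in the earlier fragments go the other way, asking KCSAN to flag any concurrent access (or any concurrent write) the developer believes cannot happen.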
|
| /tools/bootconfig/samples/ |
| D | bad-samekey.bconf |
|     3: foo = value
|     6: key.foo = value
|
| /tools/testing/selftests/net/ |
| D | unicast_extensions.sh |
|    60: ip -n $foo_ns address add $1/$3 dev foo || return 1
|    61: ip -n $foo_ns link set foo up || return 1
|    88: ip -n $foo_ns address add $1/$5 dev foo || return 1
|    89: ip -n $foo_ns link set foo up || return 1
|   117: ip link add foo netns $foo_ns type veth peer name bar netns $bar_ns
|   141: ip link add foo netns $foo_ns type veth peer name foo1 netns $router_ns
|