/tools/testing/selftests/bpf/map_tests/ |
D | htab_map_batch_ops.c |
    13  static void map_batch_update(int map_fd, __u32 max_entries, int *keys,  in map_batch_update() argument
    27  for (i = 0; i < max_entries; i++) {  in map_batch_update()
    36  err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &opts);  in map_batch_update()
    40  static void map_batch_verify(int *visited, __u32 max_entries,  in map_batch_verify() argument
    50  memset(visited, 0, max_entries * sizeof(*visited));  in map_batch_verify()
    51  for (i = 0; i < max_entries; i++) {  in map_batch_verify()
    70  for (i = 0; i < max_entries; i++) {  in map_batch_verify()
    81  const __u32 max_entries = 10;  in __test_map_lookup_and_delete_batch() local
    82  value pcpu_values[max_entries];  in __test_map_lookup_and_delete_batch()
    98  xattr.max_entries = max_entries;  in __test_map_lookup_and_delete_batch()
    [all …]
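The helpers above wrap libbpf's batch API. A minimal sketch of the update side, assuming libbpf v0.7+ for LIBBPF_OPTS and bpf_map_update_batch(); the helper name and value scheme are illustrative:

    #include <bpf/bpf.h>

    static int batch_fill(int map_fd, __u32 max_entries)
    {
        int keys[max_entries], values[max_entries];
        __u32 i, count = max_entries;
        LIBBPF_OPTS(bpf_map_batch_opts, opts);

        for (i = 0; i < max_entries; i++) {
            keys[i] = i;
            values[i] = i + 1;
        }
        /* One syscall updates every element; on return, count holds
         * the number of entries the kernel actually processed. */
        return bpf_map_update_batch(map_fd, keys, values, &count, &opts);
    }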
|
D | lpm_trie_map_batch_ops.c |
    21  static void map_batch_update(int map_fd, __u32 max_entries,  in map_batch_update() argument
    32  for (i = 0; i < max_entries; i++) {  in map_batch_update()
    39  err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &opts);  in map_batch_update()
    43  static void map_batch_verify(int *visited, __u32 max_entries,  in map_batch_verify() argument
    50  memset(visited, 0, max_entries * sizeof(*visited));  in map_batch_verify()
    51  for (i = 0; i < max_entries; i++) {  in map_batch_verify()
    59  for (i = 0; i < max_entries; i++) {  in map_batch_verify()
    77  const __u32 max_entries = 10;  in test_lpm_trie_map_batch_ops() local
    85  xattr.max_entries = max_entries;  in test_lpm_trie_map_batch_ops()
    90  keys = malloc(max_entries * sizeof(struct test_lpm_key));  in test_lpm_trie_map_batch_ops()
    [all …]
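For the LPM-trie variant, the key carries a prefix length ahead of the address bytes, and the map type requires BPF_F_NO_PREALLOC. A sketch under those assumptions (the struct mirrors the test_lpm_key allocation above; the exact field layout here is illustrative):

    #include <arpa/inet.h>
    #include <bpf/bpf.h>

    struct test_lpm_key {
        __u32 prefixlen;        /* significant bits of the address */
        struct in_addr ipv4;    /* network-byte-order address */
    };

    static int lpm_create(__u32 max_entries)
    {
        /* LPM tries reject preallocation, so the flag is mandatory. */
        LIBBPF_OPTS(bpf_map_create_opts, opts,
                    .map_flags = BPF_F_NO_PREALLOC);

        return bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, "lpm_test",
                              sizeof(struct test_lpm_key), sizeof(int),
                              max_entries, &opts);
    }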
|
D | array_map_batch_ops.c |
    14  static void map_batch_update(int map_fd, __u32 max_entries, int *keys,  in map_batch_update() argument
    24  for (i = 0; i < max_entries; i++) {  in map_batch_update()
    35  err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &opts);  in map_batch_update()
    39  static void map_batch_verify(int *visited, __u32 max_entries, int *keys,  in map_batch_verify() argument
    45  memset(visited, 0, max_entries * sizeof(*visited));  in map_batch_verify()
    46  for (i = 0; i < max_entries; i++) {  in map_batch_verify()
    63  for (i = 0; i < max_entries; i++) {  in map_batch_verify()
    80  const __u32 max_entries = 10;  in __test_map_lookup_and_update_batch() local
    89  xattr.max_entries = max_entries;  in __test_map_lookup_and_update_batch()
    98  keys = calloc(max_entries, sizeof(*keys));  in __test_map_lookup_and_update_batch()
    [all …]
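Reading back in batches works the same way in reverse: an opaque cursor (in_batch/out_batch) lets the walk resume across calls until the kernel reports ENOENT. A hedged sketch of that loop, assuming libbpf's 1.0 errno convention; variable names are illustrative:

    #include <errno.h>
    #include <bpf/bpf.h>

    static int batch_read(int map_fd, int *keys, int *values, __u32 max_entries)
    {
        __u32 in = 0, out = 0, count, total = 0;
        LIBBPF_OPTS(bpf_map_batch_opts, opts);
        int err = 0;

        while (!err && total < max_entries) {
            count = max_entries - total;
            /* NULL in_batch starts the walk; the kernel writes a
             * resume cursor into out for the next call. */
            err = bpf_map_lookup_batch(map_fd, total ? &in : NULL, &out,
                                       keys + total, values + total,
                                       &count, &opts);
            if (err && errno != ENOENT)
                return -errno;
            total += count;
            in = out;
        }
        return total;   /* ENOENT just marks the end of the map */
    }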
|
/tools/testing/selftests/bpf/progs/ |
D | test_btf_map_in_map.c |
     8  __uint(max_entries, 1);
    16  __uint(max_entries, 2);
    23  __uint(max_entries, 3);
    31  __uint(max_entries, 1);
    47  __uint(max_entries, 3);
    56  __uint(max_entries, 5);
    63  __uint(max_entries, 3);
    69  __uint(max_entries, 1);
    83  __uint(max_entries, 5);
    99  __uint(max_entries, 1);
    [all …]
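These counts come from BTF-defined map-in-map declarations. A minimal sketch in libbpf's declarative style, assuming the usual selftest headers; map names are illustrative:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct inner_map {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, int);
    } inner_map1 SEC(".maps");

    struct {
        __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
        __uint(max_entries, 3);
        __type(key, int);
        /* __array(values, ...) fixes the inner map type of the outer map */
        __array(values, struct inner_map);
    } outer_arr SEC(".maps") = {
        .values = { [0] = &inner_map1 },
    };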
|
D | map_ptr_kern.c |
    34  __u32 max_entries;  member
    39  __u32 value_size, __u32 max_entries)  in check_bpf_map_fields() argument
    44  VERIFY(map->max_entries == max_entries);  in check_bpf_map_fields()
    56  VERIFY(indirect->max_entries == direct->max_entries);  in check_bpf_map_ptr()
    63  __u32 key_size, __u32 value_size, __u32 max_entries)  in check() argument
    67  max_entries));  in check()
   101  __uint(max_entries, MAX_ENTRIES);
   137  __uint(max_entries, MAX_ENTRIES);
   152  for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) {  in check_array()
   169  __uint(max_entries, MAX_ENTRIES);
    [all …]
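The member/VERIFY lines reflect a CO-RE trick: the program declares a partial mirror of the kernel's struct bpf_map and lets preserve_access_index relocate the field offsets at load time. A simplified sketch of that shape (the kernel struct has more fields, and the real VERIFY is the test's own early-return macro):

    /* Partial mirror of the kernel's struct bpf_map; the attribute turns
     * every field access into a CO-RE relocation, fixed up against the
     * running kernel's BTF when the program is loaded. */
    struct bpf_map {
        enum bpf_map_type map_type;
        __u32 key_size;
        __u32 value_size;
        __u32 max_entries;
        __u32 id;
    } __attribute__((preserve_access_index));

    static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size,
                                           __u32 value_size, __u32 max_entries)
    {
        return map->key_size == key_size &&
               map->value_size == value_size &&
               map->max_entries == max_entries;
    }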
|
D | lsm.c |
    14  __uint(max_entries, 1);
    21  __uint(max_entries, 1);
    28  __uint(max_entries, 1);
    35  __uint(max_entries, 1);
    42  __uint(max_entries, 1);
    49  __uint(max_entries, 1);
    56  __uint(max_entries, 1);
    63  __uint(max_entries, 1);
    73  __uint(max_entries, 1);
|
D | sockmap_verdict_prog.c |
     9  __uint(max_entries, 20);
    16  __uint(max_entries, 20);
    23  __uint(max_entries, 20);
    30  __uint(max_entries, 20);
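Each of those four maps backs a verdict program. A hedged sketch of the pattern: a sockmap keyed by index, plus an sk_skb verdict program that redirects every skb to the socket at slot 0 (map name and slot are illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_SOCKMAP);
        __uint(max_entries, 20);
        __uint(key_size, sizeof(int));
        __uint(value_size, sizeof(int));
    } sock_map_rx SEC(".maps");

    SEC("sk_skb/stream_verdict")
    int prog_verdict(struct __sk_buff *skb)
    {
        /* Steer the skb to the socket stored at index 0;
         * returning SK_DROP would discard it instead. */
        return bpf_sk_redirect_map(skb, &sock_map_rx, 0, 0);
    }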
|
D | test_pinning.c |
    10  __uint(max_entries, 1);
    18  __uint(max_entries, 1);
    25  __uint(max_entries, 1);
|
D | test_stacktrace_map.c |
    13  __uint(max_entries, 1);
    20  __uint(max_entries, 16384);
    29  __uint(max_entries, 16384);
    36  __uint(max_entries, 16384);
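The 16384-entry maps hold captured stacks keyed by the id that bpf_get_stackid() returns. A minimal sketch, assuming PERF_MAX_STACK_DEPTH from linux/perf_event.h; the tracepoint choice is illustrative:

    #include <linux/bpf.h>
    #include <linux/perf_event.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_STACK_TRACE);
        __uint(max_entries, 16384);
        __uint(key_size, sizeof(__u32));
        /* one full stack per slot: PERF_MAX_STACK_DEPTH frames of __u64 */
        __uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(__u64));
    } stackmap SEC(".maps");

    SEC("tracepoint/sched/sched_switch")
    int oncpu(void *ctx)
    {
        /* Returns a slot id into stackmap, or a negative error
         * (e.g. when all 16384 slots are already taken). */
        long id = bpf_get_stackid(ctx, &stackmap, 0);

        if (id < 0)
            return 0;
        /* id can now be stored and resolved to frames from userspace */
        return 0;
    }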
|
D | test_stacktrace_build_id.c |
    13  __uint(max_entries, 1);
    20  __uint(max_entries, 16384);
    29  __uint(max_entries, 128);
    37  __uint(max_entries, 128);
|
D | test_sockmap_update.c |
     8  __uint(max_entries, 1);
    15  __uint(max_entries, 1);
    22  __uint(max_entries, 1);
|
D | bpf_iter_sockmap.c |
    13  __uint(max_entries, 64);
    20  __uint(max_entries, 64);
    27  __uint(max_entries, 64);
|
D | linked_maps1.c |
    15  __uint(max_entries, 16);
    22  __uint(max_entries, 8);
    37  __uint(max_entries, 16);
|
D | linked_maps2.c |
    13  __uint(max_entries, 16);
    23  __uint(max_entries, 8);
    31  __uint(max_entries, 16);
|
D | test_map_in_map_invalid.c |
    10  __uint(max_entries, 4);
    15  __uint(max_entries, 0); /* This will make map creation to fail */
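The quoted comment is the whole point of the file: an inner map with max_entries of 0 cannot be created, so loading the object must fail. The same rejection can be observed directly from userspace; a sketch assuming libbpf's bpf_map_create() (v0.7+):

    #include <errno.h>
    #include <bpf/bpf.h>

    static int expect_inner_create_failure(void)
    {
        int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner",
                                sizeof(int), sizeof(int),
                                /*max_entries=*/0, NULL);

        /* The kernel rejects max_entries == 0 with EINVAL. */
        return (fd < 0 && errno == EINVAL) ? 0 : -1;
    }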
|
D | sample_map_ret0.c |
     9  .max_entries = 2,
    16  .max_entries = 2,
|
D | recursion.c |
    12  __uint(max_entries, 1);
    19  __uint(max_entries, 1);
|
D | test_pe_preserve_elems.c |
     9  __uint(max_entries, 1);
    16  __uint(max_entries, 1);
|
D | test_select_reuseport_kern.c |
    26  __uint(max_entries, 1);
    33  __uint(max_entries, NR_RESULTS);
    40  __uint(max_entries, 1);
    47  __uint(max_entries, 1);
    54  __uint(max_entries, 1);
|
D | xdp_redirect_multi_kern.c |
    19  __uint(max_entries, 1024);
    26  __uint(max_entries, 128);
    34  __uint(max_entries, 128);
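The 128-entry maps are device maps used for multicast-style redirect. A hedged sketch of the forwarding program: BPF_F_BROADCAST clones the frame to every map entry and BPF_F_EXCLUDE_INGRESS skips the receiving interface (map and function names are illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
        __uint(max_entries, 128);
        __uint(key_size, sizeof(int));
        __uint(value_size, sizeof(struct bpf_devmap_val));
    } forward_map SEC(".maps");

    SEC("xdp")
    int xdp_redirect_map_multi(struct xdp_md *xdp)
    {
        /* The key is ignored with BPF_F_BROADCAST: the frame goes to
         * every interface in forward_map except the ingress one. */
        return bpf_redirect_map(&forward_map, 0,
                                BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
    }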
|
D | bpf_iter_bpf_hash_map.c |
    16  __uint(max_entries, 3);
    23  __uint(max_entries, 3);
    30  __uint(max_entries, 3);
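Those three hash maps feed a map-element iterator. A sketch of the program shape, assuming the selftests' bpf_iter setup (vmlinux.h or equivalent supplying struct bpf_iter__bpf_map_elem); the seq output is illustrative:

    SEC("iter/bpf_map_elem")
    int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx)
    {
        struct seq_file *seq = ctx->meta->seq;
        __u32 *key = ctx->key;
        __u64 *val = ctx->value;

        /* key and value are NULL on the post-iteration call. */
        if (!key || !val)
            return 0;

        bpf_seq_write(seq, key, sizeof(*key));
        bpf_seq_write(seq, val, sizeof(*val));
        return 0;
    }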
|
/tools/testing/selftests/bpf/prog_tests/ |
D | for_each.c |
    12  int i, err, hashmap_fd, max_entries, percpu_map_fd;  in test_hash_map() local
    23  max_entries = bpf_map__max_entries(skel->maps.hashmap);  in test_hash_map()
    24  for (i = 0; i < max_entries; i++) {  in test_hash_map()
    53  ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems");  in test_hash_map()
    72  __u32 key, num_cpus, max_entries, retval;  in test_array_map() local
    84  max_entries = bpf_map__max_entries(skel->maps.arraymap);  in test_array_map()
    85  for (i = 0; i < max_entries; i++) {  in test_array_map()
    89  if (i != max_entries - 1)  in test_array_map()
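What this test drives on the BPF side is bpf_for_each_map_elem(), which invokes a callback once per element until the callback returns 1. A hedged sketch (the helper signature is real; the map, context struct, and section are illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct bpf_map;     /* opaque to the callback */

    struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 32);
        __type(key, __u32);
        __type(value, __u64);
    } hashmap SEC(".maps");

    struct callback_ctx {
        int output;
    };

    static __u64 count_elem(struct bpf_map *map, __u32 *key, __u64 *val,
                            struct callback_ctx *data)
    {
        data->output++;
        return 0;       /* 0 = keep iterating, 1 = stop early */
    }

    SEC("tc")
    int count_all(struct __sk_buff *skb)
    {
        struct callback_ctx data = { 0 };

        bpf_for_each_map_elem(&hashmap, count_elem, &data, 0);
        return data.output;
    }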
|
D | btf.c |
    75  __u32 max_entries;  member
   143  .max_entries = 4,
   198  .max_entries = 4,
   223  .max_entries = 4,
   264  .max_entries = 4,
   309  .max_entries = 1,
   331  .max_entries = 1,
   353  .max_entries = 1,
   375  .max_entries = 1,
   400  .max_entries = 1,
    [all …]
|
/tools/lib/perf/ |
D | cpumap.c |
   109  int max_entries = 0;  in perf_cpu_map__read() local
   125  if (new_max >= max_entries) {  in perf_cpu_map__read()
   126  max_entries = new_max + MAX_NR_CPUS / 2;  in perf_cpu_map__read()
   127  tmp = realloc(tmp_cpus, max_entries * sizeof(int));  in perf_cpu_map__read()
   136  if (nr_cpus == max_entries) {  in perf_cpu_map__read()
   137  max_entries += MAX_NR_CPUS;  in perf_cpu_map__read()
   138  tmp = realloc(tmp_cpus, max_entries * sizeof(int));  in perf_cpu_map__read()
   183  int max_entries = 0;  in perf_cpu_map__new() local
   226  if (nr_cpus == max_entries) {  in perf_cpu_map__new()
   227  max_entries += MAX_NR_CPUS;  in perf_cpu_map__new()
    [all …]
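Both functions grow the CPU array the same way; the detail worth keeping is that realloc's result goes to a temporary, so a failed grow never loses the existing buffer. A standalone sketch of that pattern (the step size stands in for MAX_NR_CPUS):

    #include <stdlib.h>

    static int push_cpu(int **cpus, int *nr_cpus, int *max_entries, int cpu)
    {
        if (*nr_cpus == *max_entries) {
            int new_max = *max_entries + 2048;  /* MAX_NR_CPUS in the real code */
            int *tmp = realloc(*cpus, new_max * sizeof(int));

            if (!tmp)
                return -1;      /* *cpus is still valid; caller frees it */
            *cpus = tmp;
            *max_entries = new_max;
        }
        (*cpus)[(*nr_cpus)++] = cpu;
        return 0;
    }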
|
/tools/include/linux/ |
D | stacktrace.h |
     8  unsigned int nr_entries, max_entries;  member
    20  backtrace((void **)(trace)->entries, (trace)->max_entries))
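This header is a userspace shim: in the tools/ tree, save_stack_trace() maps straight onto glibc's backtrace(3), filling entries up to max_entries and recording the count in nr_entries. A usage sketch under that assumption (the DEPTH constant is illustrative):

    #include <execinfo.h>
    #include <linux/stacktrace.h>   /* the tools/include shim above */

    #define DEPTH 64

    static void capture(void)
    {
        unsigned long entries[DEPTH];
        struct stack_trace trace = {
            .max_entries = DEPTH,
            .entries = entries,
        };

        /* Expands to backtrace((void **)trace.entries, trace.max_entries)
         * and stores the frame count in trace.nr_entries. */
        save_stack_trace(&trace);
    }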
|