| /tools/testing/selftests/bpf/progs/ |
| D | test_bpf_ma.c |
      56   static __always_inline void batch_alloc(struct bpf_map *map, unsigned int batch, unsigned int idx)   in batch_alloc() argument
      62   for (i = 0; i < batch; i++) {   in batch_alloc()
      83   static __always_inline void batch_free(struct bpf_map *map, unsigned int batch, unsigned int idx)   in batch_free() argument
      89   for (i = 0; i < batch; i++) {   in batch_free()
      105  static __always_inline void batch_percpu_alloc(struct bpf_map *map, unsigned int batch,   in batch_percpu_alloc() argument
      112  for (i = 0; i < batch; i++) {   in batch_percpu_alloc()
      133  static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int batch,   in batch_percpu_free() argument
      140  for (i = 0; i < batch; i++) {   in batch_percpu_free()
      154  #define CALL_BATCH_ALLOC(size, batch, idx) \   argument
      155  batch_alloc((struct bpf_map *)(&array_##size), batch, idx)
      [all …]
|
| /tools/testing/selftests/bpf/map_tests/ |
| D | htab_map_batch_ops.c |
      79   __u32 batch, count, total, total_success;   in __test_map_lookup_and_delete_batch() local
      109  err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,   in __test_map_lookup_and_delete_batch()
      119  err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,   in __test_map_lookup_and_delete_batch()
      127  err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,   in __test_map_lookup_and_delete_batch()
      153  total ? &batch : NULL,   in __test_map_lookup_and_delete_batch()
      154  &batch, keys + total,   in __test_map_lookup_and_delete_batch()
      216  total ? &batch : NULL,   in __test_map_lookup_and_delete_batch()
      217  &batch, keys + total,   in __test_map_lookup_and_delete_batch()
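
The total ? &batch : NULL idiom above is how these tests walk a map in chunks:
the first bpf_map_lookup_and_delete_batch() call passes NULL as in_batch to
start from the beginning, every later call resumes from the out_batch cookie
returned by the previous call, and iteration is complete once the kernel
reports ENOENT.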
|
| D | array_map_batch_ops.c |
      75   __u64 batch = 0;   in __test_map_lookup_and_update_batch() local
      105  batch = 0;   in __test_map_lookup_and_update_batch()
      113  total ? &batch : NULL,   in __test_map_lookup_and_update_batch()
      114  &batch, keys + total,   in __test_map_lookup_and_update_batch()
|
| D | lpm_trie_map_batch_ops.c |
      73   __u64 batch = 0;   in test_lpm_trie_map_batch_ops() local
      98   batch = 0;   in test_lpm_trie_map_batch_ops()
      106  total ? &batch : NULL, &batch,   in test_lpm_trie_map_batch_ops()
|
| D | map_percpu_stats.c |
      89   static void delete_all_elements(__u32 type, int map_fd, bool batch)   in delete_all_elements() argument
      103  if (batch) {   in delete_all_elements()
|
| /tools/virtio/ |
| D | virtio_test.c |
      170  bool delayed, int batch, int reset_n, int bufs)   in run_test() argument
      178  const bool random_batch = batch == RANDOM_BATCH;   in run_test()
      193  batch = (random() % vq->vring.num) + 1;   in run_test()
      196  (started - completed) < batch) {   in run_test()
      349  long batch = 1, reset = 0;   in main() local
      376  batch = RANDOM_BATCH;   in main()
      378  batch = strtol(optarg, NULL, 10);   in main()
      379  assert(batch > 0);   in main()
      380  assert(batch < (long)INT_MAX + 1);   in main()
      401  run_test(&dev, &dev.vqs[0], delayed, batch, reset, 0x100000);   in main()
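
In run_test() above, batch is the ceiling on buffers left outstanding before
the device is kicked: the submission loop keeps adding buffers only while
(started - completed) < batch, and in the RANDOM_BATCH case a new batch size
between 1 and the ring size is drawn each round.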
|
| /tools/virtio/ringtest/ |
| D | main.c |
      22   int batch = 1;   variable
      116  int tokick = batch;   in run_guest()
      129  tokick = batch;   in run_guest()
      348  batch = c;   in main()
      372  if (batch > max_outstanding)   in main()
      373  batch = max_outstanding;   in main()
|
| D | ptr_ring.c | 124 array.batch = param; in alloc_ring()
|
| /tools/testing/selftests/drivers/net/mlxsw/ |
| D | fib_offload.sh |
      284  >> $batch_dir/add.batch
      286  >> $batch_dir/del.batch
      291  ip -batch $batch_dir/add.batch
      307  ip -batch $batch_dir/del.batch
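
The test above appends route commands to add.batch/del.batch and replays them
with ip's batch mode, which reads one command per line (without the leading
"ip") and avoids forking ip for every route. A minimal sketch of the same idea;
the dummy0 interface and the file name are placeholders:

    printf 'route add 192.0.2.%d/32 dev dummy0\n' $(seq 1 254) > routes.batch
    ip -batch routes.batch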
|
| /tools/testing/radix-tree/ |
| D | test.c |
      174  unsigned batch, xa_mark_t iftag, xa_mark_t thentag)   in tag_tagged_items() argument
      180  if (batch == 0)   in tag_tagged_items()
      181  batch = 1;   in tag_tagged_items()
      186  if (++tagged % batch)   in tag_tagged_items()
|
| D | test.h | 31 unsigned batch, xa_mark_t iftag, xa_mark_t thentag);
|
| /tools/kvm/kvm_stat/ |
| D | kvm_stat.txt |
      23   Use batch and logging modes for scripting purposes.
      64   --batch::
      65   run in batch mode for one second
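
As the man page fragment above says, --batch samples the counters for one
second, prints a single snapshot and exits, which is the mode intended for
scripting rather than the interactive curses display. A minimal sketch,
redirecting the snapshot to an illustrative file name:

    kvm_stat --batch > kvm-counters.txt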
|
| /tools/testing/selftests/net/packetdrill/ |
| D | tcp_zerocopy_batch.pkt | 2 // batch zerocopy test:
|
| /tools/testing/selftests/rcutorture/bin/ |
| D | kvm-test-1-run-batch.sh | 19 echo ---- Running batch $*
|
| D | torture.sh |
      671  …echo Waiting for batch $batchno of $ncompresses compressions `date` | tee -a "$tdir/log-xz" | tee …
      680  …echo Waiting for final batch $batchno of $ncompresses compressions `date` | tee -a "$tdir/log-xz" …
|
| /tools/testing/selftests/drivers/net/mlxsw/spectrum-2/ |
| D | tc_flower.sh |
      668  batch="${batch}filter add dev $iface ingress protocol ipv6 pref 1 \
      689  batch="${batch}filter del dev $iface ingress protocol ipv6 pref 1 \
      751  declare batch=""
      754  echo -n -e $batch | tc -b -
      756  declare batch=""
      784  echo -n -e $batch | tc -b -
      806  declare batch=""
      810  echo -e $batch | tc -b -
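
The test accumulates flower filter commands in the $batch shell variable and
pipes them to tc's batch mode: "tc -b -" reads one tc command per line from
stdin, each line written without the leading "tc". A stand-alone sketch of the
same pattern; eth0 and the match addresses are placeholders, and the clsact
qdisc must exist before ingress filters can be attached:

    tc qdisc add dev eth0 clsact
    printf '%s\n' \
        'filter add dev eth0 ingress protocol ip pref 1 flower dst_ip 198.51.100.1 action drop' \
        'filter add dev eth0 ingress protocol ip pref 2 flower dst_ip 198.51.100.2 action drop' |
        tc -b -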
|
| /tools/testing/selftests/net/ |
| D | xfrm_policy_add_speed.sh | 59 if ! timeout "$timeout" ip netns exec "$ns" ip -batch "$tmp";then
|
| D | txtimestamp.c |
      314  int batch = 0;   in __recv_errmsg_cmsg() local
      355  batch++;   in __recv_errmsg_cmsg()
      359  if (batch > 1) {   in __recv_errmsg_cmsg()
      360  fprintf(stderr, "batched %d timestamps\n", batch);   in __recv_errmsg_cmsg()
      361  } else if (!batch) {   in __recv_errmsg_cmsg()
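
__recv_errmsg_cmsg() above counts how many SCM_TIMESTAMPING control messages a
single recvmsg() on the error queue carries, so it can report when timestamps
arrive batched. A self-contained sketch of that receive path, assuming software
TX timestamps on a loopback UDP socket; the destination, send count and buffer
sizes are illustrative only:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <linux/net_tstamp.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int flags = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;
        struct sockaddr_in dst = {
            .sin_family = AF_INET,
            .sin_port = htons(9),                    /* "discard" port */
            .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
        };
        char ctrl[1024], data[64];
        struct msghdr msg = { .msg_control = ctrl, .msg_controllen = sizeof(ctrl) };
        struct cmsghdr *cm;
        int i, batch;

        memset(data, 0, sizeof(data));
        setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));

        for (i = 0; i < 4; i++)                      /* queue a few sends */
            sendto(fd, data, sizeof(data), 0,
                   (struct sockaddr *)&dst, sizeof(dst));
        sleep(1);                                    /* let the timestamps land */

        /* Each recvmsg() on the error queue may carry one or more timestamp cmsgs. */
        while (recvmsg(fd, &msg, MSG_ERRQUEUE) >= 0) {
            batch = 0;
            for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
                if (cm->cmsg_level == SOL_SOCKET &&
                    cm->cmsg_type == SCM_TIMESTAMPING)
                    batch++;
            printf("message carried %d timestamp cmsg(s)\n", batch);
            msg.msg_controllen = sizeof(ctrl);       /* reset for the next call */
        }
        close(fd);
        return 0;
    }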
|
| D | xfrm_policy.sh |
      167  done | ip -batch /dev/stdin
      178  done | ip -batch /dev/stdin
|
| /tools/testing/selftests/bpf/ |
| D | verify_sig_setup.sh | 42 -batch -x509 -config ${tmp_dir}/x509.genkey \
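
Here -batch is openssl req's non-interactive mode: it suppresses the usual
prompts and takes every certificate field from the supplied configuration,
which is what lets the selftest generate its signing key unattended. A
stand-alone sketch with hypothetical file names, using -subj instead of a
config file:

    openssl req -new -x509 -batch -nodes -newkey rsa:2048 -days 365 \
            -subj '/CN=bpf-sig-test' -keyout signing_key.pem -out signing_cert.pem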
|
| D | network_helpers.c |
      632  char batch[1500];   in send_recv_server() local
      649  nr_sent = send(fd, &batch,   in send_recv_server()
      650  MIN(a->bytes - bytes, sizeof(batch)), 0);   in send_recv_server()
      686  char batch[1500];   in send_recv_data() local
      697  nr_recv = recv(fd, &batch,   in send_recv_data()
      698  MIN(total_bytes - bytes, sizeof(batch)), 0);   in send_recv_data()
|
| /tools/lib/bpf/ |
| D | bpf.c |
      528  const size_t attr_sz = offsetofend(union bpf_attr, batch);   in bpf_map_batch_common()
      536  attr.batch.map_fd = fd;   in bpf_map_batch_common()
      537  attr.batch.in_batch = ptr_to_u64(in_batch);   in bpf_map_batch_common()
      538  attr.batch.out_batch = ptr_to_u64(out_batch);   in bpf_map_batch_common()
      539  attr.batch.keys = ptr_to_u64(keys);   in bpf_map_batch_common()
      540  attr.batch.values = ptr_to_u64(values);   in bpf_map_batch_common()
      541  attr.batch.count = *count;   in bpf_map_batch_common()
      542  attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0);   in bpf_map_batch_common()
      543  attr.batch.flags = OPTS_GET(opts, flags, 0);   in bpf_map_batch_common()
      546  *count = attr.batch.count;   in bpf_map_batch_common()
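
bpf_map_batch_common() above is the shared backend behind libbpf's
bpf_map_*_batch() wrappers. A minimal user-space sketch of driving it through
bpf_map_lookup_batch(), assuming map_fd refers to an already-created
BPF_MAP_TYPE_HASH with __u32 keys and __u64 values; the 64-element buffers are
illustrative:

    #include <errno.h>
    #include <stdio.h>
    #include <stdbool.h>
    #include <bpf/bpf.h>

    /* Dump a hash map in chunks of up to 64 elements per syscall. */
    static int dump_map(int map_fd)
    {
        __u32 keys[64], batch, count, i;
        __u64 values[64];
        bool first = true;
        int err;

        do {
            count = 64;  /* in: buffer capacity; out: elements actually returned */
            err = bpf_map_lookup_batch(map_fd, first ? NULL : &batch, &batch,
                                       keys, values, &count, NULL);
            if (err && errno != ENOENT)
                return -errno;               /* real failure */
            first = false;
            for (i = 0; i < count; i++)
                printf("key %u -> value %llu\n", keys[i],
                       (unsigned long long)values[i]);
        } while (!err);                      /* the final chunk returns -ENOENT */

        return 0;
    }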
|
| /tools/mm/ |
| D | page-types.c |
      673  unsigned long batch;   in walk_pfn() local
      688  batch = min_t(unsigned long, count, KPAGEFLAGS_BATCH);   in walk_pfn()
      689  pages = kpageflags_read(buf, index, batch);   in walk_pfn()
      732  unsigned long batch;   in walk_vma() local
      738  batch = min_t(unsigned long, count, PAGEMAP_BATCH);   in walk_vma()
      739  pages = pagemap_read(buf, index, batch);   in walk_vma()
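
walk_pfn() above caps every read of /proc/kpageflags at KPAGEFLAGS_BATCH
entries so arbitrarily large PFN ranges are scanned in fixed-size chunks. A
self-contained sketch of such a batched read (kpageflags holds one 64-bit flag
word per page frame, so the pread offset is pfn * 8); the 1024-entry batch and
the 1M-frame range are illustrative, and the program needs root:

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>

    #define BATCH 1024UL    /* entries per pread(), playing the role of KPAGEFLAGS_BATCH */

    int main(void)
    {
        uint64_t buf[BATCH];
        unsigned long pfn = 0, count = 1UL << 20, free_pages = 0;
        int fd = open("/proc/kpageflags", O_RDONLY);

        if (fd < 0)
            return 1;
        while (count) {
            unsigned long batch = count < BATCH ? count : BATCH;
            ssize_t n = pread(fd, buf, batch * sizeof(uint64_t),
                              pfn * sizeof(uint64_t));

            if (n <= 0)
                break;
            n /= sizeof(uint64_t);
            for (ssize_t i = 0; i < n; i++)
                if (buf[i] & (1ULL << 10))   /* bit 10 = KPF_BUDDY (free page) */
                    free_pages++;
            pfn += n;
            count -= n;
        }
        printf("%lu of the first %lu page frames are free buddy pages\n",
               free_pages, pfn);
        close(fd);
        return 0;
    }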
|
| /tools/bpf/bpftool/Documentation/ |
| D | bpftool.rst | 19 **bpftool** **batch file** *FILE*
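
Batch mode reads one bpftool command per line from FILE (lines starting with #
are comments, and "-" means standard input) and stops at the first command that
fails. A minimal sketch:

    printf 'prog show\nmap show\n' | bpftool batch file -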
|
| /tools/sched_ext/ |
| D | scx_qmap.bpf.c |
      370  u32 zero = 0, batch = dsp_batch ?: 1;   in BPF_STRUCT_OPS() local
      437  batch--;   in BPF_STRUCT_OPS()
      439  if (!batch || !scx_bpf_dispatch_nr_slots()) {   in BPF_STRUCT_OPS()
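
dsp_batch above bounds how many tasks one ops.dispatch invocation pulls out of
the scheduler's internal queues: batch starts at dsp_batch (defaulting to 1),
is decremented for every task dispatched, and the loop stops early once the
batch is exhausted or scx_bpf_dispatch_nr_slots() reports no free dispatch
slots.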
|