/tools/testing/selftests/net/

psock_tpacket.c:
     66: struct ring {
     71: void (*walk)(int sock, struct ring *ring);
    220: static void walk_v1_v2_rx(int sock, struct ring *ring)
    227: bug_on(ring->type != PACKET_RX_RING);
    239: while (__v1_v2_rx_kernel_ready(ring->rd[frame_num].iov_base,
    240: ring->version)) {
    241: ppd.raw = ring->rd[frame_num].iov_base;
    243: switch (ring->version) {
    260: __v1_v2_rx_user_ready(ppd.raw, ring->version);
    262: frame_num = (frame_num + 1) % ring->rd_num;
    [all …]

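The walk above is the classic packet-mmap receive pattern: poll each frame's status word, consume frames the kernel has marked TP_STATUS_USER, then hand them back. A minimal sketch of that loop outside the test harness (ring geometry is illustrative, error handling omitted; rx_loop is a hypothetical helper):

    /* Sketch: TPACKET_V2 receive loop on an already-bound AF_PACKET socket. */
    #include <linux/if_packet.h>
    #include <poll.h>
    #include <sys/mman.h>
    #include <sys/socket.h>

    static void rx_loop(int sock)
    {
        struct tpacket_req req = {
            .tp_block_size = 1 << 16,
            .tp_block_nr   = 4,
            .tp_frame_size = 1 << 11,
            .tp_frame_nr   = 4 * ((1 << 16) / (1 << 11)),
        };
        int ver = TPACKET_V2;
        unsigned int frame_num = 0;
        char *ring;

        setsockopt(sock, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
        setsockopt(sock, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
        ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
                    PROT_READ | PROT_WRITE, MAP_SHARED, sock, 0);

        for (;;) {
            struct tpacket2_hdr *hdr =
                (void *)(ring + frame_num * req.tp_frame_size);

            if (!(hdr->tp_status & TP_STATUS_USER)) {
                struct pollfd pfd = { .fd = sock, .events = POLLIN };

                poll(&pfd, 1, -1);              /* wait for the kernel */
                continue;
            }
            /* frame payload starts at (char *)hdr + hdr->tp_mac */
            hdr->tp_status = TP_STATUS_KERNEL;  /* return the frame */
            frame_num = (frame_num + 1) % req.tp_frame_nr;
        }
    }
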
txring_overwrite.c:
     86: static int setup_tx(char **ring)
    114: *ring = mmap(0, req.tp_block_size * req.tp_block_nr,
    116: if (*ring == MAP_FAILED)
    161: char *ring;
    165: fdt = setup_tx(&ring);
    167: send_pkt(fdt, ring, payload_patterns[0]);
    168: send_pkt(fdt, ring, payload_patterns[1]);

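setup_tx() maps a PACKET_TX_RING the same way; sending then reduces to marking a frame TP_STATUS_SEND_REQUEST and kicking the socket. A hedged sketch of that path (single frame slot assumed; queue_one is a hypothetical helper):

    /* Sketch: queue one packet on an mmap()ed TPACKET_V2 TX ring. */
    #include <linux/if_packet.h>
    #include <string.h>
    #include <sys/socket.h>

    static int queue_one(int fdt, char *ring, const char *pkt, unsigned int len)
    {
        struct tpacket2_hdr *header = (void *)ring;     /* first frame slot */

        if (header->tp_status != TP_STATUS_AVAILABLE)
            return -1;                      /* kernel still owns the slot */

        memcpy(ring + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll), pkt, len);
        header->tp_len = len;
        header->tp_status = TP_STATUS_SEND_REQUEST;

        return sendto(fdt, NULL, 0, 0, NULL, 0);        /* flush the ring */
    }
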
toeplitz.c:
    202: static char *recv_frame(const struct ring_state *ring, char *frame)
    208: ring->cpu);
    216: static bool recv_block(struct ring_state *ring)
    222: block = (void *)(ring->mmap + ring->idx * ring_block_sz);
    230: frame = recv_frame(ring, frame);
    235: ring->idx = (ring->idx + 1) % ring_block_nr;
    258: void *ring;
    276: ring = mmap(0, req3.tp_block_size * req3.tp_block_nr,
    279: if (ring == MAP_FAILED)
    282: return ring;
    [all …]

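toeplitz.c uses the TPACKET_V3 layout (note the struct tpacket_req3 above), where the unit of ownership is a whole block of frames rather than a single frame. Roughly how such a block is walked (a sketch; frame processing elided):

    /* Sketch: walk one TPACKET_V3 block, then return it to the kernel. */
    #include <linux/if_packet.h>
    #include <stdint.h>

    static void read_block(struct tpacket_block_desc *block)
    {
        struct tpacket3_hdr *frame;
        uint32_t i;

        if (!(block->hdr.bh1.block_status & TP_STATUS_USER))
            return;                         /* kernel is still filling it */

        frame = (void *)((uint8_t *)block + block->hdr.bh1.offset_to_first_pkt);
        for (i = 0; i < block->hdr.bh1.num_pkts; i++) {
            /* packet data sits at (uint8_t *)frame + frame->tp_mac */
            frame = (void *)((uint8_t *)frame + frame->tp_next_offset);
        }

        block->hdr.bh1.block_status = TP_STATUS_KERNEL; /* release block */
    }
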
psock_fanout.c:
    197: char *ring;
    211: ring = mmap(0, req.tp_block_size * req.tp_block_nr,
    213: if (ring == MAP_FAILED) {
    218: return ring;
    221: static int sock_fanout_read_ring(int fd, void *ring)
    223: struct tpacket2_hdr *header = ring;
    228: header = ring + (count * getpagesize());

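The fanout tests exercise PACKET_FANOUT, which spreads received traffic across several such rings. Joining a socket to a fanout group is a single setsockopt (a sketch; group_id is chosen by the caller):

    /* Sketch: join an AF_PACKET socket to a fanout group; the kernel then
     * load-balances received packets across the group's sockets by flow hash. */
    #include <linux/if_packet.h>
    #include <sys/socket.h>

    static int join_fanout(int fd, int group_id)
    {
        int val = group_id | (PACKET_FANOUT_HASH << 16);

        return setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
    }
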
/tools/virtio/ringtest/

virtio_ring_0_9.c:
     22: struct vring ring;
     76: vring_init(&ring, ring_size, p, 0x1000);
     86: ring.desc[i].next = i + 1;
    117: desc = ring.desc;
    136: ring.avail->ring[avail & (ring_size - 1)] =
    143: ring.avail->ring[avail] = head;
    148: ring.avail->idx = guest.avail_idx;
    160: index = ring.used->ring[head].id;
    167: if (ring.used->idx == guest.last_used_idx)
    176: index = ring.used->ring[head].id;
    [all …]

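This benchmark drives the split (virtio 0.9) layout directly: the guest publishes descriptor heads through the avail ring and reaps completions from the used ring. The handshake condensed (a sketch using the vring types from linux/virtio_ring.h; barriers simplified, queue size a power of two as the legacy layout requires):

    /* Sketch: the guest half of the split-ring handshake. */
    #include <linux/virtio_ring.h>

    static unsigned short avail_idx, last_used_idx;     /* free-running */

    static void post_buffer(struct vring *vr, unsigned short head)
    {
        vr->avail->ring[avail_idx % vr->num] = head;    /* publish the head */
        __sync_synchronize();                           /* ring before idx */
        vr->avail->idx = ++avail_idx;                   /* expose to host */
    }

    static int reap_buffer(struct vring *vr, unsigned int *id)
    {
        if (vr->used->idx == last_used_idx)
            return -1;                                  /* nothing completed */
        __sync_synchronize();                           /* idx before entry */
        *id = vr->used->ring[last_used_idx++ % vr->num].id;
        return 0;
    }
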
ring.c:
     56: struct desc *ring;
     82: ret = posix_memalign((void **)&ring, 0x1000, ring_size * sizeof *ring);
    101: ring[i] = desc;
    125: ring[head].addr = (unsigned long)(void*)buf;
    126: ring[head].len = len;
    133: index = ring[head].index;
    138: ring[head].flags = DESC_HW;
    149: if (ring[head].flags & DESC_HW)
    153: *lenp = ring[head].len;
    154: index = ring[head].index & (ring_size - 1);
    [all …]

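ring.c tries an alternative layout in which a single flags word carries ownership: the driver sets DESC_HW when handing a descriptor to the device, and the device clears it on completion. The core of that handshake, roughly (a sketch; the flag value and barrier choice here are illustrative):

    /* Sketch of the ownership-flag handshake behind ring.c. */
    #define DESC_HW 0x1     /* set: descriptor owned by the device */

    struct desc {
        unsigned short flags;
        unsigned short index;
        unsigned len;
        unsigned long long addr;
    };

    static void give_to_device(struct desc *d, void *buf, unsigned len)
    {
        d->addr = (unsigned long)buf;
        d->len = len;
        __sync_synchronize();   /* fill fields before flipping ownership */
        d->flags = DESC_HW;
    }

    static int take_from_device(struct desc *d, unsigned *lenp)
    {
        if (d->flags & DESC_HW)
            return -1;          /* device still owns the descriptor */
        __sync_synchronize();
        *lenp = d->len;
        return 0;
    }
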
Makefile:
      4: all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder ptr_ring noring
     11: ring.o: ring.c main.h
     16: ring: ring.o main.o
     24: -rm ring.o ring

README:
      1: Partial implementation of various ring layouts, useful to tune virtio design.
      6: # sh run-on-all.sh perf stat -r 10 --log-fd 1 -- ./ring

/tools/io_uring/

io_uring-cp.c:
     34: static int setup_context(unsigned entries, struct io_uring *ring)
     38: ret = io_uring_queue_init(entries, ring, 0);
     69: static void queue_prepped(struct io_uring *ring, struct io_data *data)
     73: sqe = io_uring_get_sqe(ring);
     84: static int queue_read(struct io_uring *ring, off_t size, off_t offset)
     93: sqe = io_uring_get_sqe(ring);
    111: static void queue_write(struct io_uring *ring, struct io_data *data)
    119: queue_prepped(ring, data);
    120: io_uring_submit(ring);
    123: static int copy_file(struct io_uring *ring, off_t insize)
    [all …]

queue.c:
     11: static int __io_uring_get_cqe(struct io_uring *ring,
     14: struct io_uring_cq *cq = &ring->cq;
     36: ret = io_uring_enter(ring->ring_fd, 0, 1,
     49: int io_uring_peek_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr)
     51: return __io_uring_get_cqe(ring, cqe_ptr, 0);
     58: int io_uring_wait_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr)
     60: return __io_uring_get_cqe(ring, cqe_ptr, 1);
     68: int io_uring_submit(struct io_uring *ring)
     70: struct io_uring_sq *sq = &ring->sq;
    126: ret = io_uring_enter(ring->ring_fd, submitted, 0,
    [all …]

setup.c:
     64: int io_uring_queue_mmap(int fd, struct io_uring_params *p, struct io_uring *ring)
     68: memset(ring, 0, sizeof(*ring));
     69: ret = io_uring_mmap(fd, p, &ring->sq, &ring->cq);
     71: ring->ring_fd = fd;
     79: int io_uring_queue_init(unsigned entries, struct io_uring *ring, unsigned flags)
     91: ret = io_uring_queue_mmap(fd, &p, ring);
     98: void io_uring_queue_exit(struct io_uring *ring)
    100: struct io_uring_sq *sq = &ring->sq;
    101: struct io_uring_cq *cq = &ring->cq;
    106: close(ring->ring_fd);

liburing.h:
     64: extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
     67: struct io_uring *ring);
     68: extern void io_uring_queue_exit(struct io_uring *ring);
     69: extern int io_uring_peek_cqe(struct io_uring *ring,
     71: extern int io_uring_wait_cqe(struct io_uring *ring,
     73: extern int io_uring_submit(struct io_uring *ring);
     74: extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
     80: static inline void io_uring_cqe_seen(struct io_uring *ring,
     84: struct io_uring_cq *cq = &ring->cq;

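This header is a trimmed-down copy of the liburing API that the examples in this directory build on. A minimal one-shot read using the calls declared above plus io_uring_prep_readv(), which the same header provides as a static inline helper (a sketch; error paths abbreviated):

    /* Sketch: one read submitted and reaped through the API above. */
    #include <sys/types.h>
    #include <sys/uio.h>
    #include "liburing.h"

    static int read_once(int fd, void *buf, unsigned int nbytes, off_t off)
    {
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        struct iovec iov = { .iov_base = buf, .iov_len = nbytes };
        int ret;

        ret = io_uring_queue_init(8, &ring, 0);     /* 8-entry SQ */
        if (ret < 0)
            return ret;

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_readv(sqe, fd, &iov, 1, off);
        io_uring_submit(&ring);

        ret = io_uring_wait_cqe(&ring, &cqe);       /* block for completion */
        if (!ret) {
            ret = cqe->res;                         /* bytes read or -errno */
            io_uring_cqe_seen(&ring, cqe);          /* advance CQ head */
        }

        io_uring_queue_exit(&ring);
        return ret;
    }
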
io_uring-bench.c:
    196: struct io_sq_ring *ring = &s->sq_ring;
    199: next_tail = tail = *ring->tail;
    203: if (next_tail == *ring->head)
    208: ring->array[index] = index;
    213: if (*ring->tail != tail) {
    216: *ring->tail = tail;
    246: struct io_cq_ring *ring = &s->cq_ring;
    250: head = *ring->head;
    255: if (head == *ring->tail)
    257: cqe = &ring->cqes[head & cq_ring_mask];
    [all …]

/tools/lib/bpf/

ringbuf.c:
     24: struct ring {
     36: struct ring *rings;
     42: static void ringbuf_unmap_ring(struct ring_buffer *rb, struct ring *r)
     61: struct ring *r;
    209: static int64_t ringbuf_process_ring(struct ring* r)
    263: struct ring *ring = &rb->rings[i];
    265: err = ringbuf_process_ring(ring);
    290: struct ring *ring = &rb->rings[ring_id];
    292: err = ringbuf_process_ring(ring);

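ring_buffer__consume() and ring_buffer__poll() are the two entry points of libbpf's user-space consumer; both funnel into ringbuf_process_ring(), which invokes a per-sample callback. Typical application usage looks like this (a sketch; handle_sample and map_fd are supplied by the caller):

    /* Sketch: drain a BPF_MAP_TYPE_RINGBUF map with the API implemented above. */
    #include <stddef.h>
    #include <bpf/libbpf.h>

    static int handle_sample(void *ctx, void *data, size_t size)
    {
        /* one record written by the BPF program; return 0 to keep going */
        return 0;
    }

    static void consume_ringbuf(int map_fd)
    {
        struct ring_buffer *rb;

        rb = ring_buffer__new(map_fd, handle_sample, NULL, NULL);
        if (!rb)
            return;
        while (ring_buffer__poll(rb, 100 /* ms */) >= 0)
            ;   /* handle_sample() fires from inside poll */
        ring_buffer__free(rb);
    }
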
xsk.h:
    100: void *ring; \
    117: __u64 *addrs = (__u64 *)fill->ring;
    125: const __u64 *addrs = (const __u64 *)comp->ring;
    133: struct xdp_desc *descs = (struct xdp_desc *)tx->ring;
    141: const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring;

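These accessors hide the producer/consumer index arithmetic behind the mmap()ed AF_XDP rings. For example, handing the kernel a batch of free umem frames through the fill ring (a sketch; frame addresses are managed by the caller):

    /* Sketch: give the kernel nb free umem frames via the fill ring. */
    #include <bpf/xsk.h>

    static void refill(struct xsk_ring_prod *fill, const __u64 *addrs,
                       unsigned int nb)
    {
        __u32 idx;
        unsigned int i;

        if (xsk_ring_prod__reserve(fill, nb, &idx) != nb)
            return;                     /* not enough free slots right now */

        for (i = 0; i < nb; i++)
            *xsk_ring_prod__fill_addr(fill, idx + i) = addrs[i];

        xsk_ring_prod__submit(fill, nb);    /* publish the producer index */
    }
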
xsk.c:
    259: fill->ring = map + off.fr.desc;
    275: comp->ring = map + off.cr.desc;
    929: munmap(ctx->fill->ring - off.fr.desc, off.fr.desc + umem->config.fill_size *
    931: munmap(ctx->comp->ring - off.cr.desc, off.cr.desc + umem->config.comp_size *
   1121: rx->ring = rx_map + off.rx.desc;
   1142: tx->ring = tx_map + off.tx.desc;
   1222: munmap(umem->fill_save->ring - off.fr.desc,
   1224: munmap(umem->comp_save->ring - off.cr.desc,
   1260: munmap(xsk->rx->ring - off.rx.desc,
   1264: munmap(xsk->tx->ring - off.tx.desc,

/tools/testing/selftests/bpf/prog_tests/

ringbuf_multi.c:
     18: int ring = (unsigned long)ctx;
     23: CHECK(ring != 1, "sample1_ring", "exp %d, got %d\n", 1, ring);
     28: CHECK(ring != 2, "sample2_ring", "exp %d, got %d\n", 2, ring);

/tools/testing/selftests/drivers/net/netdevsim/

ethtool-ring.sh:
     21: if ! ethtool -h | grep -q set-ring >/dev/null; then
     52: RING_MAX_LIST=$(ls $NSIM_DEV_DFS/ethtool/ring/)
     55: echo $MAX_VALUE > $NSIM_DEV_DFS/ethtool/ring/$ring_max_entry

/tools/virtio/

virtio_test.c:
     32: void *ring;
    103: memset(info->ring, 0, vring_size(num, 4096));
    104: vring_init(&info->vring, num, info->ring, 4096);
    118: r = posix_memalign(&info->ring, 4096, vring_size(num, 4096));

/tools/bpf/bpftool/Documentation/

bpftool-map.rst:
    128: install perf ring for each CPU in the corresponding index
    131: If **cpu** and **index** are specified, install perf ring
    132: for given **cpu** at **index** in the array (single ring).
    134: Note that installing a perf ring into an array will silently
    135: replace any existing ring. Any other application will stop

/tools/lib/perf/Documentation/

libperf-sampling.txt:
    144: Once the events list is open, we can create memory maps AKA perf ring buffers:
    160: We will sleep for 3 seconds while the ring buffers get data from all CPUs, then we disable the even…
    169: Following code walks through the ring buffers and reads stored events/samples:

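The walk that document describes uses libperf's perf_mmap read API; stripped to its shape it looks like this (a sketch; event decoding omitted):

    /* Sketch: drain libperf ring buffers as libperf-sampling.txt walks them. */
    #include <perf/evlist.h>
    #include <perf/mmap.h>
    #include <perf/event.h>

    static void drain(struct perf_evlist *evlist)
    {
        struct perf_mmap *map;
        union perf_event *event;

        perf_evlist__for_each_mmap(evlist, map, false) {
            if (perf_mmap__read_init(map) < 0)
                continue;                   /* no data in this buffer */
            while ((event = perf_mmap__read_event(map)) != NULL) {
                /* event->header.type identifies the record */
                perf_mmap__consume(map);
            }
            perf_mmap__read_done(map);
        }
    }
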
libperf.txt:
    153: *API to handle maps (perf ring buffers):*
    231: struct perf_mmap:: Provides an abstraction for accessing perf ring buffer.

/tools/perf/

design.txt:
    192: Such (and other) events will be recorded in a ring-buffer, which is
    232: these events are recorded in the ring-buffer (see below).
    235: This too is recorded in the ring-buffer (see below).
    283: tracking are logged into a ring-buffer. This ring-buffer is created and
    288: as where the ring-buffer head is.
    335: The following 2^n pages are the ring-buffer which contains events of the form:
    410: Future work will include a splice() interface to the ring-buffer.

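The control page mentioned here is struct perf_event_mmap_page from linux/perf_event.h: the kernel advances data_head and the reader consumes up to it. In outline (a sketch that assumes a 4096-byte control page and ignores records wrapping the ring end; real readers must handle both):

    /* Sketch: consume new records from the mmap()ed perf ring buffer. */
    #include <linux/perf_event.h>

    static void read_new(void *base, unsigned long long data_size,
                         unsigned long long *tail)
    {
        struct perf_event_mmap_page *meta = base;
        unsigned long long head = meta->data_head;
        char *data = (char *)base + 4096;   /* the 2^n data pages */

        __sync_synchronize();               /* head before the data it covers */

        while (*tail < head) {
            struct perf_event_header *ev =
                (void *)(data + (*tail % data_size));

            /* ev->type and ev->size describe this record */
            *tail += ev->size;
        }
        meta->data_tail = head;             /* hand the space back */
    }
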
/tools/testing/selftests/livepatch/

README:
     39: check_result(). The latter function greps the kernel's ring buffer for

/tools/virtio/virtio-trace/

README:
      6: - splice a page of ring-buffer to read_pipe without memory copying
     18: 5) The read/write threads start to read trace data from ring-buffers and
