/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#include "xsk.h"

struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	/* Keep the adjacent cache line prefetcher from pulling in the
	 * consumer pointer when the producer pointer is touched, and
	 * vice versa.
	 */
	u32 pad ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
	u32 flags;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[] ____cacheline_aligned_in_smp;
};

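/* Note on cached_prod and cached_cons below: they are the queue's local
 * copies of the shared producer and consumer pointers. The fast-path
 * helpers in this file work against these local copies and only re-read
 * or publish the shared ring pointers when batches of entries are picked
 * up or handed over, which keeps traffic on the shared cache lines low.
 */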
struct xsk_queue {
	u32 ring_mask;
	u32 nentries;
	u32 cached_prod;
	u32 cached_cons;
	struct xdp_ring *ring;
	u64 invalid_descs;
	u64 queue_empty_descs;
};

/* The structure of the shared state of the rings is the same as the
 * ring buffer in kernel/events/ring_buffer.c. For the Rx and completion
 * ring, the kernel is the producer and user space is the consumer. For
 * the Tx and fill rings, the kernel is the consumer and user space is
 * the producer.
 *
 * producer                         consumer
 *
 * if (LOAD ->consumer) {           LOAD ->producer
 *                    (A)           smp_rmb()       (C)
 *    STORE $data                   LOAD $data
 *    smp_wmb()       (B)           smp_mb()        (D)
 *    STORE ->producer              STORE ->consumer
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it prevents the store of $data from being
 * reordered after the store of the producer pointer. If this barrier
 * were missing, the consumer could observe the producer pointer being
 * set and thus load the data before the producer has written the new
 * data. The consumer would in this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer actually has been read. If we did not have this
 * barrier, some architectures could load old data, since speculative
 * loads are not discarded when the CPU does not know there is a
 * dependency between ->producer and the data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. If ->consumer indicates there is no room
 * in the buffer to store $data, we do not store it, and the control
 * dependency orders the load before the stores, so no explicit barrier
 * is needed.
 *
 * (D) protects the load of the data from being observed to happen
 * after the store of the consumer pointer. If we did not have this
 * memory barrier, the producer could observe the consumer pointer
 * being set and overwrite the data with a new value before the
 * consumer got the chance to read the old value. The consumer would
 * thus miss reading the old entry and very likely read the new entry
 * twice, once right now and again after circling through the ring.
 */
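/* As a reading aid: in the helpers below, (B) is the smp_wmb() in
 * __xskq_prod_submit(), (C) is the smp_rmb() in __xskq_cons_peek(),
 * (D) is the smp_mb() in __xskq_cons_release(), and (A) is the control
 * dependency on the free-entry check that the xskq_prod_reserve*()
 * helpers perform through xskq_prod_is_full().
 */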

/* The operations on the rings are the following:
 *
 * producer                           consumer
 *
 * RESERVE entries                    PEEK in the ring for entries
 * WRITE data into the ring           READ data from the ring
 * SUBMIT entries                     RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and when it is done reading them release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations.
 */
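/* As an illustration only (a minimal sketch, not lifted from any
 * particular caller), a kernel-side consumer draining a Tx ring could
 * combine the helpers below as
 *
 *	struct xdp_desc desc;
 *
 *	while (xskq_cons_peek_desc(q, &desc, pool)) {
 *		... transmit the buffer that desc describes ...
 *		xskq_cons_release(q);
 *	}
 *	__xskq_cons_release(q);		... publish ->consumer ...
 *
 * and a kernel-side producer filling an Rx ring could do
 *
 *	if (xskq_prod_reserve_desc(q, addr, len))
 *		... ring full, drop the packet ...
 *	...
 *	xskq_prod_submit(q);		... publish ->producer ...
 */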

/* Functions that read and validate content from consumer rings. */

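/* Read the next address from a fill ring without validating it against
 * the pool; "unchecked" means the caller is expected to perform any
 * such validation itself.
 */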
static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (q->cached_cons != q->cached_prod) {
		u32 idx = q->cached_cons & q->ring_mask;

		*addr = ring->desc[idx];
		return true;
	}

	return false;
}

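/* Descriptor validation. In aligned mode the buffer must not straddle
 * a chunk boundary and its chunk must lie inside the umem; in unaligned
 * mode the length must not exceed the chunk size, the buffer must stay
 * inside the umem, and it must not cross a non-contiguous page boundary.
 * Descriptors with any option bits set are rejected in both modes.
 */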
static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
					    struct xdp_desc *desc)
{
	u64 chunk, chunk_end;

	chunk = xp_aligned_extract_addr(pool, desc->addr);
	if (likely(desc->len)) {
		chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1);
		if (chunk != chunk_end)
			return false;
	}

	if (chunk >= pool->addrs_cnt)
		return false;

	if (desc->options)
		return false;
	return true;
}

static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
					      struct xdp_desc *desc)
{
	u64 addr, base_addr;

	base_addr = xp_unaligned_extract_addr(desc->addr);
	addr = xp_unaligned_add_offset_to_addr(desc->addr);

	if (desc->len > pool->chunk_size)
		return false;

	if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
	    addr + desc->len > pool->addrs_cnt ||
	    xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
		return false;

	if (desc->options)
		return false;
	return true;
}

static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
		xp_aligned_validate_desc(pool, desc);
}

static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xsk_buff_pool *pool)
{
	if (!xp_validate_desc(pool, d)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	while (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		if (xskq_cons_is_valid_desc(q, desc, pool))
			return true;

		q->cached_cons++;
	}

	return false;
}

/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_mb(); /* D, matches A */
	WRITE_ONCE(q->ring->consumer, q->cached_cons);
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = READ_ONCE(q->ring->producer);
	smp_rmb(); /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}

static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= cnt)
		return true;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= cnt;
}

static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, pool);
}

static inline void xskq_cons_release(struct xsk_queue *q)
{
	/* To improve performance, only update local state here.
	 * Reflect this to global state when we get new entries
	 * from the ring in xskq_cons_get_entries() and whenever
	 * Rx or Tx processing has completed in the NAPI loop.
	 */
	q->cached_cons++;
}

static inline bool xskq_cons_is_full(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
		q->nentries;
}

static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}

/* Functions for producers */

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries)
		return false;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return !free_entries;
}

static inline void xskq_prod_cancel(struct xsk_queue *q)
{
	q->cached_prod--;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_wmb(); /* B, matches C */

	WRITE_ONCE(q->ring->producer, idx);
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = q->ring->producer;

	ring->desc[idx++ & q->ring_mask] = addr;

	__xskq_prod_submit(q, idx);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}

/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
	return q ? q->queue_empty_descs : 0;
}

struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */