// SPDX-License-Identifier: GPL-2.0

#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

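/* A pool can be shared by several sockets (shared umem). Sockets that use the
 * pool for Tx are kept on xsk_tx_list so the zero-copy Tx path can iterate
 * over them.
 */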
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
	list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
	spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
	list_del_rcu(&xs->tx_list);
	spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

void xp_destroy(struct xsk_buff_pool *pool)
{
	if (!pool)
		return;

	kvfree(pool->tx_descs);
	kvfree(pool->heads);
	kvfree(pool);
}

int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	pool->tx_descs = kvcalloc(xs->tx->nentries, sizeof(*pool->tx_descs),
				  GFP_KERNEL);
	if (!pool->tx_descs)
		return -ENOMEM;

	return 0;
}

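/* Allocate a buffer pool on top of a registered umem. One xdp_buff_xsk head
 * is created per chunk; in aligned mode the heads are addressed directly by
 * chunk index, while unaligned mode keeps them on the free_heads stack.
 */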
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem)
{
	bool unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	struct xsk_buff_pool *pool;
	struct xdp_buff_xsk *xskb;
	u32 i, entries;

	entries = unaligned ? umem->chunks : 0;
	pool = kvzalloc(struct_size(pool, free_heads, entries), GFP_KERNEL);
	if (!pool)
		goto out;

	pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL);
	if (!pool->heads)
		goto out;

	if (xs->tx)
		if (xp_alloc_tx_descs(pool, xs))
			goto out;

	pool->chunk_mask = ~((u64)umem->chunk_size - 1);
	pool->addrs_cnt = umem->size;
	pool->heads_cnt = umem->chunks;
	pool->free_heads_cnt = umem->chunks;
	pool->headroom = umem->headroom;
	pool->chunk_size = umem->chunk_size;
	pool->chunk_shift = ffs(umem->chunk_size) - 1;
	pool->unaligned = unaligned;
	pool->frame_len = umem->chunk_size - umem->headroom -
			  XDP_PACKET_HEADROOM;
	pool->umem = umem;
	pool->addrs = umem->addrs;
	pool->tx_metadata_len = umem->tx_metadata_len;
	pool->tx_sw_csum = umem->flags & XDP_UMEM_TX_SW_CSUM;
	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->xskb_list);
	INIT_LIST_HEAD(&pool->xsk_tx_list);
	spin_lock_init(&pool->xsk_tx_list_lock);
	spin_lock_init(&pool->cq_lock);
	refcount_set(&pool->users, 1);

	pool->fq = xs->fq_tmp;
	pool->cq = xs->cq_tmp;

	for (i = 0; i < pool->free_heads_cnt; i++) {
		xskb = &pool->heads[i];
		xskb->pool = pool;
		xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
		INIT_LIST_HEAD(&xskb->free_list_node);
		INIT_LIST_HEAD(&xskb->xskb_list_node);
		if (pool->unaligned)
			pool->free_heads[i] = xskb;
		else
			xp_init_xskb_addr(xskb, pool, (u64)i * pool->chunk_size);
	}

	return pool;

out:
	xp_destroy(pool);
	return NULL;
}

void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{
	u32 i;

	for (i = 0; i < pool->heads_cnt; i++)
		pool->heads[i].xdp.rxq = rxq;
}
EXPORT_SYMBOL(xp_set_rxq_info);

void xp_fill_cb(struct xsk_buff_pool *pool, struct xsk_cb_desc *desc)
{
	u32 i;

	for (i = 0; i < pool->heads_cnt; i++) {
		struct xdp_buff_xsk *xskb = &pool->heads[i];

		memcpy(xskb->cb + desc->off, desc->src, desc->bytes);
	}
}
EXPORT_SYMBOL(xp_fill_cb);

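/* Ask the driver to tear down its zero-copy state for this queue by
 * installing a NULL pool via ndo_bpf(XDP_SETUP_XSK_POOL).
 */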
static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
{
	struct netdev_bpf bpf;
	int err;

	ASSERT_RTNL();

	if (pool->umem->zc) {
		bpf.command = XDP_SETUP_XSK_POOL;
		bpf.xsk.pool = NULL;
		bpf.xsk.queue_id = pool->queue_id;

		err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);

		if (err)
			WARN(1, "Failed to disable zero-copy!\n");
	}
}

#define NETDEV_XDP_ACT_ZC	(NETDEV_XDP_ACT_BASIC |		\
				 NETDEV_XDP_ACT_REDIRECT |	\
				 NETDEV_XDP_ACT_XSK_ZEROCOPY)

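/* Bind the pool to a netdev queue. Copy mode always works; zero-copy is only
 * attempted if the driver advertises the required XDP features and accepts
 * the pool via ndo_bpf(). Unless XDP_ZEROCOPY was explicitly requested, a
 * failed zero-copy setup falls back to copy mode.
 */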
int xp_assign_dev(struct xsk_buff_pool *pool,
		  struct net_device *netdev, u16 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err = 0;

	ASSERT_RTNL();

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (xsk_get_pool_from_qid(netdev, queue_id))
		return -EBUSY;

	pool->netdev = netdev;
	pool->queue_id = queue_id;
	err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
	if (err)
		return err;

	if (flags & XDP_USE_SG)
		pool->umem->flags |= XDP_UMEM_SG_FLAG;

	if (flags & XDP_USE_NEED_WAKEUP)
		pool->uses_need_wakeup = true;
	/* Tx needs to be explicitly woken up the first time. Also
	 * for supporting drivers that do not implement this
	 * feature. They will always have to call sendto() or poll().
	 */
	pool->cached_need_wakeup = XDP_WAKEUP_TX;

	dev_hold(netdev);

	if (force_copy)
		/* For copy-mode, we are done. */
		return 0;

	if ((netdev->xdp_features & NETDEV_XDP_ACT_ZC) != NETDEV_XDP_ACT_ZC) {
		err = -EOPNOTSUPP;
		goto err_unreg_pool;
	}

	if (netdev->xdp_zc_max_segs == 1 && (flags & XDP_USE_SG)) {
		err = -EOPNOTSUPP;
		goto err_unreg_pool;
	}

	if (dev_get_min_mp_channel_count(netdev)) {
		err = -EBUSY;
		goto err_unreg_pool;
	}

	bpf.command = XDP_SETUP_XSK_POOL;
	bpf.xsk.pool = pool;
	bpf.xsk.queue_id = queue_id;

	err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
	if (err)
		goto err_unreg_pool;

	if (!pool->dma_pages) {
		WARN(1, "Driver did not DMA map zero-copy buffers");
		err = -EINVAL;
		goto err_unreg_xsk;
	}
	pool->umem->zc = true;
	return 0;

err_unreg_xsk:
	xp_disable_drv_zc(pool);
err_unreg_pool:
	if (!force_zc)
		err = 0; /* fallback to copy mode */
	if (err) {
		xsk_clear_pool_at_qid(netdev, queue_id);
		dev_put(netdev);
	}
	return err;
}

int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id)
{
	u16 flags;
	struct xdp_umem *umem = umem_xs->umem;

	/* One fill and completion ring required for each queue id. */
	if (!pool->fq || !pool->cq)
		return -EINVAL;

	flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
	if (umem_xs->pool->uses_need_wakeup)
		flags |= XDP_USE_NEED_WAKEUP;

	return xp_assign_dev(pool, dev, queue_id, flags);
}

void xp_clear_dev(struct xsk_buff_pool *pool)
{
	if (!pool->netdev)
		return;

	xp_disable_drv_zc(pool);
	xsk_clear_pool_at_qid(pool->netdev, pool->queue_id);
	dev_put(pool->netdev);
	pool->netdev = NULL;
}

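/* Final pool teardown is deferred to a workqueue so that the RTNL lock can be
 * taken when unbinding from the device.
 */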
static void xp_release_deferred(struct work_struct *work)
{
	struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
						  work);

	rtnl_lock();
	xp_clear_dev(pool);
	rtnl_unlock();

	if (pool->fq) {
		xskq_destroy(pool->fq);
		pool->fq = NULL;
	}

	if (pool->cq) {
		xskq_destroy(pool->cq);
		pool->cq = NULL;
	}

	xdp_put_umem(pool->umem, false);
	xp_destroy(pool);
}

void xp_get_pool(struct xsk_buff_pool *pool)
{
	refcount_inc(&pool->users);
}

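/* Drop a reference to the pool. The last user schedules the deferred release
 * work; returns true if this call released the final reference.
 */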
bool xp_put_pool(struct xsk_buff_pool *pool)
{
	if (!pool)
		return false;

	if (refcount_dec_and_test(&pool->users)) {
		INIT_WORK(&pool->work, xp_release_deferred);
		schedule_work(&pool->work);
		return true;
	}

	return false;
}

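/* DMA mappings are reference counted per (umem, netdev) pair and kept on the
 * umem's xsk_dma_list, so pools sharing a umem on the same device reuse an
 * existing mapping instead of mapping the pages again.
 */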
static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
{
	struct xsk_dma_map *dma_map;

	list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) {
		if (dma_map->netdev == pool->netdev)
			return dma_map;
	}

	return NULL;
}

static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_device *netdev,
					     u32 nr_pages, struct xdp_umem *umem)
{
	struct xsk_dma_map *dma_map;

	dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL);
	if (!dma_map)
		return NULL;

	dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
	if (!dma_map->dma_pages) {
		kfree(dma_map);
		return NULL;
	}

	dma_map->netdev = netdev;
	dma_map->dev = dev;
	dma_map->dma_pages_cnt = nr_pages;
	refcount_set(&dma_map->users, 1);
	list_add(&dma_map->list, &umem->xsk_dma_list);
	return dma_map;
}

static void xp_destroy_dma_map(struct xsk_dma_map *dma_map)
{
	list_del(&dma_map->list);
	kvfree(dma_map->dma_pages);
	kfree(dma_map);
}

static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
{
	dma_addr_t *dma;
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = &dma_map->dma_pages[i];
		if (*dma) {
			*dma &= ~XSK_NEXT_PG_CONTIG_MASK;
			dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
					     DMA_BIDIRECTIONAL, attrs);
			*dma = 0;
		}
	}

	xp_destroy_dma_map(dma_map);
}

void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
	struct xsk_dma_map *dma_map;

	if (!pool->dma_pages)
		return;

	dma_map = xp_find_dma_map(pool);
	if (!dma_map) {
		WARN(1, "Could not find dma_map for device");
		return;
	}

	if (refcount_dec_and_test(&dma_map->users))
		__xp_dma_unmap(dma_map, attrs);

	kvfree(pool->dma_pages);
	pool->dma_pages = NULL;
	pool->dma_pages_cnt = 0;
	pool->dev = NULL;
}
EXPORT_SYMBOL(xp_dma_unmap);

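/* Mark pages whose DMA address is followed contiguously by that of the next
 * page, so that unaligned frames crossing a page boundary can still be used.
 */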
static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
{
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) {
		if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1])
			dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
		else
			dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
	}
}

static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
{
	if (!pool->unaligned) {
		u32 i;

		for (i = 0; i < pool->heads_cnt; i++) {
			struct xdp_buff_xsk *xskb = &pool->heads[i];

			xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr);
		}
	}

	pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
	if (!pool->dma_pages)
		return -ENOMEM;

	pool->dev = dma_map->dev;
	pool->dma_pages_cnt = dma_map->dma_pages_cnt;
	memcpy(pool->dma_pages, dma_map->dma_pages,
	       pool->dma_pages_cnt * sizeof(*pool->dma_pages));

	return 0;
}

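/* Map the umem pages for DMA, or reuse an existing mapping if another pool on
 * the same umem has already mapped them for this device. Called by drivers
 * when setting up zero-copy for a queue.
 */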
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages)
{
	struct xsk_dma_map *dma_map;
	dma_addr_t dma;
	int err;
	u32 i;

	dma_map = xp_find_dma_map(pool);
	if (dma_map) {
		err = xp_init_dma_info(pool, dma_map);
		if (err)
			return err;

		refcount_inc(&dma_map->users);
		return 0;
	}

	dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
	if (!dma_map)
		return -ENOMEM;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, attrs);
		if (dma_mapping_error(dev, dma)) {
			__xp_dma_unmap(dma_map, attrs);
			return -ENOMEM;
		}
		dma_map->dma_pages[i] = dma;
	}

	if (pool->unaligned)
		xp_check_dma_contiguity(dma_map);

	err = xp_init_dma_info(pool, dma_map);
	if (err) {
		__xp_dma_unmap(dma_map, attrs);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(xp_dma_map);

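/* Validate an address read from the fill ring: it must lie within the umem
 * and, in unaligned mode, the chunk must not span two non-contiguous pages.
 */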
static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
					  u64 addr)
{
	return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
}

static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_unaligned_extract_addr(*addr);
	if (*addr >= pool->addrs_cnt ||
	    *addr + pool->chunk_size > pool->addrs_cnt ||
	    xp_addr_crosses_non_contig_pg(pool, *addr))
		return false;
	return true;
}

static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_aligned_extract_addr(pool, *addr);
	return *addr < pool->addrs_cnt;
}

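/* Slow-path allocation of a single buffer: pop a valid address off the fill
 * ring, skipping and counting any invalid descriptors on the way.
 */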
static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;
	u64 addr;
	bool ok;

	if (pool->free_heads_cnt == 0)
		return NULL;

	for (;;) {
		if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
			pool->fq->queue_empty_descs++;
			return NULL;
		}

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
		     xp_check_aligned(pool, &addr);
		if (!ok) {
			pool->fq->invalid_descs++;
			xskq_cons_release(pool->fq);
			continue;
		}
		break;
	}

	if (pool->unaligned) {
		xskb = pool->free_heads[--pool->free_heads_cnt];
		xp_init_xskb_addr(xskb, pool, addr);
		if (pool->dma_pages)
			xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
	} else {
		xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
	}

	xskq_cons_release(pool->fq);
	return xskb;
}

struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;

	if (!pool->free_list_cnt) {
		xskb = __xp_alloc(pool);
		if (!xskb)
			return NULL;
	} else {
		pool->free_list_cnt--;
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
					free_list_node);
		list_del_init(&xskb->free_list_node);
	}

	xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
	xskb->xdp.data_meta = xskb->xdp.data;
	xskb->xdp.flags = 0;

	if (pool->dev)
		xp_dma_sync_for_device(pool, xskb->dma, pool->frame_len);

	return &xskb->xdp;
}
EXPORT_SYMBOL(xp_alloc);

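/* Batched variant of __xp_alloc(): read up to @max addresses from the fill
 * ring in one go and initialize a buffer for each valid one.
 */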
static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	u32 i, cached_cons, nb_entries;

	if (max > pool->free_heads_cnt)
		max = pool->free_heads_cnt;
	max = xskq_cons_nb_entries(pool->fq, max);

	cached_cons = pool->fq->cached_cons;
	nb_entries = max;
	i = max;
	while (i--) {
		struct xdp_buff_xsk *xskb;
		u64 addr;
		bool ok;

		__xskq_cons_read_addr_unchecked(pool->fq, cached_cons++, &addr);

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
			xp_check_aligned(pool, &addr);
		if (unlikely(!ok)) {
			pool->fq->invalid_descs++;
			nb_entries--;
			continue;
		}

		if (pool->unaligned) {
			xskb = pool->free_heads[--pool->free_heads_cnt];
			xp_init_xskb_addr(xskb, pool, addr);
			if (pool->dma_pages)
				xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
		} else {
			xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
		}

		*xdp = &xskb->xdp;
		xdp++;
	}

	xskq_cons_release_n(pool->fq, max);
	return nb_entries;
}

static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 nb_entries)
{
	struct xdp_buff_xsk *xskb;
	u32 i;

	nb_entries = min_t(u32, nb_entries, pool->free_list_cnt);

	i = nb_entries;
	while (i--) {
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, free_list_node);
		list_del_init(&xskb->free_list_node);

		*xdp = &xskb->xdp;
		xdp++;
	}
	pool->free_list_cnt -= nb_entries;

	return nb_entries;
}

static u32 xp_alloc_slow(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
			 u32 max)
{
	int i;

	for (i = 0; i < max; i++) {
		struct xdp_buff *buff;

		buff = xp_alloc(pool);
		if (unlikely(!buff))
			return i;
		*xdp = buff;
		xdp++;
	}

	return max;
}

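/* Allocate up to @max buffers for the driver's Rx path. Buffers are taken
 * from the free list first and then refilled from the fill ring. Devices
 * that need DMA syncing fall back to the one-at-a-time path.
 */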
u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	u32 nb_entries1 = 0, nb_entries2;

	if (unlikely(pool->dev && dma_dev_need_sync(pool->dev)))
		return xp_alloc_slow(pool, xdp, max);

	if (unlikely(pool->free_list_cnt)) {
		nb_entries1 = xp_alloc_reused(pool, xdp, max);
		if (nb_entries1 == max)
			return nb_entries1;

		max -= nb_entries1;
		xdp += nb_entries1;
	}

	nb_entries2 = xp_alloc_new_from_fq(pool, xdp, max);
	if (!nb_entries2)
		pool->fq->queue_empty_descs++;

	return nb_entries1 + nb_entries2;
}
EXPORT_SYMBOL(xp_alloc_batch);

bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	u32 req_count, avail_count;

	if (pool->free_list_cnt >= count)
		return true;

	req_count = count - pool->free_list_cnt;
	avail_count = xskq_cons_nb_entries(pool->fq, req_count);
	if (!avail_count)
		pool->fq->queue_empty_descs++;

	return avail_count >= req_count;
}
EXPORT_SYMBOL(xp_can_alloc);

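/* Return a buffer to the pool's free list. A buffer that is already on the
 * free list is left alone, so repeated frees are harmless.
 */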
void xp_free(struct xdp_buff_xsk *xskb)
{
	if (!list_empty(&xskb->free_list_node))
		return;

	xskb->pool->free_list_cnt++;
	list_add(&xskb->free_list_node, &xskb->pool->free_list);
}
EXPORT_SYMBOL(xp_free);

void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return pool->addrs + addr;
}
EXPORT_SYMBOL(xp_raw_get_data);

dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return (pool->dma_pages[addr >> PAGE_SHIFT] &
		~XSK_NEXT_PG_CONTIG_MASK) +
		(addr & ~PAGE_MASK);
}
EXPORT_SYMBOL(xp_raw_get_dma);