
Lines matching +full:dma +full:-pool (net/xdp/xsk_buff_pool.c)

// SPDX-License-Identifier: GPL-2.0

#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"
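/* xsk_buff_pool: the buffer pool behind AF_XDP sockets. A pool hands out
 * chunks of a user-registered umem as xdp_buff frames and, for zero-copy
 * mode, keeps track of how those chunks are DMA mapped for the device.
 */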
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
        unsigned long flags;

        if (!xs->tx)
                return;

        spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
        list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
        spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
        unsigned long flags;

        if (!xs->tx)
                return;

        spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
        list_del_rcu(&xs->tx_list);
        spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}
void xp_destroy(struct xsk_buff_pool *pool)
{
        if (!pool)
                return;

        kvfree(pool->heads);
        kvfree(pool);
}
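/* Create a pool for a socket/umem pair: one xdp_buff_xsk head is set up per
 * umem chunk, and the fill (fq) and completion (cq) rings created on the
 * socket are handed over to the pool.
 */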
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
                                                struct xdp_umem *umem)
{
        struct xsk_buff_pool *pool;
        struct xdp_buff_xsk *xskb;
        u32 i;

        pool = kvzalloc(struct_size(pool, free_heads, umem->chunks),
                        GFP_KERNEL);
        if (!pool)
                goto out;

        pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL);
        if (!pool->heads)
                goto out;

        pool->chunk_mask = ~((u64)umem->chunk_size - 1);
        pool->addrs_cnt = umem->size;
        pool->heads_cnt = umem->chunks;
        pool->free_heads_cnt = umem->chunks;
        pool->headroom = umem->headroom;
        pool->chunk_size = umem->chunk_size;
        pool->unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
        pool->frame_len = umem->chunk_size - umem->headroom -
                          XDP_PACKET_HEADROOM;
        pool->umem = umem;
        pool->addrs = umem->addrs;
        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->xsk_tx_list);
        spin_lock_init(&pool->xsk_tx_list_lock);
        spin_lock_init(&pool->cq_lock);
        refcount_set(&pool->users, 1);

        pool->fq = xs->fq_tmp;
        pool->cq = xs->cq_tmp;

        for (i = 0; i < pool->free_heads_cnt; i++) {
                xskb = &pool->heads[i];
                xskb->pool = pool;
                xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
                pool->free_heads[i] = xskb;
        }

        return pool;

out:
        xp_destroy(pool);
        return NULL;
}
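/* Point every preallocated xdp_buff at the driver's receive queue info;
 * drivers typically reach this through the xsk_pool_set_rxq_info() helper.
 */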
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{
        u32 i;

        for (i = 0; i < pool->heads_cnt; i++)
                pool->heads[i].xdp.rxq = rxq;
}
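/* Zero-copy mode is switched on and off by handing the pool to the driver
 * with the ndo_bpf() XDP_SETUP_XSK_POOL command for the bound queue id.
 */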
static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
{
        struct netdev_bpf bpf;
        int err;

        if (pool->umem->zc) {
                bpf.command = XDP_SETUP_XSK_POOL;
                bpf.xsk.pool = NULL;
                bpf.xsk.queue_id = pool->queue_id;

                err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);
                if (err)
                        WARN(1, "Failed to disable zero-copy!\n");
        }
}
static int __xp_assign_dev(struct xsk_buff_pool *pool,
                           struct net_device *netdev, u16 queue_id, u16 flags)
{
        struct netdev_bpf bpf;
        int err;

        /* ... reject invalid XDP_ZEROCOPY/XDP_COPY flag combinations ... */
                return -EINVAL;
        /* ... only one pool may be bound to a given queue ... */
                return -EBUSY;

        pool->netdev = netdev;
        pool->queue_id = queue_id;
        err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
        if (err)
                return err;

        if (flags & XDP_USE_NEED_WAKEUP) {
                pool->uses_need_wakeup = true;
                /* ... */
                pool->cached_need_wakeup = XDP_WAKEUP_TX;
        }

        /* ... */
                /* For copy-mode, we are done. */
                return 0;

        if (!netdev->netdev_ops->ndo_bpf ||
            !netdev->netdev_ops->ndo_xsk_wakeup) {
                err = -EOPNOTSUPP;
                goto err_unreg_pool;
        }

        bpf.command = XDP_SETUP_XSK_POOL;
        bpf.xsk.pool = pool;
        bpf.xsk.queue_id = queue_id;

        err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
        if (err)
                goto err_unreg_pool;

        if (!pool->dma_pages) {
                WARN(1, "Driver did not DMA map zero-copy buffers");
                err = -EINVAL;
                goto err_unreg_xsk;
        }
        pool->umem->zc = true;
        return 0;

err_unreg_xsk:
        xp_disable_drv_zc(pool);
err_unreg_pool:
        /* ... */
        return err;
}
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
                  u16 queue_id, u16 flags)
{
        return __xp_assign_dev(pool, dev, queue_id, flags);
}
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
                         struct net_device *dev, u16 queue_id)
{
        u16 flags;
        struct xdp_umem *umem = umem_xs->umem;

        if (!pool->fq || !pool->cq)
                return -EINVAL;

        flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
        if (umem_xs->pool->uses_need_wakeup)
                flags |= XDP_USE_NEED_WAKEUP;

        return __xp_assign_dev(pool, dev, queue_id, flags);
}
void xp_clear_dev(struct xsk_buff_pool *pool)
{
        if (!pool->netdev)
                return;

        xp_disable_drv_zc(pool);
        xsk_clear_pool_at_qid(pool->netdev, pool->queue_id);
        dev_put(pool->netdev);
        pool->netdev = NULL;
}
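/* The pool is reference counted. The final xp_put_pool() schedules
 * xp_release_deferred(), which unbinds from the netdev, destroys the fill
 * and completion rings and drops the umem reference.
 */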
static void xp_release_deferred(struct work_struct *work)
{
        struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
                                                  work);

        rtnl_lock();
        xp_clear_dev(pool);
        rtnl_unlock();

        if (pool->fq) {
                xskq_destroy(pool->fq);
                pool->fq = NULL;
        }

        if (pool->cq) {
                xskq_destroy(pool->cq);
                pool->cq = NULL;
        }

        xdp_put_umem(pool->umem, false);
        xp_destroy(pool);
}
void xp_get_pool(struct xsk_buff_pool *pool)
{
        refcount_inc(&pool->users);
}
bool xp_put_pool(struct xsk_buff_pool *pool)
{
        if (!pool)
                return false;

        if (refcount_dec_and_test(&pool->users)) {
                INIT_WORK(&pool->work, xp_release_deferred);
                schedule_work(&pool->work);
                return true;
        }

        return false;
}
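/* DMA mappings are kept per (umem, device) pair in a struct xsk_dma_map on
 * umem->xsk_dma_list, so pools bound to the same netdev can share a single
 * mapping of the umem pages.
 */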
static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
{
        struct xsk_dma_map *dma_map;

        list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) {
                if (dma_map->netdev == pool->netdev)
                        return dma_map;
        }

        return NULL;
}
static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_device *netdev,
                                             u32 nr_pages, struct xdp_umem *umem)
{
        struct xsk_dma_map *dma_map;

        /* ... allocate dma_map itself ... */

        dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
        if (!dma_map->dma_pages) {
                kfree(dma_map);
                return NULL;
        }

        dma_map->netdev = netdev;
        dma_map->dev = dev;
        dma_map->dma_need_sync = false;
        dma_map->dma_pages_cnt = nr_pages;
        refcount_set(&dma_map->users, 1);
        list_add(&dma_map->list, &umem->xsk_dma_list);
        return dma_map;
}
static void xp_destroy_dma_map(struct xsk_dma_map *dma_map)
{
        list_del(&dma_map->list);
        kvfree(dma_map->dma_pages);
        kfree(dma_map);
}
static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
{
        dma_addr_t *dma;
        u32 i;

        for (i = 0; i < dma_map->dma_pages_cnt; i++) {
                dma = &dma_map->dma_pages[i];
                if (*dma) {
                        *dma &= ~XSK_NEXT_PG_CONTIG_MASK;
                        dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
                                             DMA_BIDIRECTIONAL, attrs);
                        *dma = 0;
                }
        }

        xp_destroy_dma_map(dma_map);
}
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
        struct xsk_dma_map *dma_map;

        if (pool->dma_pages_cnt == 0)
                return;

        dma_map = xp_find_dma_map(pool);
        /* ... */
        if (!refcount_dec_and_test(&dma_map->users))
                return;

        __xp_dma_unmap(dma_map, attrs);
        kvfree(pool->dma_pages);
        pool->dma_pages_cnt = 0;
        pool->dev = NULL;
}
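/* When two neighbouring umem pages are also contiguous in DMA space, the
 * lower page's address is tagged with XSK_NEXT_PG_CONTIG_MASK so that an
 * unaligned frame crossing the page boundary needs no second page lookup.
 */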
static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
{
        u32 i;

        for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) {
                if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1])
                        dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
                else
                        dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
        }
}
static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
{
        pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
        if (!pool->dma_pages)
                return -ENOMEM;

        pool->dev = dma_map->dev;
        pool->dma_pages_cnt = dma_map->dma_pages_cnt;
        pool->dma_need_sync = dma_map->dma_need_sync;
        memcpy(pool->dma_pages, dma_map->dma_pages,
               pool->dma_pages_cnt * sizeof(*pool->dma_pages));

        return 0;
}
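/* xp_dma_map() maps every umem page for the device (or reuses an existing
 * xsk_dma_map for the same netdev) and copies the addresses into the pool.
 * A rough sketch of the driver side, assuming a hypothetical pdev and that
 * the umem pages are available as pool->umem->pgs/npgs:
 *
 *        err = xp_dma_map(pool, &pdev->dev, DMA_ATTR_SKIP_CPU_SYNC,
 *                         pool->umem->pgs, pool->umem->npgs);
 *        ...
 *        xp_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
 *
 * In-tree drivers normally go through the xsk_pool_dma_map() and
 * xsk_pool_dma_unmap() wrappers rather than calling these directly.
 */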
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
               unsigned long attrs, struct page **pages, u32 nr_pages)
{
        struct xsk_dma_map *dma_map;
        dma_addr_t dma;
        u32 i;
        int err;

        dma_map = xp_find_dma_map(pool);
        if (dma_map) {
                err = xp_init_dma_info(pool, dma_map);
                /* ... */
                refcount_inc(&dma_map->users);
                return 0;
        }

        dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
        if (!dma_map)
                return -ENOMEM;

        for (i = 0; i < dma_map->dma_pages_cnt; i++) {
                dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
                                         DMA_BIDIRECTIONAL, attrs);
                if (dma_mapping_error(dev, dma)) {
                        /* ... */
                        return -ENOMEM;
                }
                if (dma_need_sync(dev, dma))
                        dma_map->dma_need_sync = true;
                dma_map->dma_pages[i] = dma;
        }

        if (pool->unaligned)
                xp_check_dma_contiguity(dma_map);

        err = xp_init_dma_info(pool, dma_map);
        /* ... */
}
static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
                                          u64 addr)
{
        return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
}
static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
{
        *addr = xp_unaligned_extract_addr(*addr);
        if (*addr >= pool->addrs_cnt ||
            *addr + pool->chunk_size > pool->addrs_cnt ||
            xp_addr_crosses_non_contig_pg(pool, *addr))
                return false;
        return true;
}
static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
{
        *addr = xp_aligned_extract_addr(pool, *addr);
        return *addr < pool->addrs_cnt;
}
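/* RX allocation path: __xp_alloc() pops a free buffer head and a chunk
 * address from the fill ring, validates the address for the pool's aligned
 * or unaligned mode and precomputes the frame's DMA address; xp_alloc()
 * recycles buffers from the pool's free_list before falling back to it.
 */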
static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{
        struct xdp_buff_xsk *xskb;
        u64 addr;
        bool ok;

        if (pool->free_heads_cnt == 0)
                return NULL;

        xskb = pool->free_heads[--pool->free_heads_cnt];

        for (;;) {
                if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
                        pool->fq->queue_empty_descs++;
                        /* ... */
                        return NULL;
                }

                ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
                     xp_check_aligned(pool, &addr);
                if (!ok) {
                        pool->fq->invalid_descs++;
                        xskq_cons_release(pool->fq);
                        continue;
                }
                break;
        }
        xskq_cons_release(pool->fq);

        xskb->orig_addr = addr;
        xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
        if (pool->dma_pages_cnt) {
                xskb->frame_dma = (pool->dma_pages[addr >> PAGE_SHIFT] &
                                   ~XSK_NEXT_PG_CONTIG_MASK) +
                                  (addr & ~PAGE_MASK);
                xskb->dma = xskb->frame_dma + pool->headroom +
                            XDP_PACKET_HEADROOM;
        }
        return xskb;
}
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
{
        struct xdp_buff_xsk *xskb;

        if (!pool->free_list_cnt) {
                xskb = __xp_alloc(pool);
                if (!xskb)
                        return NULL;
        } else {
                pool->free_list_cnt--;
                xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
                                        free_list_node);
                list_del(&xskb->free_list_node);
        }

        xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
        xskb->xdp.data_meta = xskb->xdp.data;

        if (pool->dma_need_sync) {
                dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
                                                 pool->frame_len,
                                                 DMA_BIDIRECTIONAL);
        }
        return &xskb->xdp;
}
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
        if (pool->free_list_cnt >= count)
                return true;
        return xskq_cons_has_entries(pool->fq, count - pool->free_list_cnt);
}
void xp_free(struct xdp_buff_xsk *xskb)
{
        xskb->pool->free_list_cnt++;
        list_add(&xskb->free_list_node, &xskb->pool->free_list);
}
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
        addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
        return pool->addrs + addr;
}
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{
        addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
        return (pool->dma_pages[addr >> PAGE_SHIFT] &
                ~XSK_NEXT_PG_CONTIG_MASK) +
                (addr & ~PAGE_MASK);
}
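/* The *_slow sync helpers below are only reached when the mapping reported
 * dma_need_sync; the inline fast-path wrappers check that flag first and
 * skip the call entirely when no CPU/device synchronization is required.
 */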
void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
{
        dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
                                      xskb->pool->frame_len, DMA_BIDIRECTIONAL);
}
void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
                                 size_t size)
{
        dma_sync_single_range_for_device(pool->dev, dma, 0,
                                         size, DMA_BIDIRECTIONAL);
}