
Searched refs: umem (Results 1 – 5 of 5), sorted by relevance

/net/xdp/
xdp_umem.c
    in xdp_umem_unpin_pages():
        26  static void xdp_umem_unpin_pages(struct xdp_umem *umem)
        28          unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);
        30          kvfree(umem->pgs);
        31          umem->pgs = NULL;
    in xdp_umem_unaccount_pages():
        34  static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
        36          if (umem->user) {
        37                  atomic_long_sub(umem->npgs, &umem->user->locked_vm);
        38                  free_uid(umem->user);
    in xdp_umem_addr_unmap():
        42  static void xdp_umem_addr_unmap(struct xdp_umem *umem)
        44          vunmap(umem->addrs);
    [all …]
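
These hits are the teardown half of UMEM management: the pinned user pages are unpinned and freed, their count is subtracted from the owning user's locked_vm accounting, and the contiguous kernel mapping is vunmap()ed. Because registered UMEM pages are charged against locked memory (line 37 above), userspace commonly raises RLIMIT_MEMLOCK before registering a large area. A minimal sketch in userspace C (raising the limit to infinity needs root or CAP_SYS_RESOURCE):

    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
            /* Registered UMEM pages count toward locked_vm, so raise the
             * memlock limit before a large XDP_UMEM_REG would exceed it. */
            struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };

            if (setrlimit(RLIMIT_MEMLOCK, &r))
                    perror("setrlimit(RLIMIT_MEMLOCK)");
            return 0;
    }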
xsk_buff_pool.c
    in xp_create_and_assign_umem():
        56                          struct xdp_umem *umem)
        62          pool = kvzalloc(struct_size(pool, free_heads, umem->chunks),
        67          pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL);
        75          pool->chunk_mask = ~((u64)umem->chunk_size - 1);
        76          pool->addrs_cnt = umem->size;
        77          pool->heads_cnt = umem->chunks;
        78          pool->free_heads_cnt = umem->chunks;
        79          pool->headroom = umem->headroom;
        80          pool->chunk_size = umem->chunk_size;
        81          pool->unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
    [all …]
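
xp_create_and_assign_umem() copies the UMEM geometry into the buffer pool. The one derived value is chunk_mask (line 75), which relies on aligned-mode chunk sizes being powers of two: masking any UMEM address with it rounds the address down to its chunk base. A standalone illustration of that arithmetic (values made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t chunk_size = 2048;               /* power of two in aligned mode */
            uint64_t chunk_mask = ~(chunk_size - 1);  /* as computed on line 75 */
            uint64_t addr = 5000;                     /* arbitrary umem offset */

            /* 5000 & ~2047 == 4096: the base of the chunk containing addr. */
            printf("addr %llu -> chunk base %llu\n",
                   (unsigned long long)addr,
                   (unsigned long long)(addr & chunk_mask));
            return 0;
    }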
xsk_diag.c
    in xsk_diag_put_umem():
        50          struct xdp_umem *umem = xs->umem;
        54          if (!umem)
        57          du.id = umem->id;
        58          du.size = umem->size;
        59          du.num_pages = umem->npgs;
        60          du.chunk_size = umem->chunk_size;
        61          du.headroom = umem->headroom;
        65          if (umem->zc)
        67          du.refs = refcount_read(&umem->users);
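
xsk_diag_put_umem() fills the UMEM attribute that the sock_diag netlink interface reports for AF_XDP sockets. For orientation, the UAPI struct being populated looks roughly like this (reconstructed from <linux/xdp_diag.h> as I recall it; check your kernel headers):

    #include <linux/types.h>

    /* Sketch of struct xdp_diag_umem: the hits above show id, size,
     * num_pages, chunk_size, headroom, flags (XDP_DU_F_ZEROCOPY when
     * umem->zc) and refs being filled in. */
    struct xdp_diag_umem {
            __u64   size;
            __u32   id;
            __u32   num_pages;
            __u32   chunk_size;
            __u32   headroom;
            __u32   ifindex;
            __u32   queue_id;
            __u64   flags;
            __u32   refs;
    };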
xsk.c
    in xsk_build_skb_zerocopy():
       466          page = pool->umem->pgs[addr >> PAGE_SHIFT];
    in xsk_bind():
       945          if (xs->umem) {
       969                                  umem_xs->umem);
      1011                  xdp_get_umem(umem_xs->umem);
      1012                  WRITE_ONCE(xs->umem, umem_xs->umem);
      1014          } else if (!xs->umem || !xsk_validate_queues(xs)) {
      1019          xs->pool = xp_create_and_assign_umem(xs, xs->umem);
      1038          xs->zc = xs->umem->zc;
    in xsk_setsockopt():
      1104          struct xdp_umem *umem;
      1115          if (xs->state != XSK_READY || xs->umem) {
    [all …]
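
The xsk_bind() hits show the two bind paths: a shared-UMEM bind borrows another socket's UMEM (the xdp_get_umem()/WRITE_ONCE pair at lines 1011-1012), while a plain bind requires the socket's own UMEM and builds a fresh pool (line 1019). A minimal userspace sketch of the shared path, assuming umem_fd is an AF_XDP socket that already registered the UMEM via XDP_UMEM_REG:

    #include <linux/if_xdp.h>
    #include <net/if.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Bind xsk_fd to the UMEM owned by umem_fd; in the kernel this takes
     * the xdp_get_umem(umem_xs->umem) branch shown above. */
    static int bind_shared(int xsk_fd, int umem_fd, const char *ifname,
                           __u32 queue_id)
    {
            struct sockaddr_xdp sxdp;

            memset(&sxdp, 0, sizeof(sxdp));
            sxdp.sxdp_family = AF_XDP;
            sxdp.sxdp_ifindex = if_nametoindex(ifname);
            sxdp.sxdp_queue_id = queue_id;
            sxdp.sxdp_flags = XDP_SHARED_UMEM;
            sxdp.sxdp_shared_umem_fd = umem_fd;

            return bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
    }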
xdp_umem.h
    11  void xdp_get_umem(struct xdp_umem *umem);
    12  void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup);
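
The header exposes only the reference-count pair; defer_cleanup lets the final put push the expensive unpin/unaccount work out of the caller's context (in the kernel, to a workqueue). A toy userspace model of that contract, with all names hypothetical:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct umem_model { atomic_int users; };

    /* Every get must be balanced by a put, mirroring the pairing seen in
     * xsk_bind() (get on shared bind) and socket release (put). */
    static void get_umem(struct umem_model *u)
    {
            atomic_fetch_add(&u->users, 1);
    }

    static void put_umem(struct umem_model *u, bool defer_cleanup)
    {
            if (atomic_fetch_sub(&u->users, 1) == 1)
                    printf("last ref gone; cleanup %s\n",
                           defer_cleanup ? "deferred" : "inline");
    }

    int main(void)
    {
            struct umem_model u = { 1 };    /* creating socket's reference */

            get_umem(&u);                   /* shared-umem bind takes a ref */
            put_umem(&u, true);             /* sharing socket closes */
            put_umem(&u, true);             /* creator closes: teardown runs */
            return 0;
    }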