Lines matching "dma pool" in include/net/page_pool.h (Linux kernel header excerpt)
/* SPDX-License-Identifier: GPL-2.0
 * ...
 * This page_pool allocator is optimized for the XDP mode that
 * uses one-frame-per-page, but has fallbacks that act like the
 * regular page allocator APIs.
 * ...
 * The API keeps track of in-flight pages, in order to let API users
 * know when it is safe to free a page_pool object.
 * ...
 * API users must only call page_pool_put_page() once per page; it
 * will either recycle the page or, on an elevated refcnt, it will
 * release the DMA mapping and in-flight state accounting. We
 * hope to lift this requirement in the future.
 */
#include <linux/dma-direction.h>
#define PP_FLAG_DMA_MAP		BIT(0) /* page_pool does the DMA map/unmap */
#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* pages the driver gets will be
					* DMA-synced-for-device according to
					* the length provided by the driver.
					* Please note DMA-sync-for-CPU is still
					* the driver's responsibility.
					*/
 * use-case. The NAPI budget is 64 packets. After a NAPI poll the RX
 * ring is refilled.
 * ...
 * Keeping room for more objects is due to the XDP_DROP use-case. As
 * XDP_DROP recycles pages back into the cache immediately, extra
 * headroom avoids overflowing the cache into the slower ptr_ring.
 */
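/* For reference, the cache constants the comment above is sizing; the
 * values below match the mainline header this excerpt was taken from.
 */
#define PP_ALLOC_CACHE_SIZE	128
#define PP_ALLOC_CACHE_REFILL	64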
struct page_pool_params {
	/* ... */
	struct device *dev; /* device, for DMA pre-mapping purposes */
	enum dma_data_direction dma_dir; /* DMA mapping direction */
	unsigned int max_len; /* max DMA sync memory size */
	unsigned int offset; /* DMA addr offset */
};
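/* A minimal setup sketch (driver-side, not part of this header): create a
 * pool that DMA-maps pages and syncs them for the device. The pool_size of
 * 1024 and the helper name my_create_pool() are illustrative assumptions.
 */
static struct page_pool *my_create_pool(struct device *dev)
{
	struct page_pool_params pp = {
		.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order     = 0,			/* one page per frame */
		.pool_size = 1024,		/* roughly the RX ring size */
		.nid       = NUMA_NO_NODE,
		.dev       = dev,		/* device doing the DMA */
		.dma_dir   = DMA_FROM_DEVICE,	/* RX: device writes, CPU reads */
		.max_len   = PAGE_SIZE,		/* sync at most a full page */
		.offset    = 0,
	};

	return page_pool_create(&pp);	/* returns ERR_PTR() on failure */
}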
 * RX-queue, as the RX-queue is already protected by
 * softirq/BH scheduling and napi_schedule().
 * ...
 * efficiently, in a way that doesn't bounce cache-lines.
 */
/* A page_pool is strictly tied to a single RX-queue being
 * protected by NAPI.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);

static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_pages(pool, gfp);
}
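/* Usage sketch (driver-side): refill one RX descriptor from the pool.
 * struct my_rx_ring, rx->pool, rx->headroom and my_post_rx_buffer() are
 * hypothetical driver names, not part of this API.
 */
static int my_refill_one(struct my_rx_ring *rx)
{
	struct page *page = page_pool_dev_alloc_pages(rx->pool);

	if (!page)
		return -ENOMEM;

	/* With PP_FLAG_DMA_MAP set, the page is already DMA-mapped */
	my_post_rx_buffer(rx, page, page_pool_get_dma_addr(page) + rx->headroom);
	return 0;
}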
/* Get the stored DMA direction. A driver might decide to treat this locally
 * and avoid the extra cache line from page_pool to determine the direction.
 */
static
inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
{
	return pool->p.dma_dir;
}
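/* Sketch of the "treat this locally" idea from the comment above: read the
 * direction once at setup and keep it in a driver field, so the hot path
 * avoids touching the page_pool cache line. rx->dma_dir, rx->pool and
 * rx->dev are hypothetical driver fields.
 */
rx->dma_dir = page_pool_get_dma_dir(rx->pool);	/* at ring-setup time */
/* ... later, per received frame: */
dma_sync_single_for_cpu(rx->dev, dma_addr, frame_len, rx->dma_dir);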
#ifdef CONFIG_PAGE_POOL
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
void page_pool_release_page(struct page_pool *pool, struct page *page);
#else
/* No-op stubs when page_pool is not compiled in */
static inline void page_pool_destroy(struct page_pool *pool) { }
static inline void page_pool_use_xdp_mem(struct page_pool *pool,
					 void (*disconnect)(void *)) { }
static inline void page_pool_release_page(struct page_pool *pool,
					  struct page *page) { }
#endif
void page_pool_put_page(struct page_pool *pool, struct page *page,
			unsigned int dma_sync_size, bool allow_direct);

/* Same as above, but will try to sync the entire area pool->max_len */
static inline void page_pool_put_full_page(struct page_pool *pool,
					   struct page *page, bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield the linker.
	 */
#ifdef CONFIG_PAGE_POOL
	page_pool_put_page(pool, page, -1, allow_direct);
#endif
}
/* Same as above, but the caller must guarantee safe context, e.g. NAPI */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
	page_pool_put_full_page(pool, page, true);
}
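/* Sketch (driver-side): handling XDP verdicts inside the NAPI poll loop,
 * where lockless "direct" recycling is safe. The act/page/rx variables are
 * hypothetical driver state.
 */
switch (act) {
case XDP_DROP:
	/* Page goes straight back into the pool's per-CPU cache */
	page_pool_recycle_direct(rx->pool, page);
	break;
case XDP_PASS:
	/* Page leaves the pool's control: unmap and drop in-flight state */
	page_pool_release_page(rx->pool, page);
	break;
}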
static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
	dma_addr_t ret = page->dma_addr[0];

	/* On 32-bit hosts with a 64-bit dma_addr_t, the address is split
	 * across two unsigned longs; "<< 16 << 16" is used instead of
	 * "<< 32" so the (then dead) branch still compiles without a
	 * shift-count warning when dma_addr_t is only 32 bits wide.
	 */
	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
	return ret;
}
static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
	page->dma_addr[0] = addr;
	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		page->dma_addr[1] = upper_32_bits(addr);
}
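/* Worked example of the split storage above, assuming a 32-bit kernel with
 * a 64-bit dma_addr_t (e.g. ARM with LPAE). For addr = 0x123456000:
 *   page->dma_addr[0] = 0x23456000   (low 32 bits)
 *   page->dma_addr[1] = 0x1          (high 32 bits)
 * page_pool_get_dma_addr() then reassembles (0x1 << 32) | 0x23456000.
 */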
static inline bool page_pool_put(struct page_pool *pool)
{
	return refcount_dec_and_test(&pool->user_cnt);
}
void page_pool_update_nid(struct page_pool *pool, int new_nid);

static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
{
	if (unlikely(pool->p.nid != new_nid))
		page_pool_update_nid(pool, new_nid);
}
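/* Sketch (driver-side): check once per NAPI poll whether the pool should
 * follow the CPU to a new NUMA node, so refills allocate node-local pages.
 * struct my_rx_ring and its fields are hypothetical driver names.
 */
static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_rx_ring *rx = container_of(napi, struct my_rx_ring, napi);

	page_pool_nid_changed(rx->pool, numa_mem_id());
	/* ... process up to budget packets ... */
	return 0;
}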