/kernel/linux/linux-5.10/include/net/ |
D | page_pool.h |
    82    struct page_pool {    struct
    131   struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);    argument
    133   static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)    in page_pool_dev_alloc_pages()
    144   inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)    in page_pool_get_dma_dir()
    149   struct page_pool *page_pool_create(const struct page_pool_params *params);
    152   void page_pool_destroy(struct page_pool *pool);
    153   void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
    154   void page_pool_release_page(struct page_pool *pool, struct page *page);
    156   static inline void page_pool_destroy(struct page_pool *pool)    in page_pool_destroy()
    160   static inline void page_pool_use_xdp_mem(struct page_pool *pool,    in page_pool_use_xdp_mem()
    [all …]
|
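Read together, the hits above are the public surface of the allocator: create a pool, pull pages in the datapath, tear it down. A minimal lifecycle sketch, assuming a NAPI RX context; the parameter values and the example_ names are illustrative assumptions, not from the source:

    #include <net/page_pool.h>

    /* Minimal lifecycle sketch; sizing and DMA direction are assumed. */
    static struct page_pool *example_pool_create(struct device *dev)
    {
        struct page_pool_params pp_params = {
            .flags     = PP_FLAG_DMA_MAP,   /* pool maps pages for us */
            .order     = 0,                 /* single pages */
            .pool_size = 256,               /* assumed ring depth */
            .nid       = NUMA_NO_NODE,
            .dev       = dev,
            .dma_dir   = DMA_FROM_DEVICE,   /* RX-only traffic */
        };

        /* Returns ERR_PTR() on failure, never NULL. */
        return page_pool_create(&pp_params);
    }

    static struct page *example_rx_refill(struct page_pool *pool)
    {
        /* GFP_ATOMIC convenience wrapper around page_pool_alloc_pages(),
         * safe to call from the softirq RX path. */
        return page_pool_dev_alloc_pages(pool);
    }

    static void example_pool_teardown(struct page_pool *pool)
    {
        /* Pages still in flight keep the pool alive internally until
         * they are returned or released. */
        page_pool_destroy(pool);
    }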
D | xdp_priv.h | 12 struct page_pool *page_pool; member
|
D | xdp.h | 55 struct page_pool;
|
/kernel/linux/linux-5.10/net/core/ |
D | page_pool.c |
    24    static int page_pool_init(struct page_pool *pool,    in page_pool_init()
    81    struct page_pool *page_pool_create(const struct page_pool_params *params)    in page_pool_create()
    83    struct page_pool *pool;    in page_pool_create()
    101   static void page_pool_return_page(struct page_pool *pool, struct page *page);
    104   static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)    in page_pool_refill_alloc_cache()
    156   static struct page *__page_pool_get_cached(struct page_pool *pool)    in __page_pool_get_cached()
    171   static void page_pool_dma_sync_for_device(struct page_pool *pool,    in page_pool_dma_sync_for_device()
    185   static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,    in __page_pool_alloc_pages_slow()
    247   struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)    in page_pool_alloc_pages()
    267   static s32 page_pool_inflight(struct page_pool *pool)    in page_pool_inflight()
    [all …]
|
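The helper names indexed here spell out the allocation path: page_pool_alloc_pages() first tries the per-pool cache via __page_pool_get_cached() (bulk-refilled from the ptr_ring by page_pool_refill_alloc_cache()) and only then falls back to __page_pool_alloc_pages_slow(), which takes pages from the page allocator and DMA-maps them when the pool was created with PP_FLAG_DMA_MAP. A simplified sketch of that control flow, a close paraphrase rather than the literal 5.10 body:

    /* Simplified control flow of the allocator entry point. */
    struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
    {
        struct page *page;

        /* Fast path: lockless per-pool cache, refilled from the ptr_ring. */
        page = __page_pool_get_cached(pool);
        if (page)
            return page;

        /* Slow path: buddy allocator, plus DMA mapping when requested. */
        return __page_pool_alloc_pages_slow(pool, gfp);
    }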
D | xdp.c |
    129   page_pool_destroy(xa->page_pool);    in xdp_rxq_info_unreg_mem_model()
    354   page_pool_put_full_page(xa->page_pool, page, napi_direct);    in __xdp_return()
    402   page_pool_release_page(xa->page_pool, page);    in __xdp_release_frame()
|
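These three hits are the consumer side of the XDP memory model: once a pool is registered as MEM_TYPE_PAGE_POOL, frame returns are routed back to it by resolving the allocator from the frame's mem id. A hedged sketch of that dispatch; the lookup helper is a hypothetical stand-in for the internal rhashtable lookup the real code performs under RCU:

    /* Sketch of the MEM_TYPE_PAGE_POOL branch of __xdp_return();
     * example_lookup_allocator() is a hypothetical helper. */
    static void example_xdp_return(void *data, struct xdp_mem_info *mem,
                                   bool napi_direct)
    {
        struct page *page = virt_to_head_page(data);

        if (mem->type == MEM_TYPE_PAGE_POOL) {
            struct xdp_mem_allocator *xa;   /* from net/xdp_priv.h */

            rcu_read_lock();
            xa = example_lookup_allocator(mem->id);
            /* napi_direct permits lockless recycling into the hot cache. */
            page_pool_put_full_page(xa->page_pool, page, napi_direct);
            rcu_read_unlock();
        } else {
            put_page(page);
        }
    }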
D | Makefile | 17 obj-$(CONFIG_PAGE_POOL) += page_pool.o
|
/kernel/linux/linux-5.10/include/trace/events/ |
D | page_pool.h |
    3     #define TRACE_SYSTEM page_pool
    16    TP_PROTO(const struct page_pool *pool,
    22    __field(const struct page_pool *, pool)
    44    TP_PROTO(const struct page_pool *pool,
    50    __field(const struct page_pool *, pool)
    69    TP_PROTO(const struct page_pool *pool,
    75    __field(const struct page_pool *, pool)
    94    TP_PROTO(const struct page_pool *pool, int new_nid),
    99    __field(const struct page_pool *, pool)
|
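Each TP_PROTO/__field pair above is one tracepoint keyed on the pool pointer; the last one, taking int new_nid, traces NUMA node updates. As a reminder of the shape of such a declaration, a sketch follows; the event name is an assumption, only the argument list mirrors the fragments above:

    /* Sketch of one page_pool tracepoint; the event name is assumed. */
    TRACE_EVENT(page_pool_example_nid_update,
        TP_PROTO(const struct page_pool *pool, int new_nid),
        TP_ARGS(pool, new_nid),
        TP_STRUCT__entry(
            __field(const struct page_pool *, pool)
            __field(int, new_nid)
        ),
        TP_fast_assign(
            __entry->pool = pool;
            __entry->new_nid = new_nid;
        ),
        TP_printk("page_pool=%p new_nid=%d",
                  __entry->pool, __entry->new_nid)
    )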
/kernel/linux/linux-5.10/Documentation/networking/ |
D | page_pool.rst |
    7     The page_pool allocator is optimized for the XDP mode that uses one frame
    15    when it is safe to free a page_pool object. Thus, API users
    16    must run page_pool_release_page() when a page is leaving the page_pool or
    93    * page_pool_dev_alloc_pages(): Get a page from the page allocator or page_pool
    114   /* internal DMA mapping in page_pool */
    120   page_pool = page_pool_create(&pp_params);
    126   err = xdp_rxq_info_reg_mem_model(&xdp_rxq, MEM_TYPE_PAGE_POOL, page_pool);
    139   dma_dir = page_pool_get_dma_dir(dring->page_pool);
    142   page_pool_recycle_direct(page_pool, page);
    145   page_pool_recycle_direct(page_pool, page);
    [all …]
|
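The rst fragments sketch the documented driver pattern end to end: fill in page_pool_params (with internal DMA mapping), create the pool, register it as the RX queue's memory model, and recycle from NAPI. Assembled into one hedged example; the my_ring structure and the sizing constants are assumptions:

    /* Setup sketch assembled from the documentation fragments above. */
    struct my_ring {
        struct page_pool *page_pool;
        struct xdp_rxq_info xdp_rxq;
    };

    static int my_ring_pool_setup(struct my_ring *ring, struct device *dev)
    {
        struct page_pool_params pp_params = {
            .flags     = PP_FLAG_DMA_MAP, /* internal DMA mapping in page_pool */
            .order     = 0,
            .pool_size = 1024,            /* assumed descriptor count */
            .nid       = NUMA_NO_NODE,
            .dev       = dev,
            .dma_dir   = DMA_FROM_DEVICE,
        };
        int err;

        ring->page_pool = page_pool_create(&pp_params);
        if (IS_ERR(ring->page_pool)) {
            err = PTR_ERR(ring->page_pool);
            ring->page_pool = NULL;
            return err;
        }

        /* Route XDP frame returns for this RX queue back into the pool;
         * assumes xdp_rxq_info_reg() was already called on xdp_rxq. */
        err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_POOL,
                                         ring->page_pool);
        if (err) {
            page_pool_destroy(ring->page_pool);
            ring->page_pool = NULL;
        }
        return err;
    }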
D | index.rst | 27 page_pool
|
/kernel/linux/linux-5.10/drivers/net/ethernet/apm/xgene/ |
D | xgene_enet_main.c |
    672   struct xgene_enet_desc_ring *buf_pool, *page_pool;    in xgene_enet_rx_frame()    local
    690   page_pool = rx_ring->page_pool;    in xgene_enet_rx_frame()
    719   xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);    in xgene_enet_rx_frame()
    729   slots = page_pool->slots - 1;    in xgene_enet_rx_frame()
    730   head = page_pool->head;    in xgene_enet_rx_frame()
    741   page = page_pool->frag_page[head];    in xgene_enet_rx_frame()
    747   page_pool->frag_page[head] = NULL;    in xgene_enet_rx_frame()
    751   page_pool->head = head;    in xgene_enet_rx_frame()
    764   ret = xgene_enet_refill_pagepool(page_pool, NUM_NXTBUFPOOL);    in xgene_enet_rx_frame()
    1062  struct xgene_enet_desc_ring *buf_pool, *page_pool;    in xgene_enet_delete_desc_rings()    local
    [all …]
|
D | xgene_enet_cle.c |
    709   if (pdata->rx_ring[idx]->page_pool) {    in xgene_cle_set_rss_idt()
    710   pool_id = pdata->rx_ring[idx]->page_pool->id;    in xgene_cle_set_rss_idt()
    786   if (pdata->rx_ring[0]->page_pool) {    in xgene_enet_cle_init()
    787   pool_id = pdata->rx_ring[0]->page_pool->id;    in xgene_enet_cle_init()
|
D | xgene_enet_main.h | 119 struct xgene_enet_desc_ring *page_pool; member
|
/kernel/linux/linux-5.10/block/ |
D | bounce.c |
    32    static mempool_t page_pool, isa_page_pool;    variable
    61    ret = mempool_init_page_pool(&page_pool, POOL_SIZE, 0);    in init_emergency_pool()
    188   bounce_end_io(bio, &page_pool);    in bounce_end_io_write()
    209   __bounce_end_io_read(bio, &page_pool);    in bounce_end_io_read()
    347   if (pool == &page_pool) {    in __blk_queue_bounce()
    379   pool = &page_pool;    in blk_queue_bounce()
|
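block/bounce.c is unrelated to the network allocator: its page_pool is a mempool_t of emergency pages used to bounce highmem bios. A sketch of the init/alloc pattern the hits show; the pool size constant is an assumption standing in for the file's POOL_SIZE:

    #include <linux/mempool.h>

    #define EXAMPLE_POOL_SIZE 64  /* stands in for bounce.c's POOL_SIZE */

    static mempool_t example_page_pool;

    static int __init example_init_emergency_pool(void)
    {
        /* Pre-reserves EXAMPLE_POOL_SIZE order-0 pages; later
         * mempool_alloc() calls can dip into that reserve. */
        return mempool_init_page_pool(&example_page_pool,
                                      EXAMPLE_POOL_SIZE, 0);
    }

    static struct page *example_bounce_page(gfp_t gfp)
    {
        return mempool_alloc(&example_page_pool, gfp);
    }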
/kernel/linux/linux-5.10/mm/ |
D | readahead.c |
    179   LIST_HEAD(page_pool);    in page_cache_ra_unbounded()
    212   read_pages(ractl, &page_pool, true);    in page_cache_ra_unbounded()
    221   list_add(&page->lru, &page_pool);    in page_cache_ra_unbounded()
    225   read_pages(ractl, &page_pool, true);    in page_cache_ra_unbounded()
    238   read_pages(ractl, &page_pool, false);    in page_cache_ra_unbounded()
|
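mm/readahead.c is another unrelated user of the name: here page_pool is a local LIST_HEAD that batches freshly allocated page-cache pages so read_pages() can submit them to the filesystem in one go. A sketch of the idiom, with a function pointer standing in for the static read_pages() helper:

    #include <linux/gfp.h>
    #include <linux/list.h>
    #include <linux/mm_types.h>

    /* Batching sketch of page_cache_ra_unbounded(); submit() stands in
     * for the static read_pages() helper. */
    static void example_readahead_batch(unsigned long nr_to_read,
                                        void (*submit)(struct list_head *))
    {
        LIST_HEAD(page_pool);
        unsigned long i;

        for (i = 0; i < nr_to_read; i++) {
            struct page *page = alloc_page(GFP_KERNEL);

            if (!page)
                break;
            /* Chain the page onto the local pool via its lru member. */
            list_add(&page->lru, &page_pool);
        }

        /* Hand the whole batch over at once. */
        submit(&page_pool);
    }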
/kernel/linux/linux-5.10/drivers/net/ethernet/socionext/ |
D | netsec.c |
    286   struct page_pool *page_pool;    member
    728   page = page_pool_dev_alloc_pages(dring->page_pool);    in netsec_alloc_rx_data()
    849   page_pool_get_dma_dir(rx_ring->page_pool);    in netsec_xdp_queue_one()
    908   page_pool_put_page(dring->page_pool, page, sync, true);    in netsec_run_xdp()
    918   page_pool_put_page(dring->page_pool, page, sync, true);    in netsec_run_xdp()
    930   page_pool_put_page(dring->page_pool, page, sync, true);    in netsec_run_xdp()
    954   dma_dir = page_pool_get_dma_dir(dring->page_pool);    in netsec_process_rx()
    1031  page_pool_put_page(dring->page_pool, page, pkt_len,    in netsec_process_rx()
    1037  page_pool_release_page(dring->page_pool, page);    in netsec_process_rx()
    1207  page_pool_put_full_page(dring->page_pool, page, false);    in netsec_uninit_pkt_dring()
    [all …]
|
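The three identical page_pool_put_page(..., sync, true) hits in netsec_run_xdp() show per-verdict recycling: on drop or error the page goes straight back into the pool, and the sync argument bounds the DMA sync to the bytes the device actually wrote. A hedged sketch, with the XDP_TX/XDP_REDIRECT paths omitted:

    /* Verdict-handling sketch modeled on the netsec_run_xdp() hits;
     * simplified, TX and redirect verdicts omitted. */
    static u32 example_run_xdp(struct page_pool *pool, struct bpf_prog *prog,
                               struct xdp_buff *xdp, unsigned int sync)
    {
        struct page *page = virt_to_head_page(xdp->data);
        u32 act = bpf_prog_run_xdp(prog, xdp);

        switch (act) {
        case XDP_PASS:
            return act;  /* the skb build path takes ownership */
        case XDP_ABORTED:
        case XDP_DROP:
        default:
            /* allow_direct=true: running in the NAPI softirq context. */
            page_pool_put_page(pool, page, sync, true);
            return XDP_DROP;
        }
    }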
/kernel/linux/linux-5.10/drivers/net/ethernet/ti/ |
D | cpsw_priv.c |
    1103  struct page_pool *pool;    in cpsw_fill_rx_channels()
    1110  pool = cpsw->page_pool[ch];    in cpsw_fill_rx_channels()
    1144  static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,    in cpsw_create_page_pool()
    1148  struct page_pool *pool;    in cpsw_create_page_pool()
    1166  struct page_pool *pool;    in cpsw_create_rx_pool()
    1174  cpsw->page_pool[ch] = pool;    in cpsw_create_rx_pool()
    1183  struct page_pool *pool;    in cpsw_ndev_create_xdp_rxq()
    1186  pool = cpsw->page_pool[ch];    in cpsw_ndev_create_xdp_rxq()
    1224  page_pool_destroy(cpsw->page_pool[ch]);    in cpsw_destroy_xdp_rxqs()
    1225  cpsw->page_pool[ch] = NULL;    in cpsw_destroy_xdp_rxqs()
    [all …]
|
D | cpsw_priv.h | 358 struct page_pool *page_pool[CPSW_MAX_QUEUES]; member
|
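cpsw keeps one pool per DMA channel (the page_pool[CPSW_MAX_QUEUES] array declared above), creating them as channels come up and destroying them symmetrically. A teardown sketch following the cpsw_destroy_xdp_rxqs() hits; the rx_ch_num field name is an assumption about cpsw_common:

    /* Per-channel teardown sketch; rx_ch_num is an assumed field. */
    static void example_destroy_rx_pools(struct cpsw_common *cpsw)
    {
        int ch;

        for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
            /* page_pool_destroy() accepts NULL, but clearing the slot
             * keeps repeated teardown idempotent. */
            page_pool_destroy(cpsw->page_pool[ch]);
            cpsw->page_pool[ch] = NULL;
        }
    }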
/kernel/linux/linux-5.10/drivers/net/ethernet/stmicro/stmmac/ |
D | stmmac.h | 74 struct page_pool *page_pool; member
|
D | stmmac_main.c |
    1343  buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);    in stmmac_init_rx_buffers()
    1348  buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);    in stmmac_init_rx_buffers()
    1379  page_pool_put_full_page(rx_q->page_pool, buf->page, false);    in stmmac_free_rx_buffer()
    1383  page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);    in stmmac_free_rx_buffer()
    1648  if (rx_q->page_pool)    in free_dma_rx_desc_resources()
    1649  page_pool_destroy(rx_q->page_pool);    in free_dma_rx_desc_resources()
    1722  rx_q->page_pool = page_pool_create(&pp_params);    in alloc_dma_rx_desc_resources()
    1723  if (IS_ERR(rx_q->page_pool)) {    in alloc_dma_rx_desc_resources()
    1724  ret = PTR_ERR(rx_q->page_pool);    in alloc_dma_rx_desc_resources()
    1725  rx_q->page_pool = NULL;    in alloc_dma_rx_desc_resources()
    [all …]
|
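The stmmac hits at 1722-1725 show the standard error idiom for page_pool_create(), which reports failure via ERR_PTR() rather than NULL. A sketch of that idiom wrapped as a helper:

    /* Error-handling sketch of the idiom at lines 1722-1725 above. */
    static int example_alloc_rx_pool(struct stmmac_rx_queue *rx_q,
                                     struct page_pool_params *pp_params)
    {
        rx_q->page_pool = page_pool_create(pp_params);
        if (IS_ERR(rx_q->page_pool)) {
            int ret = PTR_ERR(rx_q->page_pool);

            /* Clear the pointer so free_dma_rx_desc_resources() can
             * safely test it during unwind (lines 1648-1649). */
            rx_q->page_pool = NULL;
            return ret;
        }
        return 0;
    }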
/kernel/linux/linux-5.10/drivers/net/ethernet/marvell/mvpp2/ |
D | mvpp2_main.c |
    94    static struct page_pool *
    351   struct page_pool *page_pool)    in mvpp2_frag_alloc()    argument
    353   if (page_pool)    in mvpp2_frag_alloc()
    354   return page_pool_dev_alloc_pages(page_pool);    in mvpp2_frag_alloc()
    363   struct page_pool *page_pool, void *data)    in mvpp2_frag_free()    argument
    365   if (page_pool)    in mvpp2_frag_free()
    366   page_pool_put_full_page(page_pool, virt_to_head_page(data), false);    in mvpp2_frag_free()
    472   struct page_pool *pp = NULL;    in mvpp2_bm_bufs_free()
    482   pp = priv->page_pool[bm_pool->id];    in mvpp2_bm_bufs_free()
    547   page_pool_destroy(priv->page_pool[bm_pool->id]);    in mvpp2_bm_pool_destroy()
    [all …]
|
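mvpp2 can run with or without a pool per buffer-manager pool id, so its frag helpers branch on the pointer. A sketch of that pairing; the non-pool fallbacks (netdev_alloc_frag()/skb_free_frag()) and the symmetric virtual-address return are assumptions, not lifted from the driver:

    /* Conditional helpers modeled on mvpp2_frag_alloc()/mvpp2_frag_free();
     * the fallback calls are assumptions. */
    static void *example_frag_alloc(struct page_pool *page_pool,
                                    unsigned int frag_size)
    {
        if (page_pool) {
            struct page *page = page_pool_dev_alloc_pages(page_pool);

            /* Return the mapped address so both branches hand back
             * virtual pointers (the driver itself returns the page). */
            return page ? page_address(page) : NULL;
        }
        return netdev_alloc_frag(frag_size);
    }

    static void example_frag_free(struct page_pool *page_pool, void *data)
    {
        if (page_pool)
            page_pool_put_full_page(page_pool, virt_to_head_page(data),
                                    false);  /* not in NAPI context */
        else
            skb_free_frag(data);
    }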
/kernel/linux/linux-5.10/drivers/net/ |
D | xen-netfront.c |
    158   struct page_pool *page_pool;    member
    283   page = page_pool_alloc_pages(queue->page_pool,    in xennet_alloc_one_rx_buffer()
    1850  page_pool_destroy(queue->page_pool);    in xennet_disconnect_backend()
    2205  queue->page_pool = page_pool_create(&pp_params);    in xennet_create_page_pool()
    2206  if (IS_ERR(queue->page_pool)) {    in xennet_create_page_pool()
    2207  err = PTR_ERR(queue->page_pool);    in xennet_create_page_pool()
    2208  queue->page_pool = NULL;    in xennet_create_page_pool()
    2220  MEM_TYPE_PAGE_POOL, queue->page_pool);    in xennet_create_page_pool()
    2230  page_pool_destroy(queue->page_pool);    in xennet_create_page_pool()
    2231  queue->page_pool = NULL;    in xennet_create_page_pool()
|
/kernel/linux/linux-5.10/drivers/net/ethernet/marvell/ |
D | mvneta.c |
    679   struct page_pool *page_pool;    member
    1898  page = page_pool_alloc_pages(rxq->page_pool,    in mvneta_rx_refill()
    1971  page_pool_put_full_page(rxq->page_pool, data, false);    in mvneta_rxq_drop_pkts()
    1975  page_pool_destroy(rxq->page_pool);    in mvneta_rxq_drop_pkts()
    1976  rxq->page_pool = NULL;    in mvneta_rxq_drop_pkts()
    2034  page_pool_put_full_page(rxq->page_pool,    in mvneta_xdp_put_buff()
    2036  page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),    in mvneta_xdp_put_buff()
    2248  dma_dir = page_pool_get_dma_dir(rxq->page_pool);    in mvneta_swbm_rx_frame()
    2286  dma_dir = page_pool_get_dma_dir(rxq->page_pool);    in mvneta_swbm_add_rx_fragment()
    2300  page_pool_put_full_page(rxq->page_pool, page, true);    in mvneta_swbm_add_rx_fragment()
    [all …]
|
/kernel/linux/patches/linux-5.10/zhiyuan_patch/ |
D | kernel.patch |
    20    drivers/dma-buf/heaps/page_pool.c | 247 ++++++++
    21    drivers/dma-buf/heaps/page_pool.h |  55 ++
    31    create mode 100755 drivers/dma-buf/heaps/page_pool.c
    32    create mode 100755 drivers/dma-buf/heaps/page_pool.h
    644   +obj-$(CONFIG_DMABUF_HEAPS_PAGE_POOL) += page_pool.o
    1283  diff --git a/drivers/dma-buf/heaps/page_pool.c b/drivers/dma-buf/heaps/page_pool.c
    1287  +++ b/drivers/dma-buf/heaps/page_pool.c
    1304  +#include "page_pool.h"
    1536  diff --git a/drivers/dma-buf/heaps/page_pool.h b/drivers/dma-buf/heaps/page_pool.h
    1540  +++ b/drivers/dma-buf/heaps/page_pool.h
    [all …]
|
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/ |
D | en.h |
    61    struct page_pool;
    621   struct page_pool *page_pool;    member
|
D | en_rx.c |
    275   dma_info->page = page_pool_dev_alloc_pages(rq->page_pool);    in mlx5e_page_alloc_pool()
    282   page_pool_recycle_direct(rq->page_pool, dma_info->page);    in mlx5e_page_alloc_pool()
    314   page_pool_recycle_direct(rq->page_pool, dma_info->page);    in mlx5e_page_release_dynamic()
    317   page_pool_release_page(rq->page_pool, dma_info->page);    in mlx5e_page_release_dynamic()
    1563  if (rq->page_pool)    in mlx5e_poll_rx_cq()
    1564  page_pool_nid_changed(rq->page_pool, numa_mem_id());    in mlx5e_poll_rx_cq()
|
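The last two hits show the recycling side staying NUMA-aware: every NAPI poll tells the pool which node is currently servicing the queue, so pages recycled after an IRQ migration get re-homed. A minimal sketch:

    /* NUMA rebinding sketch per the en_rx.c hits at 1563-1564: cheap to
     * call every poll, since the pool only acts when the node changed. */
    static void example_napi_numa_update(struct page_pool *pool)
    {
        if (pool)
            page_pool_nid_changed(pool, numa_mem_id());
    }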