/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.h
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

/**
 * DOC: page_pool allocator
 *
 * This page_pool allocator is optimized for the XDP mode that
 * uses one frame per page, but it has fallbacks that act like the
 * regular page allocator APIs.
 *
 * Basic use involves replacing alloc_pages() calls with the
 * page_pool_alloc_pages() call.  Drivers should likely use
 * page_pool_dev_alloc_pages() in place of dev_alloc_pages().
 *
 * The API keeps track of in-flight pages, in order to let API users know
 * when it is safe to deallocate the page_pool object.  Thus, API users
 * must make sure to call page_pool_release_page() when a page is
 * "leaving" the page_pool, or call page_pool_put_page() where
 * appropriate, in order to maintain correct accounting.
 *
 * API users must only call page_pool_put_page() once on a page, as it
 * will either recycle the page, or in case of an elevated refcnt, it
 * will release the DMA mapping and in-flight state accounting.  We
 * hope to lift this requirement in the future.
 */
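
/* Usage sketch (illustrative, not part of the original header): the basic
 * alloc_pages() replacement described above, plus the matching return path.
 * The example_* names are hypothetical; only the page_pool_*() calls are
 * real API.  page_pool_put_full_page() must be called exactly once per
 * page: it either recycles the page or releases its DMA mapping and
 * in-flight accounting.
 *
 *	static struct page *example_rx_page_get(struct page_pool *pool)
 *	{
 *		return page_pool_alloc_pages(pool, GFP_ATOMIC | __GFP_NOWARN);
 *	}
 *
 *	static void example_rx_page_put(struct page_pool *pool, struct page *page)
 *	{
 *		page_pool_put_full_page(pool, page, false);
 *	}
 */
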
#ifndef _NET_PAGE_POOL_H
#define _NET_PAGE_POOL_H

#include <linux/mm.h> /* Needed by ptr_ring */
#include <linux/ptr_ring.h>
#include <linux/dma-direction.h>
#include <linux/android_kabi.h>

#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
					* map/unmap
					*/
#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set, all pages that the driver gets
					* from page_pool will be
					* DMA-synced-for-device according to
					* the length provided by the device
					* driver.
					* Please note DMA-sync-for-CPU is still
					* the device driver's responsibility.
					*/
#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)

/*
 * Fast allocation side cache array/stack
 *
 * The cache size and refill watermark are related to the network
 * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
 * ring is usually refilled and the max consumed elements will be 64,
 * thus a natural max size of objects needed in the cache.
 *
 * Keeping room for more objects is done for the XDP_DROP use-case, as
 * XDP_DROP allows the opportunity to recycle objects directly into
 * this array, since it shares the same softirq/NAPI protection.  If
 * the cache is already full (or partly full), then the XDP_DROP
 * recycles would have to take a slower code path.
 */
#define PP_ALLOC_CACHE_SIZE	128
#define PP_ALLOC_CACHE_REFILL	64
struct pp_alloc_cache {
	u32 count;
	void *cache[PP_ALLOC_CACHE_SIZE];
};

struct page_pool_params {
	unsigned int	flags;
	unsigned int	order;
	unsigned int	pool_size;
	int		nid;  /* NUMA node id to allocate pages from */
	struct device	*dev; /* device, for DMA pre-mapping purposes */
	enum dma_data_direction dma_dir; /* DMA mapping direction */
	unsigned int	max_len; /* max DMA sync memory size */
	unsigned int	offset;  /* DMA addr offset */
};
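
/* Setup sketch (illustrative, not part of this header): filling in
 * struct page_pool_params for a typical order-0 RX queue where the pool
 * handles both DMA mapping and DMA-sync-for-device.  example_create_pool()
 * and its arguments are hypothetical; the fields and flags are the ones
 * defined above, and callers typically check the result with IS_ERR().
 *
 *	static struct page_pool *example_create_pool(struct device *dev,
 *						     unsigned int ring_size)
 *	{
 *		struct page_pool_params pp_params = {
 *			.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *			.order		= 0,
 *			.pool_size	= ring_size,
 *			.nid		= NUMA_NO_NODE,
 *			.dev		= dev,
 *			.dma_dir	= DMA_FROM_DEVICE,
 *			.max_len	= PAGE_SIZE,
 *			.offset		= 0,
 *		};
 *
 *		return page_pool_create(&pp_params);
 *	}
 */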

struct page_pool {
	struct page_pool_params p;

	struct delayed_work release_dw;
	void (*disconnect)(void *);
	unsigned long defer_start;
	unsigned long defer_warn;

	u32 pages_state_hold_cnt;

	/*
	 * Data structure for the allocation side
	 *
	 * The driver's allocation side usually already performs some kind
	 * of resource protection.  Piggyback on this protection, and
	 * require the driver to protect the allocation side.
	 *
	 * For NIC drivers this means allocating a page_pool per
	 * RX-queue, as the RX-queue is already protected by
	 * softirq/BH scheduling and napi_schedule.  The NAPI schedule
	 * guarantees that a single napi_struct will only be scheduled
	 * on a single CPU (see napi_schedule).
	 */
	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;

	/* Data structure for storing recycled pages.
	 *
	 * Returning/freeing pages is more complicated synchronization
	 * wise, because frees can happen on remote CPUs, with no
	 * association with the allocation resource.
	 *
	 * Use ptr_ring, as it separates consumer and producer
	 * efficiently, in a way that doesn't bounce cache-lines.
	 *
	 * TODO: Implement bulk return of pages into this structure.
	 */
	struct ptr_ring ring;

	atomic_t pages_state_release_cnt;

	/* A page_pool is strictly tied to a single RX-queue being
	 * protected by NAPI, due to the above pp_alloc_cache.  This
	 * refcnt's purpose is to simplify drivers' error handling.
	 */
	refcount_t user_cnt;

	u64 destroy_cnt;

	ANDROID_KABI_RESERVE(1);
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);

static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_pages(pool, gfp);
}
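
/* Refill sketch (illustrative, not part of this header): an RX ring refill
 * loop running from NAPI context.  page_pool_dev_alloc_pages() already uses
 * GFP_ATOMIC | __GFP_NOWARN, so it is suitable for softirq context.  The
 * example_rx_queue/desc types and fields are hypothetical;
 * page_pool_get_dma_addr() is defined further down in this header and only
 * holds a valid address when the pool was created with PP_FLAG_DMA_MAP.
 *
 *	static int example_rx_refill(struct example_rx_queue *rxq, int count)
 *	{
 *		int i;
 *
 *		for (i = 0; i < count; i++) {
 *			struct page *page = page_pool_dev_alloc_pages(rxq->pool);
 *
 *			if (!page)
 *				return i;
 *			rxq->desc[i].addr = page_pool_get_dma_addr(page);
 *			rxq->desc[i].page = page;
 *		}
 *		return count;
 *	}
 */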

/* Get the stored DMA direction.  A driver might decide to store this locally
 * and avoid the extra cache-line access into page_pool to determine the
 * direction.
 */
static
inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
{
	return pool->p.dma_dir;
}

struct page_pool *page_pool_create(const struct page_pool_params *params);

#ifdef CONFIG_PAGE_POOL
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
void page_pool_release_page(struct page_pool *pool, struct page *page);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
}

static inline void page_pool_use_xdp_mem(struct page_pool *pool,
					 void (*disconnect)(void *))
{
}
static inline void page_pool_release_page(struct page_pool *pool,
					  struct page *page)
{
}
#endif

void page_pool_put_page(struct page_pool *pool, struct page *page,
			unsigned int dma_sync_size, bool allow_direct);

/* Same as above but will try to sync the entire area pool->max_len */
static inline void page_pool_put_full_page(struct page_pool *pool,
					   struct page *page, bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield the linker.
	 */
#ifdef CONFIG_PAGE_POOL
	page_pool_put_page(pool, page, -1, allow_direct);
#endif
}

/* Same as above but the caller must guarantee safe context, e.g. NAPI */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
	page_pool_put_full_page(pool, page, true);
}
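
/* Recycle sketch (illustrative, not part of this header): returning a page
 * from the same NAPI/softirq context that allocated it, for instance when
 * an XDP program returns XDP_DROP.  This is the fast path that refills the
 * pp_alloc_cache described above; it must only be used from the pool's own
 * NAPI context.  The example_rx_handle_drop() name is hypothetical.
 *
 *	static void example_rx_handle_drop(struct page_pool *pool,
 *					   struct page *page)
 *	{
 *		page_pool_recycle_direct(pool, page);
 *	}
 */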

/* The DMA address owned by the pool is stashed in struct page.  With a
 * 64-bit dma_addr_t on a 32-bit architecture it is split across two
 * unsigned long words; the double shift avoids a compiler warning when
 * dma_addr_t is only 32 bits wide.
 */
static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
	dma_addr_t ret = page->dma_addr[0];
	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
	return ret;
}

static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
	page->dma_addr[0] = addr;
	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		page->dma_addr[1] = upper_32_bits(addr);
}
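
/* Sync sketch (illustrative, not part of this header): DMA-sync-for-CPU
 * remains the driver's responsibility even with PP_FLAG_DMA_SYNC_DEV, so a
 * driver typically syncs the received region before touching the packet
 * data.  example_rx_sync() is hypothetical, and dma_sync_single_for_cpu()
 * requires <linux/dma-mapping.h>, which this header does not pull in.
 *
 *	static void *example_rx_sync(struct page_pool *pool, struct page *page,
 *				     unsigned int len)
 *	{
 *		dma_addr_t dma = page_pool_get_dma_addr(page);
 *
 *		dma_sync_single_for_cpu(pool->p.dev, dma + pool->p.offset,
 *					len, page_pool_get_dma_dir(pool));
 *		return page_address(page) + pool->p.offset;
 *	}
 */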

static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
	return true;
#else
	return false;
#endif
}

static inline bool page_pool_put(struct page_pool *pool)
{
	return refcount_dec_and_test(&pool->user_cnt);
}

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid);
static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
{
	if (unlikely(pool->p.nid != new_nid))
		page_pool_update_nid(pool, new_nid);
}
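
/* NUMA sketch (illustrative, not part of this header): a NAPI poll handler
 * can start by telling the pool which node it is now running on, so that
 * newly allocated pages stay local after an IRQ/NAPI migration.  The
 * example_* names are hypothetical; numa_mem_id() comes from
 * <linux/topology.h> and struct napi_struct from <linux/netdevice.h>,
 * which driver code normally includes anyway.
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct example_rx_queue *rxq =
 *			container_of(napi, struct example_rx_queue, napi);
 *
 *		page_pool_nid_changed(rxq->pool, numa_mem_id());
 *		return example_rx_poll(rxq, budget);
 *	}
 */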
#endif /* _NET_PAGE_POOL_H */