• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * DMA BUF page pool system
4  *
5  * Copyright (C) 2020 Linaro Ltd.
6  *
7  * Based on the ION page pool code
8  * Copyright (C) 2011 Google, Inc.
9  */
10 
11 #include "page_pool.h"
12 
13 #include <linux/list.h>
14 #include <linux/shrinker.h>
15 #include <linux/spinlock.h>
16 #include <linux/swap.h>
17 #include <linux/sched/signal.h>
18 
/* Kinds of pages the pool keeps on separate free lists */
enum {
	POOL_LOWPAGE,	/* clean pages from lowmem */
	POOL_HIGHPAGE,	/* clean pages from highmem */

	POOL_TYPE_SIZE,	/* number of list types above */
};
26 
27 /**
28  * struct dmabuf_page_pool - pagepool struct
29  * @count[]:		array of number of pages of that type in the pool
30  * @items[]:		array of list of pages of the specific type
31  * @lock:		lock protecting this struct and especially the count
32  *			item list
33  * @gfp_mask:		gfp_mask to use from alloc
34  * @order:		order of pages in the pool
35  * @list:		list node for list of pools
36  *
37  * Allows you to keep a pool of pre allocated pages to use
38  */
39 struct dmabuf_page_pool {
40 	int count[POOL_TYPE_SIZE];
41 	struct list_head items[POOL_TYPE_SIZE];
42 	spinlock_t lock;
43 	gfp_t gfp_mask;
44 	unsigned int order;
45 	struct list_head list;
46 };
47 
/* All pools in the system; walked by the shrinker under pool_list_lock */
static LIST_HEAD(pool_list);
static DEFINE_MUTEX(pool_list_lock);
50 
51 static inline
dmabuf_page_pool_alloc_pages(struct dmabuf_page_pool * pool)52 struct page *dmabuf_page_pool_alloc_pages(struct dmabuf_page_pool *pool)
53 {
54 	if (fatal_signal_pending(current))
55 		return NULL;
56 	return alloc_pages(pool->gfp_mask, pool->order);
57 }
58 
dmabuf_page_pool_free_pages(struct dmabuf_page_pool * pool,struct page * page)59 static inline void dmabuf_page_pool_free_pages(struct dmabuf_page_pool *pool,
60 					       struct page *page)
61 {
62 	__free_pages(page, pool->order);
63 }
64 
dmabuf_page_pool_add(struct dmabuf_page_pool * pool,struct page * page)65 static void dmabuf_page_pool_add(struct dmabuf_page_pool *pool, struct page *page)
66 {
67 	int index;
68 
69 	if (PageHighMem(page))
70 		index = POOL_HIGHPAGE;
71 	else
72 		index = POOL_LOWPAGE;
73 
74 	spin_lock(&pool->lock);
75 	list_add_tail(&page->lru, &pool->items[index]);
76 	pool->count[index]++;
77 	spin_unlock(&pool->lock);
78 	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
79 			    1 << pool->order);
80 }
81 
dmabuf_page_pool_remove(struct dmabuf_page_pool * pool,int index)82 static struct page *dmabuf_page_pool_remove(struct dmabuf_page_pool *pool, int index)
83 {
84 	struct page *page;
85 
86 	spin_lock(&pool->lock);
87 	page = list_first_entry_or_null(&pool->items[index], struct page, lru);
88 	if (page) {
89 		pool->count[index]--;
90 		list_del(&page->lru);
91 		spin_unlock(&pool->lock);
92 		mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
93 				    -(1 << pool->order));
94 		goto out;
95 	}
96 	spin_unlock(&pool->lock);
97 out:
98 	return page;
99 }
100 
dmabuf_page_pool_fetch(struct dmabuf_page_pool * pool)101 static struct page *dmabuf_page_pool_fetch(struct dmabuf_page_pool *pool)
102 {
103 	struct page *page = NULL;
104 
105 	page = dmabuf_page_pool_remove(pool, POOL_HIGHPAGE);
106 	if (!page)
107 		page = dmabuf_page_pool_remove(pool, POOL_LOWPAGE);
108 
109 	return page;
110 }
111 
dmabuf_page_pool_alloc(struct dmabuf_page_pool * pool)112 struct page *dmabuf_page_pool_alloc(struct dmabuf_page_pool *pool)
113 {
114 	struct page *page = NULL;
115 
116 	if (WARN_ON(!pool))
117 		return NULL;
118 
119 	page = dmabuf_page_pool_fetch(pool);
120 
121 	if (!page)
122 		page = dmabuf_page_pool_alloc_pages(pool);
123 	return page;
124 }
125 EXPORT_SYMBOL_GPL(dmabuf_page_pool_alloc);
126 
dmabuf_page_pool_free(struct dmabuf_page_pool * pool,struct page * page)127 void dmabuf_page_pool_free(struct dmabuf_page_pool *pool, struct page *page)
128 {
129 	if (WARN_ON(pool->order != compound_order(page)))
130 		return;
131 
132 	dmabuf_page_pool_add(pool, page);
133 }
134 EXPORT_SYMBOL_GPL(dmabuf_page_pool_free);
135 
/*
 * Number of 0-order pages held by the pool; highmem pages are counted
 * only when @high is set. Reads the counters without the pool lock.
 */
static int dmabuf_page_pool_total(struct dmabuf_page_pool *pool, bool high)
{
	int npages = pool->count[POOL_LOWPAGE];

	if (high)
		npages += pool->count[POOL_HIGHPAGE];

	return npages << pool->order;
}
145 
/**
 * dmabuf_page_pool_create - allocate and register a new page pool
 * @gfp_mask: allocation flags for pages handed out by this pool
 * @order: page order for every allocation from this pool
 *
 * Returns the new pool, or NULL on allocation failure. The pool is
 * linked onto the global list so the shrinker can reclaim from it.
 */
struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask, unsigned int order)
{
	struct dmabuf_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	int i;

	if (!pool)
		return NULL;

	spin_lock_init(&pool->lock);
	for (i = 0; i < POOL_TYPE_SIZE; i++) {
		INIT_LIST_HEAD(&pool->items[i]);
		pool->count[i] = 0;
	}
	/* __GFP_COMP so higher-order allocations come back as compound pages */
	pool->gfp_mask = gfp_mask | __GFP_COMP;
	pool->order = order;

	mutex_lock(&pool_list_lock);
	list_add(&pool->list, &pool_list);
	mutex_unlock(&pool_list_lock);

	return pool;
}
EXPORT_SYMBOL_GPL(dmabuf_page_pool_create);
169 
dmabuf_page_pool_destroy(struct dmabuf_page_pool * pool)170 void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool)
171 {
172 	struct page *page;
173 	int i;
174 
175 	/* Remove us from the pool list */
176 	mutex_lock(&pool_list_lock);
177 	list_del(&pool->list);
178 	mutex_unlock(&pool_list_lock);
179 
180 	/* Free any remaining pages in the pool */
181 	for (i = 0; i < POOL_TYPE_SIZE; i++) {
182 		while ((page = dmabuf_page_pool_remove(pool, i)))
183 			dmabuf_page_pool_free_pages(pool, page);
184 	}
185 
186 	kfree(pool);
187 }
188 EXPORT_SYMBOL_GPL(dmabuf_page_pool_destroy);
189 
dmabuf_page_pool_get_size(struct dmabuf_page_pool * pool)190 unsigned long dmabuf_page_pool_get_size(struct dmabuf_page_pool *pool)
191 {
192 	int i;
193 	unsigned long num_pages = 0;
194 
195 	spin_lock(&pool->lock);
196 	for (i = 0; i < POOL_TYPE_SIZE; ++i)
197 		num_pages += pool->count[i];
198 	spin_unlock(&pool->lock);
199 	num_pages <<= pool->order; /* pool order is immutable */
200 
201 	return num_pages * PAGE_SIZE;
202 }
203 EXPORT_SYMBOL_GPL(dmabuf_page_pool_get_size);
204 
/*
 * Shrinker worker for one pool. With nr_to_scan == 0 just report how
 * many 0-order pages could be reclaimed; otherwise free up to
 * nr_to_scan pages and return how many were actually freed.
 */
static int dmabuf_page_pool_do_shrink(struct dmabuf_page_pool *pool, gfp_t gfp_mask,
				      int nr_to_scan)
{
	/* kswapd may reclaim highmem; direct reclaim only if the caller can */
	bool high = current_is_kswapd() || (gfp_mask & __GFP_HIGHMEM);
	int freed = 0;

	if (nr_to_scan == 0)
		return dmabuf_page_pool_total(pool, high);

	while (freed < nr_to_scan) {
		/* Reclaim lowmem pages first */
		struct page *page = dmabuf_page_pool_remove(pool, POOL_LOWPAGE);

		if (!page)
			page = dmabuf_page_pool_remove(pool, POOL_HIGHPAGE);
		if (!page)
			break;

		dmabuf_page_pool_free_pages(pool, page);
		freed += 1 << pool->order;
	}

	return freed;
}
236 
/*
 * Run the count (nr_to_scan == 0) or scan pass across every registered
 * pool and return the aggregate result.
 */
static int dmabuf_page_pool_shrink(gfp_t gfp_mask, int nr_to_scan)
{
	struct dmabuf_page_pool *pool;
	bool count_only = !nr_to_scan;
	int nr_total = 0;

	mutex_lock(&pool_list_lock);
	list_for_each_entry(pool, &pool_list, list) {
		int nr = dmabuf_page_pool_do_shrink(pool, gfp_mask, nr_to_scan);

		nr_total += nr;
		if (count_only)
			continue;

		/* Stop once this scan's budget has been consumed */
		nr_to_scan -= nr;
		if (nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&pool_list_lock);

	return nr_total;
}
267 
/*
 * shrinker->count_objects callback: passing nr_to_scan == 0 makes the
 * helper report the number of reclaimable pages instead of freeing any.
 */
static unsigned long dmabuf_page_pool_shrink_count(struct shrinker *shrinker,
						   struct shrink_control *sc)
{
	return dmabuf_page_pool_shrink(sc->gfp_mask, 0);
}
273 
dmabuf_page_pool_shrink_scan(struct shrinker * shrinker,struct shrink_control * sc)274 static unsigned long dmabuf_page_pool_shrink_scan(struct shrinker *shrinker,
275 						  struct shrink_control *sc)
276 {
277 	if (sc->nr_to_scan == 0)
278 		return 0;
279 	return dmabuf_page_pool_shrink(sc->gfp_mask, sc->nr_to_scan);
280 }
281 
282 struct shrinker pool_shrinker = {
283 	.count_objects = dmabuf_page_pool_shrink_count,
284 	.scan_objects = dmabuf_page_pool_shrink_scan,
285 	.seeks = DEFAULT_SEEKS,
286 	.batch = 0,
287 };
288 
/* Register the shrinker at module load so reclaim can drain the pools */
static int dmabuf_page_pool_init_shrinker(void)
{
	return register_shrinker(&pool_shrinker);
}
module_init(dmabuf_page_pool_init_shrinker);
MODULE_LICENSE("GPL v2");
295