1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * DMA BUF page pool system
4 *
5 * Copyright (C) 2020 Linaro Ltd.
6 *
7 * Based on the ION page pool code
8 * Copyright (C) 2011 Google, Inc.
9 */
10
11 #include <linux/freezer.h>
12 #include <linux/list.h>
13 #include <linux/slab.h>
14 #include <linux/swap.h>
15 #include <linux/sched/signal.h>
16 #include "page_pool.h"
17
/*
 * Global registry of every live pool so the shrinker can walk them all;
 * additions/removals are serialized by pool_list_lock.
 */
static LIST_HEAD(pool_list);
static DEFINE_MUTEX(pool_list_lock);
20
21 static inline
dmabuf_page_pool_alloc_pages(struct dmabuf_page_pool * pool)22 struct page *dmabuf_page_pool_alloc_pages(struct dmabuf_page_pool *pool)
23 {
24 if (fatal_signal_pending(current))
25 return NULL;
26 return alloc_pages(pool->gfp_mask, pool->order);
27 }
28
/* Hand @page (a block of pool->order pages) back to the page allocator. */
static inline void dmabuf_page_pool_free_pages(struct dmabuf_page_pool *pool,
					       struct page *page)
{
	__free_pages(page, pool->order);
}
34
dmabuf_page_pool_add(struct dmabuf_page_pool * pool,struct page * page)35 static void dmabuf_page_pool_add(struct dmabuf_page_pool *pool, struct page *page)
36 {
37 int index;
38
39 if (PageHighMem(page))
40 index = POOL_HIGHPAGE;
41 else
42 index = POOL_LOWPAGE;
43
44 mutex_lock(&pool->mutex);
45 list_add_tail(&page->lru, &pool->items[index]);
46 pool->count[index]++;
47 mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
48 1 << pool->order);
49 mutex_unlock(&pool->mutex);
50 }
51
dmabuf_page_pool_remove(struct dmabuf_page_pool * pool,int index)52 static struct page *dmabuf_page_pool_remove(struct dmabuf_page_pool *pool, int index)
53 {
54 struct page *page;
55
56 mutex_lock(&pool->mutex);
57 page = list_first_entry_or_null(&pool->items[index], struct page, lru);
58 if (page) {
59 pool->count[index]--;
60 list_del(&page->lru);
61 mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
62 -(1 << pool->order));
63 }
64 mutex_unlock(&pool->mutex);
65
66 return page;
67 }
68
dmabuf_page_pool_fetch(struct dmabuf_page_pool * pool)69 static struct page *dmabuf_page_pool_fetch(struct dmabuf_page_pool *pool)
70 {
71 struct page *page = NULL;
72
73 page = dmabuf_page_pool_remove(pool, POOL_HIGHPAGE);
74 if (!page)
75 page = dmabuf_page_pool_remove(pool, POOL_LOWPAGE);
76
77 return page;
78 }
79
dmabuf_page_pool_alloc(struct dmabuf_page_pool * pool)80 struct page *dmabuf_page_pool_alloc(struct dmabuf_page_pool *pool)
81 {
82 struct page *page = NULL;
83
84 if (WARN_ON(!pool))
85 return NULL;
86
87 page = dmabuf_page_pool_fetch(pool);
88
89 if (!page)
90 page = dmabuf_page_pool_alloc_pages(pool);
91 return page;
92 }
93 EXPORT_SYMBOL_GPL(dmabuf_page_pool_alloc);
94
dmabuf_page_pool_free(struct dmabuf_page_pool * pool,struct page * page)95 void dmabuf_page_pool_free(struct dmabuf_page_pool *pool, struct page *page)
96 {
97 if (WARN_ON(pool->order != compound_order(page)))
98 return;
99
100 dmabuf_page_pool_add(pool, page);
101 }
102 EXPORT_SYMBOL_GPL(dmabuf_page_pool_free);
103
/*
 * Total number of 0-order pages the pool holds. Lowmem entries always
 * count; highmem entries are included only when @high is set (i.e. the
 * caller can make use of highmem).
 */
static int dmabuf_page_pool_total(struct dmabuf_page_pool *pool, bool high)
{
	int blocks = high ?
		pool->count[POOL_LOWPAGE] + pool->count[POOL_HIGHPAGE] :
		pool->count[POOL_LOWPAGE];

	return blocks << pool->order;
}
113
/*
 * Create a pool that serves blocks of @order pages allocated with
 * @gfp_mask (__GFP_COMP is forced on so compound_order() works on the
 * returned pages). The new pool is registered with the shrinker list.
 * Returns NULL if the pool itself cannot be allocated.
 */
struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask, unsigned int order)
{
	struct dmabuf_page_pool *pool;
	int i;

	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->gfp_mask = gfp_mask | __GFP_COMP;
	pool->order = order;
	mutex_init(&pool->mutex);
	for (i = 0; i < POOL_TYPE_SIZE; i++) {
		INIT_LIST_HEAD(&pool->items[i]);
		pool->count[i] = 0;
	}

	/* Make the pool visible to the shrinker. */
	mutex_lock(&pool_list_lock);
	list_add(&pool->list, &pool_list);
	mutex_unlock(&pool_list_lock);

	return pool;
}
EXPORT_SYMBOL_GPL(dmabuf_page_pool_create);
137
dmabuf_page_pool_destroy(struct dmabuf_page_pool * pool)138 void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool)
139 {
140 struct page *page;
141 int i;
142
143 /* Remove us from the pool list */
144 mutex_lock(&pool_list_lock);
145 list_del(&pool->list);
146 mutex_unlock(&pool_list_lock);
147
148 /* Free any remaining pages in the pool */
149 for (i = 0; i < POOL_TYPE_SIZE; i++) {
150 while ((page = dmabuf_page_pool_remove(pool, i)))
151 dmabuf_page_pool_free_pages(pool, page);
152 }
153
154 kfree(pool);
155 }
156 EXPORT_SYMBOL_GPL(dmabuf_page_pool_destroy);
157
/*
 * Shrinker work for a single pool. With nr_to_scan == 0 this only
 * reports how many pages could be freed; otherwise it releases cached
 * pages (lowmem first) until nr_to_scan pages are freed or the pool is
 * empty, returning the number actually freed. Highmem entries are
 * counted/freed only for kswapd or __GFP_HIGHMEM-capable callers.
 */
static int dmabuf_page_pool_do_shrink(struct dmabuf_page_pool *pool, gfp_t gfp_mask,
				      int nr_to_scan)
{
	bool high = current_is_kswapd() || (gfp_mask & __GFP_HIGHMEM);
	int freed;

	if (nr_to_scan == 0)
		return dmabuf_page_pool_total(pool, high);

	for (freed = 0; freed < nr_to_scan; freed += 1 << pool->order) {
		struct page *page;

		/* Prefer giving back low pages; they are the scarcer resource. */
		page = dmabuf_page_pool_remove(pool, POOL_LOWPAGE);
		if (!page)
			page = dmabuf_page_pool_remove(pool, POOL_HIGHPAGE);
		if (!page)
			break;

		dmabuf_page_pool_free_pages(pool, page);
	}

	return freed;
}
189
/*
 * Apply the shrinker across every registered pool. nr_to_scan == 0 is a
 * counting pass (sum of freeable pages); a non-zero nr_to_scan is a
 * freeing pass that stops once the scan budget is consumed. Returns the
 * total counted or freed.
 */
static int dmabuf_page_pool_shrink(gfp_t gfp_mask, int nr_to_scan)
{
	struct dmabuf_page_pool *pool;
	int nr_total = 0;

	mutex_lock(&pool_list_lock);
	list_for_each_entry(pool, &pool_list, list) {
		int nr_freed = dmabuf_page_pool_do_shrink(pool, gfp_mask,
							  nr_to_scan);

		nr_total += nr_freed;
		/* A counting pass (nr_to_scan == 0) has no budget to spend. */
		if (nr_to_scan) {
			nr_to_scan -= nr_freed;
			if (nr_to_scan <= 0)
				break;
		}
	}
	mutex_unlock(&pool_list_lock);

	return nr_total;
}
220
/*
 * Shrinker ->count_objects hook: report how many pages all pools could
 * free (nr_to_scan == 0 selects the counting pass in
 * dmabuf_page_pool_shrink()).
 */
static unsigned long dmabuf_page_pool_shrink_count(struct shrinker *shrinker,
						   struct shrink_control *sc)
{
	return dmabuf_page_pool_shrink(sc->gfp_mask, 0);
}
226
/*
 * Shrinker ->scan_objects hook: free up to sc->nr_to_scan cached pages
 * across all pools, returning the number freed. A zero nr_to_scan is
 * rejected here so it cannot be misread as a counting pass downstream.
 */
static unsigned long dmabuf_page_pool_shrink_scan(struct shrinker *shrinker,
						  struct shrink_control *sc)
{
	if (sc->nr_to_scan == 0)
		return 0;
	return dmabuf_page_pool_shrink(sc->gfp_mask, sc->nr_to_scan);
}
234
/*
 * Shrinker registered with the MM so memory pressure can reclaim cached
 * pool pages.
 * NOTE(review): not marked static and no extern declaration is visible
 * in this file — if no other translation unit references it, it should
 * be static; confirm against the header before changing.
 */
struct shrinker pool_shrinker = {
	.count_objects = dmabuf_page_pool_shrink_count,
	.scan_objects = dmabuf_page_pool_shrink_scan,
	.seeks = DEFAULT_SEEKS,
	.batch = 0,
};
241
/* Module init: hook pool_shrinker into the MM reclaim path. */
static int dmabuf_page_pool_init_shrinker(void)
{
	return register_shrinker(&pool_shrinker);
}
module_init(dmabuf_page_pool_init_shrinker);
MODULE_LICENSE("GPL v2");
248