// SPDX-License-Identifier: GPL-2.0
/*
 * DMA BUF page pool system
 *
 * Copyright (C) 2020 Linaro Ltd.
 *
 * Based on the ION page pool code
 * Copyright (C) 2011 Google, Inc.
 */

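/*
 * Minimal usage sketch (caller code, not part of this file):
 *
 *	struct dmabuf_page_pool *pool = dmabuf_page_pool_create(GFP_KERNEL, 0);
 *	struct page *page = dmabuf_page_pool_alloc(pool);
 *	...use the page to back a dma-buf...
 *	dmabuf_page_pool_free(pool, page);	(recycles the page into the pool)
 *	dmabuf_page_pool_destroy(pool);		(drops the pool and any cached pages)
 */
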
#include <linux/freezer.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/sched/signal.h>
#include "page_pool.h"

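/* Every pool is linked onto pool_list so the shrinker below can walk them */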
static LIST_HEAD(pool_list);
static DEFINE_MUTEX(pool_list_lock);

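/*
 * Allocate a fresh page of pool->order from the page allocator; bail out
 * early if the calling task has a fatal signal pending.
 */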
static inline struct page *dmabuf_page_pool_alloc_pages(struct dmabuf_page_pool *pool)
{
	if (fatal_signal_pending(current)) {
		return NULL;
	}
	return alloc_pages(pool->gfp_mask, pool->order);
}

static inline void dmabuf_page_pool_free_pages(struct dmabuf_page_pool *pool, struct page *page)
{
	__free_pages(page, pool->order);
}

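/*
 * Stash a page in the pool, keeping highmem and lowmem pages on separate
 * lists, and account it as reclaimable kernel memory.
 */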
static void dmabuf_page_pool_add(struct dmabuf_page_pool *pool, struct page *page)
{
	int index;

	if (PageHighMem(page)) {
		index = POOL_HIGHPAGE;
	} else {
		index = POOL_LOWPAGE;
	}

	mutex_lock(&pool->mutex);
	list_add_tail(&page->lru, &pool->items[index]);
	pool->count[index]++;
	mutex_unlock(&pool->mutex);
	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE, 1 << pool->order);
}

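/*
 * Pull one page off the requested list (POOL_LOWPAGE or POOL_HIGHPAGE),
 * undoing the reclaimable accounting. Returns NULL if that list is empty.
 */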
static struct page *dmabuf_page_pool_remove(struct dmabuf_page_pool *pool, int index)
{
	struct page *page;

	mutex_lock(&pool->mutex);
	page = list_first_entry_or_null(&pool->items[index], struct page, lru);
	if (page) {
		pool->count[index]--;
		list_del(&page->lru);
		mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE, -(1 << pool->order));
	}
	mutex_unlock(&pool->mutex);

	return page;
}

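/* Prefer handing out highmem pages so lowmem is preserved for callers that need it */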
static struct page *dmabuf_page_pool_fetch(struct dmabuf_page_pool *pool)
{
	struct page *page = NULL;

	page = dmabuf_page_pool_remove(pool, POOL_HIGHPAGE);
	if (!page) {
		page = dmabuf_page_pool_remove(pool, POOL_LOWPAGE);
	}

	return page;
}

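/*
 * Hand out a page from the pool, falling back to a fresh allocation when the
 * pool is empty. May return NULL on allocation failure or a fatal signal.
 */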
struct page *dmabuf_page_pool_alloc(struct dmabuf_page_pool *pool)
{
	struct page *page = NULL;

	if (WARN_ON(!pool)) {
		return NULL;
	}

	page = dmabuf_page_pool_fetch(pool);
	if (!page) {
		page = dmabuf_page_pool_alloc_pages(pool);
	}
	return page;
}
EXPORT_SYMBOL_GPL(dmabuf_page_pool_alloc);

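/*
 * Return a page to the pool instead of freeing it. The page must match the
 * order the pool was created with.
 */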
void dmabuf_page_pool_free(struct dmabuf_page_pool *pool, struct page *page)
{
	if (WARN_ON(pool->order != compound_order(page))) {
		return;
	}

	dmabuf_page_pool_add(pool, page);
}
EXPORT_SYMBOL_GPL(dmabuf_page_pool_free);

static int dmabuf_page_pool_total(struct dmabuf_page_pool *pool, bool high)
{
	int count = pool->count[POOL_LOWPAGE];

	if (high) {
		count += pool->count[POOL_HIGHPAGE];
	}

	return count << pool->order;
}

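/*
 * Create a pool for pages of the given order. __GFP_COMP is added to the
 * mask so higher-order allocations come back as compound pages, and the new
 * pool is added to pool_list so the shrinker can see it.
 */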
struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask, unsigned int order)
{
	struct dmabuf_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	int i;

	if (!pool) {
		return NULL;
	}

	for (i = 0; i < POOL_TYPE_SIZE; i++) {
		pool->count[i] = 0;
		INIT_LIST_HEAD(&pool->items[i]);
	}
	pool->gfp_mask = gfp_mask | __GFP_COMP;
	pool->order = order;
	mutex_init(&pool->mutex);

	mutex_lock(&pool_list_lock);
	list_add(&pool->list, &pool_list);
	mutex_unlock(&pool_list_lock);

	return pool;
}
EXPORT_SYMBOL_GPL(dmabuf_page_pool_create);

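/* Unregister the pool from the shrinker list and release any cached pages */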
void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool)
{
	struct page *page;
	int i;

	/* Remove us from the pool list */
	mutex_lock(&pool_list_lock);
	list_del(&pool->list);
	mutex_unlock(&pool_list_lock);

	/* Free any remaining pages in the pool */
	for (i = 0; i < POOL_TYPE_SIZE; i++) {
		while ((page = dmabuf_page_pool_remove(pool, i))) {
			dmabuf_page_pool_free_pages(pool, page);
		}
	}

	kfree(pool);
}
EXPORT_SYMBOL_GPL(dmabuf_page_pool_destroy);

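/*
 * Shrink a single pool. With nr_to_scan == 0 this only reports the number of
 * pages that could be freed (highmem pages are counted for kswapd or
 * highmem-capable allocations); otherwise it frees up to nr_to_scan pages,
 * draining lowmem pages before highmem ones.
 */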
static int dmabuf_page_pool_do_shrink(struct dmabuf_page_pool *pool, gfp_t gfp_mask, int nr_to_scan)
{
	int freed = 0;
	bool high;

	if (current_is_kswapd()) {
		high = true;
	} else {
		high = !!(gfp_mask & __GFP_HIGHMEM);
	}

	if (nr_to_scan == 0) {
		return dmabuf_page_pool_total(pool, high);
	}

	while (freed < nr_to_scan) {
		struct page *page;

		/* Try to free low pages first */
		page = dmabuf_page_pool_remove(pool, POOL_LOWPAGE);
		if (!page) {
			page = dmabuf_page_pool_remove(pool, POOL_HIGHPAGE);
		}

		if (!page) {
			break;
		}

		dmabuf_page_pool_free_pages(pool, page);
		freed += (1 << pool->order);
	}

	return freed;
}

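/*
 * Shrink across all pools: with nr_to_scan == 0 just sum up the freeable
 * pages, otherwise free pages from each pool in turn until the request is
 * satisfied.
 */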
static int dmabuf_page_pool_shrink(gfp_t gfp_mask, int nr_to_scan)
{
	struct dmabuf_page_pool *pool;
	int nr_total = 0;
	int nr_freed;
	int only_scan = 0;

	if (!nr_to_scan) {
		only_scan = 1;
	}

	mutex_lock(&pool_list_lock);
	list_for_each_entry(pool, &pool_list, list) {
		if (only_scan) {
			nr_total += dmabuf_page_pool_do_shrink(pool, gfp_mask, nr_to_scan);
		} else {
			nr_freed = dmabuf_page_pool_do_shrink(pool, gfp_mask, nr_to_scan);
			nr_to_scan -= nr_freed;
			nr_total += nr_freed;
			if (nr_to_scan <= 0) {
				break;
			}
		}
	}
	mutex_unlock(&pool_list_lock);

	return nr_total;
}

static unsigned long dmabuf_page_pool_shrink_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	return dmabuf_page_pool_shrink(sc->gfp_mask, 0);
}

static unsigned long dmabuf_page_pool_shrink_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	if (sc->nr_to_scan == 0) {
		return 0;
	}
	return dmabuf_page_pool_shrink(sc->gfp_mask, sc->nr_to_scan);
}

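/*
 * Shrinker hooking the pools into memory reclaim; registered at module init
 * so cached pages are given back under memory pressure.
 */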
static struct shrinker pool_shrinker = {
	.count_objects = dmabuf_page_pool_shrink_count,
	.scan_objects = dmabuf_page_pool_shrink_scan,
	.seeks = DEFAULT_SEEKS,
	.batch = 0,
};

static int dmabuf_page_pool_init_shrinker(void)
{
	return register_shrinker(&pool_shrinker);
}
module_init(dmabuf_page_pool_init_shrinker);
MODULE_LICENSE("GPL v2");