/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(pgmap_array);

#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_managed_enable;

static void devmap_managed_enable_put(void)
{
        if (atomic_dec_and_test(&devmap_managed_enable))
                static_branch_disable(&devmap_managed_key);
}

static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
        if (!pgmap->ops || !pgmap->ops->page_free) {
                WARN(1, "Missing page_free method\n");
                return -EINVAL;
        }

        if (atomic_inc_return(&devmap_managed_enable) == 1)
                static_branch_enable(&devmap_managed_key);
        return 0;
}
#else
static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
        return -EINVAL;
}
static void devmap_managed_enable_put(void)
{
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */

static void pgmap_array_delete(struct resource *res)
{
        xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
                        NULL, GFP_KERNEL);
        synchronize_rcu();
}

/*
 * The first device pfn for which pfn_to_page() is valid, i.e. skipping any
 * pfns set aside for the optional altmap reservation.
 */
static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
        return PHYS_PFN(pgmap->res.start) +
                vmem_altmap_offset(pgmap_altmap(pgmap));
}

/* One past the last pfn covered by the pgmap's resource. */
static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
        const struct resource *res = &pgmap->res;

        return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

/* Device pfn ranges can be very large; reschedule periodically while walking. */
static unsigned long pfn_next(unsigned long pfn)
{
        if (pfn % 1024 == 0)
                cond_resched();
        return pfn + 1;
}

#define for_each_device_pfn(pfn, map) \
        for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))

static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
        if (pgmap->ops && pgmap->ops->kill)
                pgmap->ops->kill(pgmap);
        else
                percpu_ref_kill(pgmap->ref);
}

static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
{
        if (pgmap->ops && pgmap->ops->cleanup) {
                pgmap->ops->cleanup(pgmap);
        } else {
                wait_for_completion(&pgmap->done);
                percpu_ref_exit(pgmap->ref);
        }
        /*
         * Undo the pgmap ref assignment for the internal case as the
         * caller may re-enable the same pgmap.
         */
        if (pgmap->ref == &pgmap->internal_ref)
                pgmap->ref = NULL;
}

void memunmap_pages(struct dev_pagemap *pgmap)
{
        struct resource *res = &pgmap->res;
        struct page *first_page;
        unsigned long pfn;
        int nid;

        dev_pagemap_kill(pgmap);
        for_each_device_pfn(pfn, pgmap)
                put_page(pfn_to_page(pfn));
        dev_pagemap_cleanup(pgmap);

        /* make sure to access a memmap that was actually initialized */
        first_page = pfn_to_page(pfn_first(pgmap));

        /* pages are dead and unused, undo the arch mapping */
        nid = page_to_nid(first_page);

        mem_hotplug_begin();
        if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
                __remove_pages(PHYS_PFN(res->start),
                               PHYS_PFN(resource_size(res)), NULL);
        } else {
                arch_remove_memory(nid, res->start, resource_size(res),
                                pgmap_altmap(pgmap));
                kasan_remove_zero_shadow(__va(res->start), resource_size(res));
        }
        mem_hotplug_done();

        untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
        pgmap_array_delete(res);
        WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
        devmap_managed_enable_put();
}
EXPORT_SYMBOL_GPL(memunmap_pages);

static void devm_memremap_pages_release(void *data)
{
        memunmap_pages(data);
}

static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
        struct dev_pagemap *pgmap =
                container_of(ref, struct dev_pagemap, internal_ref);

        complete(&pgmap->done);
}

/*
 * Not the device-managed version of devm_memremap_pages(); undone by
 * memunmap_pages().  Please use devm_memremap_pages() if you have a struct
 * device available.  An illustrative usage sketch follows the function
 * body below.
 */
void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{
        struct resource *res = &pgmap->res;
        struct dev_pagemap *conflict_pgmap;
        struct mhp_restrictions restrictions = {
                /*
                 * We do not want any optional features, only our own memmap.
                 */
                .altmap = pgmap_altmap(pgmap),
        };
        pgprot_t pgprot = PAGE_KERNEL;
        int error, is_ram;
        bool need_devmap_managed = true;

        switch (pgmap->type) {
        case MEMORY_DEVICE_PRIVATE:
                if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
                        WARN(1, "Device private memory not supported\n");
                        return ERR_PTR(-EINVAL);
                }
                if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
                        WARN(1, "Missing migrate_to_ram method\n");
                        return ERR_PTR(-EINVAL);
                }
                break;
        case MEMORY_DEVICE_FS_DAX:
                if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
                    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
                        WARN(1, "File system DAX not supported\n");
                        return ERR_PTR(-EINVAL);
                }
                break;
        case MEMORY_DEVICE_DEVDAX:
        case MEMORY_DEVICE_PCI_P2PDMA:
                need_devmap_managed = false;
                break;
        default:
                WARN(1, "Invalid pgmap type %d\n", pgmap->type);
                break;
        }

        if (!pgmap->ref) {
                if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
                        return ERR_PTR(-EINVAL);

                init_completion(&pgmap->done);
                error = percpu_ref_init(&pgmap->internal_ref,
                                dev_pagemap_percpu_release, 0, GFP_KERNEL);
                if (error)
                        return ERR_PTR(error);
                pgmap->ref = &pgmap->internal_ref;
        } else {
                if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
                        WARN(1, "Missing reference count teardown definition\n");
                        return ERR_PTR(-EINVAL);
                }
        }

        if (need_devmap_managed) {
                error = devmap_managed_enable_get(pgmap);
                if (error)
                        return ERR_PTR(error);
        }

        conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL);
        if (conflict_pgmap) {
                WARN(1, "Conflicting mapping in same section\n");
                put_dev_pagemap(conflict_pgmap);
                error = -ENOMEM;
                goto err_array;
        }

        conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->end), NULL);
        if (conflict_pgmap) {
                WARN(1, "Conflicting mapping in same section\n");
                put_dev_pagemap(conflict_pgmap);
                error = -ENOMEM;
                goto err_array;
        }

        is_ram = region_intersects(res->start, resource_size(res),
                IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

        if (is_ram != REGION_DISJOINT) {
                WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
                                is_ram == REGION_MIXED ? "mixed" : "ram", res);
                error = -ENXIO;
                goto err_array;
        }

        error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
                                PHYS_PFN(res->end), pgmap, GFP_KERNEL));
        if (error)
                goto err_array;

        if (nid < 0)
                nid = numa_mem_id();

        error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(res->start), 0,
                        resource_size(res));
        if (error)
                goto err_pfn_remap;

        mem_hotplug_begin();
        /*
         * For device private memory we call add_pages() as we only need to
         * allocate and initialize struct page for the device memory.
         * Moreover, the device memory is inaccessible, so we do not want to
         * create a linear mapping for the memory like arch_add_memory()
         * would do.
         *
         * For all other device memory types, which are accessible by
         * the CPU, we do want the linear mapping and thus use
         * arch_add_memory().
         */
        if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
                error = add_pages(nid, PHYS_PFN(res->start),
                                PHYS_PFN(resource_size(res)), &restrictions);
        } else {
                error = kasan_add_zero_shadow(__va(res->start), resource_size(res));
                if (error) {
                        mem_hotplug_done();
                        goto err_kasan;
                }

                error = arch_add_memory(nid, res->start, resource_size(res),
                                &restrictions);
        }

        if (!error) {
                struct zone *zone;

                zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
                move_pfn_range_to_zone(zone, PHYS_PFN(res->start),
                                PHYS_PFN(resource_size(res)), restrictions.altmap);
        }

        mem_hotplug_done();
        if (error)
                goto err_add_memory;

        /*
         * Initialization of the pages has been deferred until now in order
         * to allow us to do the work while not holding the hotplug lock.
         */
        memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
                                PHYS_PFN(res->start),
                                PHYS_PFN(resource_size(res)), pgmap);
        percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
        return __va(res->start);

 err_add_memory:
        kasan_remove_zero_shadow(__va(res->start), resource_size(res));
 err_kasan:
        untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
 err_pfn_remap:
        pgmap_array_delete(res);
 err_array:
        dev_pagemap_kill(pgmap);
        dev_pagemap_cleanup(pgmap);
        devmap_managed_enable_put();
        return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(memremap_pages);
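
/*
 * Illustrative sketch (not part of the original file): how a caller without
 * a struct device might pair memremap_pages() with memunmap_pages().  The
 * example_* names and ops structure are hypothetical; only the pgmap fields
 * consumed by memremap_pages() above are assumed.
 *
 *      static struct dev_pagemap example_pgmap;
 *
 *      static int example_create(struct resource *res)
 *      {
 *              void *addr;
 *
 *              example_pgmap.type = MEMORY_DEVICE_PRIVATE;
 *              example_pgmap.res = *res;
 *              // MEMORY_DEVICE_PRIVATE requires ops with migrate_to_ram
 *              // and page_free (see the type checks above).
 *              example_pgmap.ops = &example_devmem_ops;
 *              addr = memremap_pages(&example_pgmap, NUMA_NO_NODE);
 *              return PTR_ERR_OR_ZERO(addr);
 *      }
 *
 *      static void example_destroy(void)
 *      {
 *              memunmap_pages(&example_pgmap);
 *      }
 */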

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res and type members of @pgmap must be initialized
 *    by the caller before passing it to this function.
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref
 *    must be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 *
 * An illustrative usage sketch follows the function body below.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
        int error;
        void *ret;

        ret = memremap_pages(pgmap, dev_to_node(dev));
        if (IS_ERR(ret))
                return ret;

        error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
                        pgmap);
        if (error)
                return ERR_PTR(error);
        return ret;
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
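
/*
 * Illustrative sketch (not part of the original file): minimal devm usage
 * from a hypothetical driver probe path.  example_probe() and the choice of
 * MEMORY_DEVICE_DEVDAX are assumptions made for the example; the caller is
 * presumed to have already claimed the physical range described by @res.
 *
 *      static int example_probe(struct device *dev, struct resource *res)
 *      {
 *              struct dev_pagemap *pgmap;
 *              void *addr;
 *
 *              pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
 *              if (!pgmap)
 *                      return -ENOMEM;
 *
 *              pgmap->type = MEMORY_DEVICE_DEVDAX;
 *              pgmap->res = *res;
 *              // No ->ref and no ->kill/->cleanup ops: the internal
 *              // percpu ref and completion set up above are used.
 *              addr = devm_memremap_pages(dev, pgmap);
 *              if (IS_ERR(addr))
 *                      return PTR_ERR(addr);
 *              // Teardown happens via devm_memremap_pages_release() on
 *              // driver unbind, or explicitly via devm_memunmap_pages().
 *              return 0;
 *      }
 */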

void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
        devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
        /* number of pfns from base where pfn_to_page() is valid */
        if (altmap)
                return altmap->reserve + altmap->free;
        return 0;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
        altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up a page map for
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 *
 * A usage sketch follows the function body below.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
                struct dev_pagemap *pgmap)
{
        resource_size_t phys = PFN_PHYS(pfn);

        /*
         * In the cached case we're already holding a live reference.
         */
        if (pgmap) {
                if (phys >= pgmap->res.start && phys <= pgmap->res.end)
                        return pgmap;
                put_dev_pagemap(pgmap);
        }

        /* fall back to slow path lookup */
        rcu_read_lock();
        pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
        if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
                pgmap = NULL;
        rcu_read_unlock();

        return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
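
/*
 * Illustrative sketch (not part of the original file): walking a run of pfns
 * while reusing the cached pgmap, similar to what gup/dax callers do.  The
 * loop body and the start_pfn/end_pfn bounds are placeholders; only the
 * get_dev_pagemap()/put_dev_pagemap() behaviour documented above is assumed.
 *
 *      struct dev_pagemap *pgmap = NULL;
 *      unsigned long pfn;
 *
 *      for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *              pgmap = get_dev_pagemap(pfn, pgmap);
 *              if (!pgmap)
 *                      break;  // pfn is not device memory
 *              // ... operate on pfn_to_page(pfn) ...
 *      }
 *      // A stale reference, if any, was dropped inside get_dev_pagemap();
 *      // only the final live reference needs to be put here.
 *      if (pgmap)
 *              put_dev_pagemap(pgmap);
 */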

#ifdef CONFIG_DEV_PAGEMAP_OPS
void __put_devmap_managed_page(struct page *page)
{
        int count = page_ref_dec_return(page);

        /*
         * If the refcount is 1 the page is free, and the refcount is stable
         * because nobody else holds a reference on the page.
         */
        if (count == 1) {
                /* Clear Active bit in case of parallel mark_page_accessed */
                __ClearPageActive(page);
                __ClearPageWaiters(page);

                mem_cgroup_uncharge(page);

                /*
                 * When a device_private page is freed, the page->mapping field
                 * may still contain a (stale) mapping value. For example, the
                 * lower bits of page->mapping may still identify the page as
                 * an anonymous page. Ultimately, this entire field is just
                 * stale and wrong, and it will cause errors if not cleared.
                 * One example is:
                 *
                 *  migrate_vma_pages()
                 *    migrate_vma_insert_page()
                 *      page_add_new_anon_rmap()
                 *        __page_set_anon_rmap()
                 *          ...checks page->mapping, via PageAnon(page) call,
                 *            and incorrectly concludes that the page is an
                 *            anonymous page. Therefore, it incorrectly,
                 *            silently fails to set up the new anon rmap.
                 *
                 * For other types of ZONE_DEVICE pages, migration is either
                 * handled differently or not done at all, so there is no need
                 * to clear page->mapping.
                 */
                if (is_device_private_page(page))
                        page->mapping = NULL;

                page->pgmap->ops->page_free(page);
        } else if (!count)
                __put_page(page);
}
EXPORT_SYMBOL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */