Lines matching "linear-mapping-mode"
1 /* SPDX-License-Identifier: GPL-2.0 */
21 * constraints on the alignment and size of the mapping (namespace).
24 * the minimum mapping granularity of memremap_pages() is 16MB.
30 * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
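memremap_compat_align() is the hook that exports this worst-case alignment to namespace code. A minimal sketch of how a hypothetical provisioning path could use it (the helper below is illustrative, not part of this file):

#include <linux/kernel.h>	/* IS_ALIGNED() */
#include <linux/memremap.h>	/* memremap_compat_align() */
#include <linux/errno.h>

/* Illustrative check: refuse a namespace that is merely PAGE_SIZE aligned,
 * since switching it into a memremap_pages() mode later would demand the
 * arch compat alignment (SUBSECTION_SIZE, or 16MB on PowerPC). */
static int check_namespace_align(resource_size_t start, resource_size_t size)
{
	unsigned long align = memremap_compat_align();

	if (!IS_ALIGNED(start, align) || !IS_ALIGNED(size, align))
		return -EINVAL;
	return 0;
}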
46 if (pgmap->type == MEMORY_DEVICE_PRIVATE || in devmap_managed_enable_put()
47 pgmap->type == MEMORY_DEVICE_FS_DAX) in devmap_managed_enable_put()
53 if (pgmap->type == MEMORY_DEVICE_PRIVATE || in devmap_managed_enable_get()
54 pgmap->type == MEMORY_DEVICE_FS_DAX) in devmap_managed_enable_get()
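Only the type checks of the two enable/disable helpers are visible above; the bodies below are a hedged reconstruction of the usual pattern, bumping a static key so page-free fast paths stay cheap when no managed device memory exists (the key name devmap_managed_key is assumed, not shown in the excerpt):

DEFINE_STATIC_KEY_FALSE(devmap_managed_key);

static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
	    pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_dec(&devmap_managed_key);
}

static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
	    pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_inc(&devmap_managed_key);
}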
68 xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end), in pgmap_array_delete()
75 struct range *range = &pgmap->ranges[range_id]; in pfn_first()
76 unsigned long pfn = PHYS_PFN(range->start); in pfn_first()
87 for (i = 0; i < pgmap->nr_range; i++) { in pgmap_pfn_valid()
88 struct range *range = &pgmap->ranges[i]; in pgmap_pfn_valid()
90 if (pfn >= PHYS_PFN(range->start) && in pgmap_pfn_valid()
91 pfn <= PHYS_PFN(range->end)) in pgmap_pfn_valid()
100 const struct range *range = &pgmap->ranges[range_id]; in pfn_end()
102 return (range->start + range_len(range)) >> PAGE_SHIFT; in pfn_end()
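pfn_first() and pfn_end() bound one range of the pgmap in pfn units, and pgmap_pfn_valid() checks membership across all ranges. Since those helpers are static to this file, the sketch below restates the same arithmetic to walk every device page of one range (for range 0 the first usable pfn may additionally be offset by vmem_altmap_offset() when an altmap is in use, which the sketch ignores):

/* Sketch: visit each ZONE_DEVICE page backing range 'range_id' of an
 * already-mapped pgmap. Purely illustrative. */
static void walk_one_range(struct dev_pagemap *pgmap, int range_id)
{
	const struct range *range = &pgmap->ranges[range_id];
	unsigned long pfn = PHYS_PFN(range->start);
	unsigned long end_pfn = (range->start + range_len(range)) >> PAGE_SHIFT;

	for (; pfn < end_pfn; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/* ... operate on the device page ... */
		(void)page;
	}
}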
117 if (pgmap->ops && pgmap->ops->kill) in dev_pagemap_kill()
118 pgmap->ops->kill(pgmap); in dev_pagemap_kill()
120 percpu_ref_kill(pgmap->ref); in dev_pagemap_kill()
125 if (pgmap->ops && pgmap->ops->cleanup) { in dev_pagemap_cleanup()
126 pgmap->ops->cleanup(pgmap); in dev_pagemap_cleanup()
128 wait_for_completion(&pgmap->done); in dev_pagemap_cleanup()
129 percpu_ref_exit(pgmap->ref); in dev_pagemap_cleanup()
133 * caller may re-enable the same pgmap. in dev_pagemap_cleanup()
135 if (pgmap->ref == &pgmap->internal_ref) in dev_pagemap_cleanup()
136 pgmap->ref = NULL; in dev_pagemap_cleanup()
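dev_pagemap_kill() and dev_pagemap_cleanup() defer to ops->kill/ops->cleanup when the caller supplied its own pgmap->ref; otherwise they fall back to killing internal_ref and waiting on pgmap->done. A hedged sketch of the caller-supplied variant (all names hypothetical), mirroring the same kill/wait/exit sequence:

/* Hypothetical driver state: its own percpu_ref plus the completion that
 * the ref's release callback fires. */
struct my_dev_state {
	struct dev_pagemap pgmap;
	struct percpu_ref ref;
	struct completion done;
};

static void my_ref_release(struct percpu_ref *ref)
{
	struct my_dev_state *st = container_of(ref, struct my_dev_state, ref);

	complete(&st->done);		/* last reference dropped */
}

static void my_pgmap_kill(struct dev_pagemap *pgmap)
{
	struct my_dev_state *st = container_of(pgmap, struct my_dev_state, pgmap);

	percpu_ref_kill(&st->ref);	/* stop new page references */
}

static void my_pgmap_cleanup(struct dev_pagemap *pgmap)
{
	struct my_dev_state *st = container_of(pgmap, struct my_dev_state, pgmap);

	wait_for_completion(&st->done);	/* wait out existing references */
	percpu_ref_exit(&st->ref);
}

static const struct dev_pagemap_ops my_ops = {
	.kill	 = my_pgmap_kill,
	.cleanup = my_pgmap_cleanup,
};

/* Before memremap_pages(): percpu_ref_init(&st->ref, my_ref_release, 0,
 * GFP_KERNEL), init_completion(&st->done), and st->pgmap.ref = &st->ref. */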
141 struct range *range = &pgmap->ranges[range_id]; in pageunmap_range()
148 /* pages are dead and unused, undo the arch mapping */ in pageunmap_range()
152 remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start), in pageunmap_range()
154 if (pgmap->type == MEMORY_DEVICE_PRIVATE) { in pageunmap_range()
155 __remove_pages(PHYS_PFN(range->start), in pageunmap_range()
158 arch_remove_memory(nid, range->start, range_len(range), in pageunmap_range()
160 kasan_remove_zero_shadow(__va(range->start), range_len(range)); in pageunmap_range()
164 untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range)); in pageunmap_range()
174 for (i = 0; i < pgmap->nr_range; i++) in memunmap_pages()
179 for (i = 0; i < pgmap->nr_range; i++) in memunmap_pages()
182 WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n"); in memunmap_pages()
197 complete(&pgmap->done); in dev_pagemap_percpu_release()
203 struct range *range = &pgmap->ranges[range_id]; in pagemap_range()
209 return -EINVAL; in pagemap_range()
211 conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL); in pagemap_range()
213 WARN(1, "Conflicting mapping in same section\n"); in pagemap_range()
215 return -ENOMEM; in pagemap_range()
218 conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL); in pagemap_range()
220 WARN(1, "Conflicting mapping in same section\n"); in pagemap_range()
222 return -ENOMEM; in pagemap_range()
225 is_ram = region_intersects(range->start, range_len(range), in pagemap_range()
229 WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n", in pagemap_range()
231 range->start, range->end); in pagemap_range()
232 return -ENXIO; in pagemap_range()
235 error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start), in pagemap_range()
236 PHYS_PFN(range->end), pgmap, GFP_KERNEL)); in pagemap_range()
243 error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0, in pagemap_range()
252 * allocate and initialize struct page for the device memory. More- in pagemap_range()
253 * over the device memory is un-accessible thus we do not want to in pagemap_range()
254 * create a linear mapping for the memory like arch_add_memory() in pagemap_range()
258 * the CPU, we do want the linear mapping and thus use in pagemap_range()
261 if (pgmap->type == MEMORY_DEVICE_PRIVATE) { in pagemap_range()
262 error = add_pages(nid, PHYS_PFN(range->start), in pagemap_range()
265 error = kasan_add_zero_shadow(__va(range->start), range_len(range)); in pagemap_range()
271 error = arch_add_memory(nid, range->start, range_len(range), in pagemap_range()
278 zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE]; in pagemap_range()
279 move_pfn_range_to_zone(zone, PHYS_PFN(range->start), in pagemap_range()
280 PHYS_PFN(range_len(range)), params->altmap, in pagemap_range()
292 memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], in pagemap_range()
293 PHYS_PFN(range->start), in pagemap_range()
295 percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id) in pagemap_range()
296 - pfn_first(pgmap, range_id)); in pagemap_range()
300 kasan_remove_zero_shadow(__va(range->start), range_len(range)); in pagemap_range()
302 untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range)); in pagemap_range()
320 const int nr_range = pgmap->nr_range; in memremap_pages()
324 return ERR_PTR(-EINVAL); in memremap_pages()
326 switch (pgmap->type) { in memremap_pages()
330 return ERR_PTR(-EINVAL); in memremap_pages()
332 if (!pgmap->ops || !pgmap->ops->migrate_to_ram) { in memremap_pages()
334 return ERR_PTR(-EINVAL); in memremap_pages()
336 if (!pgmap->ops->page_free) { in memremap_pages()
338 return ERR_PTR(-EINVAL); in memremap_pages()
340 if (!pgmap->owner) { in memremap_pages()
342 return ERR_PTR(-EINVAL); in memremap_pages()
349 return ERR_PTR(-EINVAL); in memremap_pages()
358 WARN(1, "Invalid pgmap type %d\n", pgmap->type); in memremap_pages()
362 if (!pgmap->ref) { in memremap_pages()
363 if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup)) in memremap_pages()
364 return ERR_PTR(-EINVAL); in memremap_pages()
366 init_completion(&pgmap->done); in memremap_pages()
367 error = percpu_ref_init(&pgmap->internal_ref, in memremap_pages()
371 pgmap->ref = &pgmap->internal_ref; in memremap_pages()
373 if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) { in memremap_pages()
375 return ERR_PTR(-EINVAL); in memremap_pages()
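Putting the validation above together for the device-private case: the caller must supply migrate_to_ram and page_free ops plus an owner, and if it leaves pgmap->ref NULL it must not supply kill/cleanup ops, since memremap_pages() then installs internal_ref. A hedged sketch of a pgmap that satisfies those rules (all names and stub bodies are placeholders):

static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
{
	/* ... migrate the device-private page back to system RAM ... */
	return VM_FAULT_SIGBUS;			/* placeholder */
}

static void my_page_free(struct page *page)
{
	/* ... return the page to the driver's allocator ... */
}

static const struct dev_pagemap_ops my_private_ops = {
	.page_free	= my_page_free,		/* required for DEVICE_PRIVATE */
	.migrate_to_ram	= my_migrate_to_ram,	/* required for DEVICE_PRIVATE */
};

/* Sketch: fill a device-private pgmap so it passes the checks above. */
static void my_init_private_pgmap(struct dev_pagemap *pgmap,
				  const struct range *range, void *owner)
{
	pgmap->type	= MEMORY_DEVICE_PRIVATE;
	pgmap->ops	= &my_private_ops;
	pgmap->owner	= owner;	/* required: identifies the driver */
	pgmap->range	= *range;
	pgmap->nr_range	= 1;
	/* pgmap->ref left NULL, so no kill/cleanup ops are allowed and
	 * memremap_pages() will use its internal_ref/done pair. */
}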
386 pgmap->nr_range = 0; in memremap_pages()
392 pgmap->nr_range++; in memremap_pages()
397 pgmap->nr_range = nr_range; in memremap_pages()
401 return __va(pgmap->ranges[0].start); in memremap_pages()
406 * devm_memremap_pages - remap and provide memmap backing for the given resource
415 * PGMAP_ALTMAP_VALID must be set in pgmap->flags.
417 * 3/ The ref field may optionally be provided, in which pgmap->ref must be
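A minimal, hedged usage sketch of devm_memremap_pages(): a single-range, generic-type mapping with no altmap and no caller-supplied ref, so the notes above about PGMAP_ALTMAP_VALID and the ref field do not apply. Device, physical range, and type choice are placeholders:

/* Hypothetical probe-time call: expose [dev_base, dev_base + dev_size)
 * as ZONE_DEVICE pages whose lifetime is tied to 'dev' via devres. */
static void *my_map_device_pages(struct device *dev, phys_addr_t dev_base,
				 resource_size_t dev_size,
				 struct dev_pagemap *pgmap)
{
	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range = (struct range) {
		.start = dev_base,
		.end   = dev_base + dev_size - 1,
	};
	pgmap->nr_range = 1;

	/* Kernel virtual address of range 0, or ERR_PTR() on failure. */
	return devm_memremap_pages(dev, pgmap);
}

The non-devm pair memremap_pages()/memunmap_pages() takes the same pgmap but leaves teardown to the caller.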
452 return altmap->reserve + altmap->free; in vmem_altmap_offset()
458 altmap->alloc -= nr_pfns; in vmem_altmap_free()
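vmem_altmap_offset() reports reserve + free: the number of leading device pfns set aside so the struct page array itself can be allocated from device memory. A hedged sketch of how a caller might populate the altmap before mapping (field values illustrative; real users compute reserve/free from their on-media layout):

/* Sketch: donate the first nr_memmap_pfns of the device range to the
 * altmap so vmemmap pages come from device memory, not ordinary RAM. */
static void my_setup_altmap(struct dev_pagemap *pgmap, phys_addr_t base,
			    unsigned long nr_memmap_pfns)
{
	struct vmem_altmap altmap = {
		.base_pfn = PHYS_PFN(base),
	};

	/* base_pfn/reserve are const members, so seed the embedded altmap
	 * via memcpy() the way existing altmap users do. */
	memcpy(&pgmap->altmap, &altmap, sizeof(altmap));
	pgmap->altmap.free = nr_memmap_pfns;
	pgmap->flags |= PGMAP_ALTMAP_VALID;
}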
462 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
466 * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
467 * is non-NULL but does not cover @pfn the reference to it will be released.
478 if (phys >= pgmap->range.start && phys <= pgmap->range.end) in get_dev_pagemap()
486 if (pgmap && !percpu_ref_tryget_live(pgmap->ref)) in get_dev_pagemap()
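A hedged usage sketch of the lookup documented above: pass a previously returned pgmap as the hint so the fast path can reuse it, and finish with put_dev_pagemap() once the pages are no longer being touched (the wrapper below is illustrative):

/* Sketch: resolve the dev_pagemap covering 'pfn'. If 'cached' no longer
 * covers it, get_dev_pagemap() drops that reference itself. */
static struct dev_pagemap *my_lookup_pgmap(unsigned long pfn,
					   struct dev_pagemap *cached)
{
	struct dev_pagemap *pgmap = get_dev_pagemap(pfn, cached);

	if (!pgmap)
		return NULL;	/* not ZONE_DEVICE, or the pgmap is dying */

	/* ... inspect pgmap->type, pgmap->owner, use the pages ... */

	return pgmap;		/* caller finishes with put_dev_pagemap() */
}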
499 wake_up_var(&page->_refcount); in free_devmap_managed_page()
508 * When a device_private page is freed, the page->mapping field in free_devmap_managed_page()
509 * may still contain a (stale) mapping value. For example, the in free_devmap_managed_page()
510 * lower bits of page->mapping may still identify the page as an in free_devmap_managed_page()
519 * ...checks page->mapping, via PageAnon(page) call, in free_devmap_managed_page()
526 * to clear page->mapping. in free_devmap_managed_page()
528 page->mapping = NULL; in free_devmap_managed_page()
529 page->pgmap->ops->page_free(page); in free_devmap_managed_page()
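The final ops->page_free() call hands the page back to the driver once the stale mapping has been cleared. A hedged sketch expanding the placeholder page_free from the earlier device-private sketch: return the page to a driver-owned free list chained through zone_device_data (struct and field names hypothetical):

/* Hypothetical driver allocator state; embedding the pgmap lets it be
 * recovered from page->pgmap in the callback. */
struct my_private_chunk {
	struct dev_pagemap pgmap;
	spinlock_t lock;
	struct page *free_pages;	/* linked via zone_device_data */
};

static void my_page_free(struct page *page)
{
	struct my_private_chunk *chunk =
		container_of(page->pgmap, struct my_private_chunk, pgmap);

	/* page->mapping was already cleared by free_devmap_managed_page();
	 * just push the page onto the driver's free list for reuse. */
	spin_lock(&chunk->lock);
	page->zone_device_data = chunk->free_pages;
	chunk->free_pages = page;
	spin_unlock(&chunk->lock);
}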