/kernel/linux/linux-5.10/mm/ |
D | memremap.c |
    44    static void devmap_managed_enable_put(struct dev_pagemap *pgmap)   in devmap_managed_enable_put() argument
    46    if (pgmap->type == MEMORY_DEVICE_PRIVATE ||   in devmap_managed_enable_put()
    47    pgmap->type == MEMORY_DEVICE_FS_DAX)   in devmap_managed_enable_put()
    51    static void devmap_managed_enable_get(struct dev_pagemap *pgmap)   in devmap_managed_enable_get() argument
    53    if (pgmap->type == MEMORY_DEVICE_PRIVATE ||   in devmap_managed_enable_get()
    54    pgmap->type == MEMORY_DEVICE_FS_DAX)   in devmap_managed_enable_get()
    58    static void devmap_managed_enable_get(struct dev_pagemap *pgmap)   in devmap_managed_enable_get() argument
    61    static void devmap_managed_enable_put(struct dev_pagemap *pgmap)   in devmap_managed_enable_put() argument
    73    static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)   in pfn_first() argument
    75    struct range *range = &pgmap->ranges[range_id];   in pfn_first()
    [all …]
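The truncated bodies above pair the same type check in both helpers. A minimal sketch of what the get/put pair is assumed to do in this tree: toggle the devmap_managed_key static branch that gates managed-page freeing. The inc/dec is an assumption based on the mainline helpers; only the type checks are visible in the hits.

    #include <linux/memremap.h>
    #include <linux/jump_label.h>

    DEFINE_STATIC_KEY_FALSE(devmap_managed_key);

    /* Sketch: pagemap types whose pages need managed freeing take a
     * reference on the static branch while the pagemap is live. */
    static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
    {
        if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
            pgmap->type == MEMORY_DEVICE_FS_DAX)
            static_branch_inc(&devmap_managed_key);
    }

    static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
    {
        if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
            pgmap->type == MEMORY_DEVICE_FS_DAX)
            static_branch_dec(&devmap_managed_key);
    }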
|
D | gup.c |
    27    struct dev_pagemap *pgmap;   member
    396   struct dev_pagemap **pgmap)   in follow_page_pte() argument
    458   *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);   in follow_page_pte()
    459   if (*pgmap)   in follow_page_pte()
    607   page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);   in follow_pmd_mask()
    613   return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);   in follow_pmd_mask()
    633   return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);   in follow_pmd_mask()
    663   follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);   in follow_pmd_mask()
    700   page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);   in follow_pud_mask()
    803   if (ctx.pgmap)   in follow_page()
    [all …]
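The `&ctx->pgmap` plumbing above is a one-slot cache: get_dev_pagemap() reuses the reference it is handed when the pfn still falls inside the same pagemap, and follow_page() drops the final reference once after the walk (the line-803 check, presumably followed by a put). A condensed sketch of the pattern; walk_devmap_pfn() and walk_range() are hypothetical callers, not functions in gup.c.

    #include <linux/memremap.h>
    #include <linux/mm.h>

    static struct page *walk_devmap_pfn(unsigned long pfn,
                                        struct dev_pagemap **pgmap)
    {
        /* Cheap when *pgmap already covers pfn; takes a new ref and
         * drops the stale one otherwise. */
        *pgmap = get_dev_pagemap(pfn, *pgmap);
        if (!*pgmap)
            return NULL;    /* pfn is not in any live device pagemap */
        return pfn_to_page(pfn);
    }

    /* Caller side: a single put after the whole walk. */
    static void walk_range(unsigned long start_pfn, unsigned long nr)
    {
        struct dev_pagemap *pgmap = NULL;
        unsigned long i;

        for (i = 0; i < nr; i++)
            walk_devmap_pfn(start_pfn + i, &pgmap);
        if (pgmap)
            put_dev_pagemap(pgmap);
    }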
|
D | memory-failure.c |
    1223  struct dev_pagemap *pgmap)   in memory_failure_dev_pagemap() argument
    1241  if (!pgmap_pfn_valid(pgmap, pfn)) {   in memory_failure_dev_pagemap()
    1262  if (pgmap->type == MEMORY_DEVICE_PRIVATE) {   in memory_failure_dev_pagemap()
    1304  put_dev_pagemap(pgmap);   in memory_failure_dev_pagemap()
    1331  struct dev_pagemap *pgmap;   in memory_failure() local
    1342  pgmap = get_dev_pagemap(pfn, NULL);   in memory_failure()
    1343  if (pgmap)   in memory_failure()
    1345  pgmap);   in memory_failure()
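The hits at 1342-1345 show the dispatch: a poisoned pfn is first tested against the device pagemaps before the normal-RAM handling runs, and the device path consumes the reference (the put at 1304). Condensed sketch; handle_ram_poison() is a hypothetical stand-in for the rest of memory_failure(), and the prototypes are reproduced here only to keep the sketch self-contained.

    #include <linux/memremap.h>

    int memory_failure_dev_pagemap(unsigned long pfn, int flags,
                                   struct dev_pagemap *pgmap);
    int handle_ram_poison(unsigned long pfn, int flags);  /* hypothetical */

    static int my_memory_failure(unsigned long pfn, int flags)
    {
        struct dev_pagemap *pgmap;

        /* NULL cache slot: always a fresh lookup and reference. */
        pgmap = get_dev_pagemap(pfn, NULL);
        if (pgmap)
            return memory_failure_dev_pagemap(pfn, flags, pgmap);

        return handle_ram_poison(pfn, flags);
    }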
|
D | huge_memory.c |
    965   pmd_t *pmd, int flags, struct dev_pagemap **pgmap)   in follow_devmap_pmd() argument
    1003  *pgmap = get_dev_pagemap(pfn, *pgmap);   in follow_devmap_pmd()
    1004  if (!*pgmap)   in follow_devmap_pmd()
    1137  pud_t *pud, int flags, struct dev_pagemap **pgmap)   in follow_devmap_pud() argument
    1171  *pgmap = get_dev_pagemap(pfn, *pgmap);   in follow_devmap_pud()
    1172  if (!*pgmap)   in follow_devmap_pud()
|
D | hmm.c |
    217   device_private_entry_to_page(entry)->pgmap->owner ==   in hmm_is_device_private_entry()
|
/kernel/linux/linux-5.10/include/linux/ |
D | memremap.h |
    78    void (*kill)(struct dev_pagemap *pgmap);
    83    void (*cleanup)(struct dev_pagemap *pgmap);
    126   static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)   in pgmap_altmap() argument
    128   if (pgmap->flags & PGMAP_ALTMAP_VALID)   in pgmap_altmap()
    129   return &pgmap->altmap;   in pgmap_altmap()
    134   void *memremap_pages(struct dev_pagemap *pgmap, int nid);
    135   void memunmap_pages(struct dev_pagemap *pgmap);
    136   void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
    137   void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
    139   struct dev_pagemap *pgmap);
    [all …]
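The kill/cleanup hooks above exist for providers that supply their own pgmap->ref instead of the internal one. A minimal sketch of wiring them up; my_pagemap_* names are hypothetical, and it is assumed (as in the nfit_test code further down) that the ref's release callback completes pgmap->done.

    #include <linux/memremap.h>
    #include <linux/percpu-refcount.h>
    #include <linux/completion.h>

    /* kill: stop new references being taken on the external ref. */
    static void my_pagemap_kill(struct dev_pagemap *pgmap)
    {
        percpu_ref_kill(pgmap->ref);
    }

    /* cleanup: wait for outstanding references to drain, then tear
     * the ref down. Assumes the release callback does complete(&pgmap->done). */
    static void my_pagemap_cleanup(struct dev_pagemap *pgmap)
    {
        wait_for_completion(&pgmap->done);
        percpu_ref_exit(pgmap->ref);
    }

    static const struct dev_pagemap_ops my_pagemap_ops = {
        .kill    = my_pagemap_kill,
        .cleanup = my_pagemap_cleanup,
    };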
|
D | huge_mm.h |
    290   pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
    292   pud_t *pud, int flags, struct dev_pagemap **pgmap);
    474   unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)   in follow_devmap_pmd() argument
    480   unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)   in follow_devmap_pud() argument
|
D | mmu_notifier.h |
    527   unsigned long start, unsigned long end, void *pgmap)   in mmu_notifier_range_init_migrate() argument
    531   range->migrate_pgmap_owner = pgmap;   in mmu_notifier_range_init_migrate()
    659   pgmap) \   argument
|
D | mm_types.h |
    165   struct dev_pagemap *pgmap;   member
|
D | mm.h |
    1145  switch (page->pgmap->type) {   in page_is_devmap_managed()
    1173  page->pgmap->type == MEMORY_DEVICE_PRIVATE;   in is_device_private_page()
    1181  page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;   in is_pci_p2pdma_page()
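These predicates read page->pgmap->type straight off the struct page (the pgmap member from mm_types.h above). Usage sketch; my_page_cpu_addressable() is hypothetical.

    #include <linux/mm.h>

    /* Device-private pages live in memory the CPU cannot map, so bail
     * out before trying to touch their contents directly. */
    static bool my_page_cpu_addressable(struct page *page)
    {
        return !is_device_private_page(page);
    }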
|
/kernel/linux/linux-5.10/tools/testing/nvdimm/test/ |
D | iomap.c |
    101   struct dev_pagemap *pgmap = _pgmap;   in nfit_test_kill() local
    103   WARN_ON(!pgmap || !pgmap->ref);   in nfit_test_kill()
    105   if (pgmap->ops && pgmap->ops->kill)   in nfit_test_kill()
    106   pgmap->ops->kill(pgmap);   in nfit_test_kill()
    108   percpu_ref_kill(pgmap->ref);   in nfit_test_kill()
    110   if (pgmap->ops && pgmap->ops->cleanup) {   in nfit_test_kill()
    111   pgmap->ops->cleanup(pgmap);   in nfit_test_kill()
    113   wait_for_completion(&pgmap->done);   in nfit_test_kill()
    114   percpu_ref_exit(pgmap->ref);   in nfit_test_kill()
    120   struct dev_pagemap *pgmap =   in dev_pagemap_percpu_release() local
    [all …]
|
/kernel/linux/linux-5.10/drivers/xen/ |
D | unpopulated-alloc.c |
    20    struct dev_pagemap *pgmap;   in fill_list() local
    41    pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);   in fill_list()
    42    if (!pgmap) {   in fill_list()
    47    pgmap->type = MEMORY_DEVICE_GENERIC;   in fill_list()
    48    pgmap->range = (struct range) {   in fill_list()
    52    pgmap->nr_range = 1;   in fill_list()
    53    pgmap->owner = res;   in fill_list()
    78    vaddr = memremap_pages(pgmap, NUMA_NO_NODE);   in fill_list()
    97    kfree(pgmap);   in fill_list()
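Collapsing the fill_list() hits above into one sequence gives the canonical open-coded setup of a MEMORY_DEVICE_GENERIC pagemap. Sketch only: error handling is condensed, xen_map_unpopulated() is a hypothetical name, and res is assumed to be the already-reserved IOMEM resource.

    #include <linux/memremap.h>
    #include <linux/ioport.h>
    #include <linux/slab.h>
    #include <linux/numa.h>
    #include <linux/err.h>

    static void *xen_map_unpopulated(struct resource *res)
    {
        struct dev_pagemap *pgmap;
        void *vaddr;

        pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
        if (!pgmap)
            return ERR_PTR(-ENOMEM);

        pgmap->type = MEMORY_DEVICE_GENERIC;
        pgmap->range = (struct range) {
            .start = res->start,
            .end   = res->end,
        };
        pgmap->nr_range = 1;
        pgmap->owner = res;

        /* Creates struct pages for the range; no real RAM backs it. */
        vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
        if (IS_ERR(vaddr))
            kfree(pgmap);   /* the line-97 undo path above */
        return vaddr;
    }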
|
/kernel/linux/linux-5.10/drivers/pci/ |
D | p2pdma.c |
    37    struct dev_pagemap pgmap;   member
    42    static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap)   in to_p2p_pgmap() argument
    44    return container_of(pgmap, struct pci_p2pdma_pagemap, pgmap);   in to_p2p_pgmap()
    161   struct dev_pagemap *pgmap;   in pci_p2pdma_add_resource() local
    187   pgmap = &p2p_pgmap->pgmap;   in pci_p2pdma_add_resource()
    188   pgmap->range.start = pci_resource_start(pdev, bar) + offset;   in pci_p2pdma_add_resource()
    189   pgmap->range.end = pgmap->range.start + size - 1;   in pci_p2pdma_add_resource()
    190   pgmap->nr_range = 1;   in pci_p2pdma_add_resource()
    191   pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;   in pci_p2pdma_add_resource()
    197   addr = devm_memremap_pages(&pdev->dev, pgmap);   in pci_p2pdma_add_resource()
    [all …]
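The member at line 37 plus the container_of() at line 44 is the standard embedding idiom: memremap_pages() hands the ops callbacks a bare dev_pagemap pointer, and the driver recovers its wrapper struct from it. Reconstructed from the hits above; the real struct carries further private fields that the listing elides.

    #include <linux/kernel.h>      /* container_of() */
    #include <linux/memremap.h>

    struct pci_p2pdma_pagemap {
        struct dev_pagemap pgmap;
        /* ... driver-private fields elided ... */
    };

    static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap)
    {
        return container_of(pgmap, struct pci_p2pdma_pagemap, pgmap);
    }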
|
/kernel/linux/linux-5.10/drivers/nvdimm/ |
D | pmem.c |
    335   static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)   in pmem_pagemap_cleanup() argument
    338   container_of(pgmap->ref, struct request_queue, q_usage_counter);   in pmem_pagemap_cleanup()
    343   static void pmem_release_queue(void *pgmap)   in pmem_release_queue() argument
    345   pmem_pagemap_cleanup(pgmap);   in pmem_release_queue()
    348   static void pmem_pagemap_kill(struct dev_pagemap *pgmap)   in pmem_pagemap_kill() argument
    351   container_of(pgmap->ref, struct request_queue, q_usage_counter);   in pmem_pagemap_kill()
    401   rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);   in pmem_attach_disk()
    429   pmem->pgmap.ref = &q->q_usage_counter;   in pmem_attach_disk()
    431   pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;   in pmem_attach_disk()
    432   pmem->pgmap.ops = &fsdax_pagemap_ops;   in pmem_attach_disk()
    [all …]
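Line 429 is the interesting wiring: pgmap.ref points at the block queue's q_usage_counter, so the pagemap kill/cleanup ops become queue freeze and teardown. Sketch of the two helpers, condensed from the listed lines; the bodies beyond the container_of() are assumptions matching mainline 5.10.

    #include <linux/blkdev.h>
    #include <linux/blk-mq.h>
    #include <linux/memremap.h>

    static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
    {
        struct request_queue *q =
            container_of(pgmap->ref, struct request_queue, q_usage_counter);

        blk_freeze_queue_start(q);  /* new I/O can no longer take refs */
    }

    static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
    {
        struct request_queue *q =
            container_of(pgmap->ref, struct request_queue, q_usage_counter);

        blk_cleanup_queue(q);       /* drains and releases the queue */
    }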
|
D | pfn_devs.c |
    673   static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)   in __nvdimm_setup_pfn() argument
    675   struct range *range = &pgmap->range;   in __nvdimm_setup_pfn()
    676   struct vmem_altmap *altmap = &pgmap->altmap;   in __nvdimm_setup_pfn()
    696   pgmap->nr_range = 1;   in __nvdimm_setup_pfn()
    711   pgmap->flags |= PGMAP_ALTMAP_VALID;   in __nvdimm_setup_pfn()
    835   int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)   in nvdimm_setup_pfn() argument
    847   return __nvdimm_setup_pfn(nd_pfn, pgmap);   in nvdimm_setup_pfn()
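Line 711 is the producer side of pgmap_altmap() from memremap.h above: the flag marks pgmap->altmap as populated, so part of the device itself backs the struct pages instead of system RAM. Minimal sketch of flagging an altmap; my_setup_altmap() and its field values are illustrative, and the memcpy is needed because vmem_altmap's pfn fields are const.

    #include <linux/memremap.h>
    #include <linux/string.h>

    static void my_setup_altmap(struct dev_pagemap *pgmap,
                                unsigned long base_pfn, unsigned long reserve)
    {
        struct vmem_altmap __altmap = {
            .base_pfn = base_pfn,   /* first pfn of the device range */
            .reserve  = reserve,    /* pages excluded from altmap use */
        };

        memcpy(&pgmap->altmap, &__altmap, sizeof(__altmap));
        pgmap->flags |= PGMAP_ALTMAP_VALID;     /* line 711 above */
    }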
|
D | pmem.h |
    26    struct dev_pagemap pgmap;   member
|
D | nd.h |
    390   int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
    393   struct dev_pagemap *pgmap)   in nvdimm_setup_pfn() argument
|
/kernel/linux/linux-5.10/drivers/dax/pmem/ |
D | core.c |
    20    struct dev_pagemap pgmap = { };   in __dax_pmem_probe() local
    34    rc = nvdimm_setup_pfn(nd_pfn, &pgmap);   in __dax_pmem_probe()
    54    range = pgmap.range;   in __dax_pmem_probe()
    65    .pgmap = &pgmap,   in __dax_pmem_probe()
|
/kernel/linux/linux-5.10/drivers/dax/ |
D | device.c |
    395   struct dev_pagemap *pgmap;   in dev_dax_probe() local
    401   pgmap = dev_dax->pgmap;   in dev_dax_probe()
    402   if (dev_WARN_ONCE(dev, pgmap && dev_dax->nr_range > 1,   in dev_dax_probe()
    406   if (!pgmap) {   in dev_dax_probe()
    407   pgmap = devm_kzalloc(dev, sizeof(*pgmap) + sizeof(struct range)   in dev_dax_probe()
    409   if (!pgmap)   in dev_dax_probe()
    411   pgmap->nr_range = dev_dax->nr_range;   in dev_dax_probe()
    424   if (!dev_dax->pgmap)   in dev_dax_probe()
    425   pgmap->ranges[i] = *range;   in dev_dax_probe()
    428   pgmap->type = MEMORY_DEVICE_GENERIC;   in dev_dax_probe()
    [all …]
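The allocation at line 407 sizes one buffer for the header plus nr_range trailing ranges, exploiting the union { struct range range; struct range ranges[0]; } layout from 5.10's memremap.h. Sketch of that shape; alloc_multi_range_pgmap() is a hypothetical helper.

    #include <linux/memremap.h>
    #include <linux/device.h>
    #include <linux/gfp.h>

    static struct dev_pagemap *alloc_multi_range_pgmap(struct device *dev,
                                                       int nr_range)
    {
        struct dev_pagemap *pgmap;

        /* One trailing struct range is already part of sizeof(*pgmap),
         * hence the (nr_range - 1). */
        pgmap = devm_kzalloc(dev, sizeof(*pgmap) +
                             sizeof(struct range) * (nr_range - 1),
                             GFP_KERNEL);
        if (pgmap)
            pgmap->nr_range = nr_range;
        return pgmap;
    }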
|
D | super.c |
    133   struct dev_pagemap *pgmap, *end_pgmap;   in __generic_fsdax_supported() local
    135   pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);   in __generic_fsdax_supported()
    137   if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX   in __generic_fsdax_supported()
    138   && pfn_t_to_page(pfn)->pgmap == pgmap   in __generic_fsdax_supported()
    139   && pfn_t_to_page(end_pfn)->pgmap == pgmap   in __generic_fsdax_supported()
    143   put_dev_pagemap(pgmap);   in __generic_fsdax_supported()
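The condition at lines 137-139 insists that the first and last pfn of the candidate range resolve to one and the same MEMORY_DEVICE_FS_DAX pagemap. A condensed sketch of that check; dax_range_ok() is hypothetical, with the refcounting for both lookups made explicit.

    #include <linux/memremap.h>
    #include <linux/pfn_t.h>

    static bool dax_range_ok(pfn_t pfn, pfn_t end_pfn)
    {
        struct dev_pagemap *pgmap, *end_pgmap;
        bool ok;

        pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
        end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
        ok = pgmap && pgmap == end_pgmap &&
             pgmap->type == MEMORY_DEVICE_FS_DAX;
        /* Two lookups took two references, so drop both. */
        if (pgmap)
            put_dev_pagemap(pgmap);
        if (end_pgmap)
            put_dev_pagemap(end_pgmap);
        return ok;
    }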
|
D | bus.h |
    26    struct dev_pagemap *pgmap;   member
|
D | dax-private.h |
    70    struct dev_pagemap *pgmap;   member
|
D | bus.c |
    1276  kfree(dev_dax->pgmap);   in dev_dax_release()
    1329  if (data->pgmap) {   in devm_create_dev_dax()
    1333  dev_dax->pgmap = kmemdup(data->pgmap,   in devm_create_dev_dax()
    1335  if (!dev_dax->pgmap) {   in devm_create_dev_dax()
    1390  kfree(dev_dax->pgmap);   in devm_create_dev_dax()
|
/kernel/linux/linux-5.10/fs/fuse/ |
D | virtio_fs.c |
    801   struct dev_pagemap *pgmap;   in virtio_fs_setup_dax() local
    825   pgmap = devm_kzalloc(&vdev->dev, sizeof(*pgmap), GFP_KERNEL);   in virtio_fs_setup_dax()
    826   if (!pgmap)   in virtio_fs_setup_dax()
    829   pgmap->type = MEMORY_DEVICE_FS_DAX;   in virtio_fs_setup_dax()
    836   pgmap->range = (struct range) {   in virtio_fs_setup_dax()
    840   pgmap->nr_range = 1;   in virtio_fs_setup_dax()
    842   fs->window_kaddr = devm_memremap_pages(&vdev->dev, pgmap);   in virtio_fs_setup_dax()
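Same recipe as the Xen and PCI cases above, but fully device-managed: devm_kzalloc() plus devm_memremap_pages() tie the pagemap's lifetime to the virtio device, so there is no explicit kfree()/memunmap_pages() on teardown. Condensed sketch; map_dax_window() is hypothetical, and cache_start/cache_len stand in for the window geometry virtio_fs reads from the device.

    #include <linux/memremap.h>
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/types.h>

    static void *map_dax_window(struct device *dev, phys_addr_t cache_start,
                                size_t cache_len)
    {
        struct dev_pagemap *pgmap;

        pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
        if (!pgmap)
            return ERR_PTR(-ENOMEM);

        pgmap->type = MEMORY_DEVICE_FS_DAX;
        pgmap->range = (struct range) {
            .start = cache_start,
            .end   = cache_start + cache_len - 1,
        };
        pgmap->nr_range = 1;

        /* Unmapped automatically when dev goes away. */
        return devm_memremap_pages(dev, pgmap);
    }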
|
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/ |
D | nouveau_dmem.c |
    90    return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);   in nouveau_page_to_chunk()
|