/drivers/hv/

hv_balloon.c
  435  unsigned long start_pfn;  [member]
  448  unsigned long start_pfn;  [member]
  588  if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))  [in has_pfn_is_backed()]
  595  static unsigned long hv_page_offline_check(unsigned long start_pfn,  [argument, in hv_page_offline_check()]
  598  unsigned long pfn = start_pfn, count = 0;  [in hv_page_offline_check()]
  602  while (pfn < start_pfn + nr_pages) {  [in hv_page_offline_check()]
  609  while ((pfn >= has->start_pfn) &&  [in hv_page_offline_check()]
  611  (pfn < start_pfn + nr_pages)) {  [in hv_page_offline_check()]
  645  pfn_count = hv_page_offline_check(mem->start_pfn,  [in hv_memory_notifier()]
  692  unsigned long start_pfn, unsigned long size)  [argument, in hv_bring_pgs_online()]
  [all …]
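
These hits trace the Hyper-V balloon's offline check: hv_page_offline_check() walks [start_pfn, start_pfn + nr_pages) and counts the pages that are actually backed, skipping the unbacked gaps tracked per hot-add region. A minimal userspace sketch of that counting loop; the gap table and the helper names are hypothetical stand-ins for the driver's hv_hotadd_gap list:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the per-region hv_hotadd_gap list: pages
     * in [start_pfn, end_pfn) exist as struct pages but are not backed. */
    struct gap { unsigned long start_pfn, end_pfn; };

    static const struct gap gaps[] = { { 110, 120 } };

    /* A pfn is backed unless it falls inside a gap (cf. has_pfn_is_backed()). */
    static bool pfn_is_backed(unsigned long pfn)
    {
        for (size_t i = 0; i < sizeof(gaps) / sizeof(gaps[0]); i++)
            if (pfn >= gaps[i].start_pfn && pfn < gaps[i].end_pfn)
                return false;
        return true;
    }

    /* Count the backed pages in [start_pfn, start_pfn + nr_pages). */
    static unsigned long page_offline_check(unsigned long start_pfn,
                                            unsigned long nr_pages)
    {
        unsigned long pfn, count = 0;

        for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
            if (pfn_is_backed(pfn))
                count++;
        return count;
    }

    int main(void)
    {
        /* 100..139 minus the gap 110..119 leaves 30 backed pages. */
        printf("%lu\n", page_offline_check(100, 40));
        return 0;
    }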

/drivers/base/

memory.c
  180  unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);  [local, in memory_block_online()]
  187  start_pfn, nr_pages);  [in memory_block_online()]
  197  ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);  [in memory_block_online()]
  202  ret = online_pages(start_pfn + nr_vmemmap_pages,  [in memory_block_online()]
  206  mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);  [in memory_block_online()]
  215  adjust_present_page_count(pfn_to_page(start_pfn), mem->group,  [in memory_block_online()]
  223  unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);  [local, in memory_block_offline()]
  233  adjust_present_page_count(pfn_to_page(start_pfn), mem->group,  [in memory_block_offline()]
  236  ret = offline_pages(start_pfn + nr_vmemmap_pages,  [in memory_block_offline()]
  241  adjust_present_page_count(pfn_to_page(start_pfn),  [in memory_block_offline()]
  [all …]
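
The snippets show the memmap-on-memory ordering in memory_block_online(): the first nr_vmemmap_pages of a block back their own memmap, so online_pages() starts at start_pfn + nr_vmemmap_pages, and a failure unwinds the memmap init. A compilable sketch of that sequence; init_memmap() and friends are hypothetical stubs for the mhp_* and online_pages() calls:

    #include <stdio.h>

    /* Stubs standing in for mhp_init_memmap_on_memory(),
     * mhp_deinit_memmap_on_memory() and online_pages(). */
    static int init_memmap(unsigned long pfn, unsigned long n) { return 0; }
    static void deinit_memmap(unsigned long pfn, unsigned long n) { }
    static int online_pages(unsigned long pfn, unsigned long n) { return 0; }

    /* Sketch of the memory_block_online() sequence: the vmemmap pages at
     * the start of the block are initialized first and excluded from the
     * range handed to online_pages(); failure unwinds the memmap init. */
    static int block_online(unsigned long start_pfn, unsigned long nr_pages,
                            unsigned long nr_vmemmap_pages)
    {
        int ret;

        if (nr_vmemmap_pages) {
            ret = init_memmap(start_pfn, nr_vmemmap_pages);
            if (ret)
                return ret;
        }

        ret = online_pages(start_pfn + nr_vmemmap_pages,
                           nr_pages - nr_vmemmap_pages);
        if (ret && nr_vmemmap_pages)
            deinit_memmap(start_pfn, nr_vmemmap_pages);
        return ret;
    }

    int main(void)
    {
        printf("%d\n", block_online(0x40000, 0x8000, 0x40));
        return 0;
    }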

arch_numa.c
  221  static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)  [argument, in setup_node_data()]
  228  if (start_pfn >= end_pfn)  [in setup_node_data()]
  248  NODE_DATA(nid)->node_start_pfn = start_pfn;  [in setup_node_data()]
  249  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;  [in setup_node_data()]
  367  unsigned long start_pfn, end_pfn;  [local, in numa_register_nodes()]
  369  get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);  [in numa_register_nodes()]
  370  setup_node_data(nid, start_pfn, end_pfn);  [in numa_register_nodes()]
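
setup_node_data() records a node's span as a half-open PFN range: node_start_pfn plus node_spanned_pages = end_pfn - start_pfn, with start_pfn >= end_pfn flagging a memory-less node. A minimal model, assuming a struct with just those two fields:

    #include <stdint.h>
    #include <stdio.h>

    /* Two fields mirroring node_start_pfn / node_spanned_pages. */
    struct node_data {
        uint64_t node_start_pfn;
        uint64_t node_spanned_pages;
    };

    /* PFN ranges are half-open, [start_pfn, end_pfn); start_pfn >= end_pfn
     * is treated here as a memory-less node spanning zero pages. */
    static void setup_node_data(struct node_data *nd,
                                uint64_t start_pfn, uint64_t end_pfn)
    {
        if (start_pfn >= end_pfn) {
            nd->node_start_pfn = 0;
            nd->node_spanned_pages = 0;
            return;
        }
        nd->node_start_pfn = start_pfn;
        nd->node_spanned_pages = end_pfn - start_pfn;
    }

    int main(void)
    {
        struct node_data nd;

        setup_node_data(&nd, 0x80000, 0x100000);
        printf("start=%#llx spanned=%#llx\n",
               (unsigned long long)nd.node_start_pfn,
               (unsigned long long)nd.node_spanned_pages);
        return 0;
    }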

node.c
  832  unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);  [local, in register_mem_block_under_node_early()]
  833  unsigned long end_pfn = start_pfn + memory_block_pfns - 1;  [in register_mem_block_under_node_early()]
  837  for (pfn = start_pfn; pfn <= end_pfn; pfn++) {  [in register_mem_block_under_node_early()]
  895  void link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,  [argument, in link_mem_sections()]
  905  walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),  [in link_mem_sections()]
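
Two range conventions sit side by side here: register_mem_block_under_node_early() iterates an inclusive range (end_pfn = start_pfn + memory_block_pfns - 1, pfn <= end_pfn), while link_mem_sections() passes a half-open range converted to a physical base and byte size via PFN_PHYS(). A small sketch of both, assuming a 4 KiB page:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    /* PFN_PHYS() turns a page frame number into a physical address. */
    #define PFN_PHYS(pfn) ((unsigned long long)(pfn) << PAGE_SHIFT)

    int main(void)
    {
        unsigned long start_pfn = 0x80000, memory_block_pfns = 0x8000;

        /* Inclusive form (register_mem_block_under_node_early): the last
         * valid pfn of the block is start + count - 1. */
        unsigned long end_incl = start_pfn + memory_block_pfns - 1;

        /* Half-open form (link_mem_sections): [start, end) becomes a
         * physical base plus a byte size via PFN_PHYS(). */
        unsigned long end_excl = start_pfn + memory_block_pfns;

        printf("last pfn %#lx\n", end_incl);
        printf("phys base %#llx size %#llx\n",
               PFN_PHYS(start_pfn), PFN_PHYS(end_excl - start_pfn));
        return 0;
    }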

/drivers/xen/

balloon.c
  359  unsigned long start_pfn = page_to_pfn(page);  [local, in xen_online_page()]
  362  pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);  [in xen_online_page()]
  365  p = pfn_to_page(start_pfn + i);  [in xen_online_page()]
  691  static void __init balloon_add_region(unsigned long start_pfn,  [argument, in balloon_add_region()]
  701  extra_pfn_end = min(max_pfn, start_pfn + pages);  [in balloon_add_region()]
  703  for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {  [in balloon_add_region()]
  710  balloon_stats.total_pages += extra_pfn_end - start_pfn;  [in balloon_add_region()]
  756  balloon_add_region(xen_extra_mem[i].start_pfn,  [in balloon_init()]
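
balloon_add_region() clamps the extra-memory region against max_pfn before walking it, then accounts exactly the clamped span. A userspace model of that clamp-and-walk; max_pfn and the page release step are assumed:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static unsigned long max_pfn = 0x100000;    /* assumed end of RAM */
    static unsigned long total_pages;

    /* Model of balloon_add_region(): clamp the region to max_pfn, hand
     * every page in [start_pfn, extra_pfn_end) to the balloon, and
     * account the clamped span. */
    static void balloon_add_region(unsigned long start_pfn, unsigned long pages)
    {
        unsigned long pfn, extra_pfn_end = MIN(max_pfn, start_pfn + pages);

        for (pfn = start_pfn; pfn < extra_pfn_end; pfn++)
            ;   /* release pfn to the balloon here */

        total_pages += extra_pfn_end - start_pfn;
    }

    int main(void)
    {
        balloon_add_region(0xff000, 0x2000);    /* tail clamped at max_pfn */
        printf("total_pages=%#lx\n", total_pages);
        return 0;
    }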

unpopulated-alloc.c
  193  pfn_to_page(xen_extra_mem[i].start_pfn + j);  [in init()]

grant-table.c
  980  unsigned long pfn, start_pfn;  [local, in gnttab_dma_alloc_pages()]
  1001  start_pfn = __phys_to_pfn(args->dev_bus_addr);  [in gnttab_dma_alloc_pages()]
  1002  for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;  [in gnttab_dma_alloc_pages()]
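
gnttab_dma_alloc_pages() derives the first frame of a contiguous DMA allocation from its bus address, then iterates nr_pages consecutive frames. A tiny model, assuming a 4 KiB page and a made-up bus address:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    /* Like __phys_to_pfn(): drop the in-page offset bits. */
    #define phys_to_pfn(addr) ((unsigned long)((addr) >> PAGE_SHIFT))

    int main(void)
    {
        uint64_t dev_bus_addr = 0x82340000;     /* made-up DMA base */
        unsigned long nr_pages = 4, start_pfn = phys_to_pfn(dev_bus_addr), pfn;

        /* Walk the frames backing the contiguous allocation, as the loop
         * in gnttab_dma_alloc_pages() does when filling its page array. */
        for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
            printf("frame %#lx\n", pfn);
        return 0;
    }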

/drivers/iommu/intel/

iommu.c
  1107  unsigned long start_pfn,  [argument, in dma_pte_clear_range()]
  1113  BUG_ON(!domain_pfn_supported(domain, start_pfn));  [in dma_pte_clear_range()]
  1115  BUG_ON(start_pfn > last_pfn);  [in dma_pte_clear_range()]
  1120  first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);  [in dma_pte_clear_range()]
  1122  start_pfn = align_to_level(start_pfn + 1, large_page + 1);  [in dma_pte_clear_range()]
  1127  start_pfn += lvl_to_nr_pages(large_page);  [in dma_pte_clear_range()]
  1129  } while (start_pfn <= last_pfn && !first_pte_in_page(pte));  [in dma_pte_clear_range()]
  1134  } while (start_pfn && start_pfn <= last_pfn);  [in dma_pte_clear_range()]
  1139  unsigned long pfn, unsigned long start_pfn,  [argument, in dma_pte_free_level()]
  1142  pfn = max(start_pfn, pfn);  [in dma_pte_free_level()]
  [all …]
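
dma_pte_clear_range() advances through [start_pfn, last_pfn] by whatever mapping size sits at the current position, so a 2 MiB superpage is consumed in one lvl_to_nr_pages() step. A simplified model that assumes level-aligned mappings and fakes the page-table lookup with a fixed predicate:

    #include <stdio.h>

    /* 4 KiB pages covered by a mapping at a given level: level 1 = 4 KiB,
     * level 2 = 2 MiB, level 3 = 1 GiB (9 bits per level). */
    static unsigned long lvl_to_nr_pages(int level)
    {
        return 1UL << (9 * (level - 1));
    }

    /* Fake lookup standing in for dma_pfn_level_pte(): pretend
     * 0x200..0x3ff is one level-aligned 2 MiB mapping, the rest 4 KiB. */
    static int mapping_level(unsigned long pfn)
    {
        return (pfn >= 0x200 && pfn < 0x400) ? 2 : 1;
    }

    int main(void)
    {
        unsigned long start_pfn = 0x1fe, last_pfn = 0x402;

        /* Clear [start_pfn, last_pfn], advancing by the size of the
         * mapping found at each position, like dma_pte_clear_range(). */
        while (start_pfn <= last_pfn) {
            int level = mapping_level(start_pfn);

            printf("clear %#lx at level %d\n", start_pfn, level);
            start_pfn += lvl_to_nr_pages(level);
        }
        return 0;
    }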

/drivers/virtio/

virtio_mem.c
  847  unsigned long start_pfn)  [argument, in virtio_mem_sbm_notify_online()]
  849  const bool is_movable = page_zonenum(pfn_to_page(start_pfn)) ==  [in virtio_mem_sbm_notify_online()]
  940  const unsigned long start = PFN_PHYS(mhp->start_pfn);  [in virtio_mem_memory_notifier_cb()]
  991  mhp->start_pfn,  [in virtio_mem_memory_notifier_cb()]
  1022  virtio_mem_sbm_notify_online(vm, id, mhp->start_pfn);  [in virtio_mem_memory_notifier_cb()]
  1045  mhp->start_pfn,  [in virtio_mem_memory_notifier_cb()]
  1862  unsigned long start_pfn;  [local, in virtio_mem_sbm_unplug_sb_online()]
  1865  start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +  [in virtio_mem_sbm_unplug_sb_online()]
  1868  rc = virtio_mem_fake_offline(start_pfn, nr_pages);  [in virtio_mem_sbm_unplug_sb_online()]
  1876  virtio_mem_fake_online(start_pfn, nr_pages);  [in virtio_mem_sbm_unplug_sb_online()]
  [all …]
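
virtio_mem_sbm_unplug_sb_online() first takes the subblock's pages out of circulation (fake offline), then asks the device to unplug them, and puts the pages straight back online if the request fails. A sketch of that unwind pattern with stubbed-out helpers; the device call and its failure are simulated:

    #include <stdio.h>

    /* Stubs for virtio_mem_fake_offline()/_fake_online() and the device
     * request; the device is simulated as rejecting the unplug. */
    static int fake_offline(unsigned long start_pfn, unsigned long n) { return 0; }
    static void fake_online(unsigned long start_pfn, unsigned long n) { }
    static int device_unplug(unsigned long start_pfn, unsigned long n) { return -1; }

    /* Take the pages away from the page allocator first, then tell the
     * device; if the device refuses, give the pages straight back. */
    static int unplug_sb_online(unsigned long start_pfn, unsigned long nr_pages)
    {
        int rc = fake_offline(start_pfn, nr_pages);

        if (rc)
            return rc;

        rc = device_unplug(start_pfn, nr_pages);
        if (rc)
            fake_online(start_pfn, nr_pages);   /* undo on failure */
        return rc;
    }

    int main(void)
    {
        printf("rc=%d\n", unplug_sb_online(0x40000, 512));
        return 0;
    }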

/drivers/net/ethernet/ibm/ehea/

ehea_qmr.c
  686  unsigned long pfn, start_pfn, end_pfn, nr_pages;  [local, in ehea_create_busmap_callback()]
  693  start_pfn = initial_pfn;  [in ehea_create_busmap_callback()]
  695  pfn = start_pfn;  [in ehea_create_busmap_callback()]
  700  nr_pages = pfn - start_pfn;  [in ehea_create_busmap_callback()]
  701  ret = ehea_update_busmap(start_pfn, nr_pages,  [in ehea_create_busmap_callback()]
  708  start_pfn = pfn;  [in ehea_create_busmap_callback()]
  714  nr_pages = pfn - start_pfn;  [in ehea_create_busmap_callback()]
  715  return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);  [in ehea_create_busmap_callback()]
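
ehea_create_busmap_callback() is a run-length loop: it extends a run of mappable PFNs from start_pfn, flushes the run with one ehea_update_busmap() call when it hits an unmappable stretch, and flushes the tail at the end. A userspace model with a hypothetical validity predicate standing in for the driver's per-page test:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical validity test; the real callback skips hugepages. */
    static bool pfn_valid_for_map(unsigned long pfn)
    {
        return !(pfn >= 0x120 && pfn < 0x140);  /* pretend hole */
    }

    static void update_busmap(unsigned long start_pfn, unsigned long nr_pages)
    {
        printf("map [%#lx, +%lu)\n", start_pfn, nr_pages);
    }

    /* Coalesce consecutive valid pfns into one busmap update per run. */
    static void create_busmap(unsigned long initial_pfn, unsigned long total)
    {
        unsigned long pfn = initial_pfn, start_pfn = initial_pfn;

        while (pfn < initial_pfn + total) {
            if (!pfn_valid_for_map(pfn)) {
                if (pfn > start_pfn)
                    update_busmap(start_pfn, pfn - start_pfn);
                while (pfn < initial_pfn + total && !pfn_valid_for_map(pfn))
                    pfn++;              /* skip the invalid run */
                start_pfn = pfn;        /* restart the valid run */
                continue;
            }
            pfn++;
        }
        if (pfn > start_pfn)
            update_busmap(start_pfn, pfn - start_pfn);  /* flush the tail */
    }

    int main(void)
    {
        create_busmap(0x100, 0x80);
        return 0;
    }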

ehea_main.c
  3258  if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))  [in ehea_mem_notifier()]
  3266  if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))  [in ehea_mem_notifier()]

/drivers/of/

kexec.c
  128  unsigned long start_pfn, end_pfn;  [local, in ima_get_kexec_buffer()]
  151  start_pfn = PHYS_PFN(tmp_addr);  [in ima_get_kexec_buffer()]
  153  if (!page_is_ram(start_pfn) || !page_is_ram(end_pfn)) {  [in ima_get_kexec_buffer()]
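
ima_get_kexec_buffer() sanity-checks the buffer inherited over kexec by requiring both its first and last frame to be RAM. A sketch of that check, assuming a 4 KiB page, a last-byte end_pfn, and a single made-up RAM range behind page_is_ram():

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PHYS_PFN(addr) ((unsigned long)((addr) >> PAGE_SHIFT))

    /* Hypothetical RAM map: one range, standing in for page_is_ram(). */
    static bool page_is_ram(unsigned long pfn)
    {
        return pfn >= 0x100 && pfn < 0x8000;
    }

    /* The buffer's first and last frames must both be RAM. */
    static bool buffer_in_ram(unsigned long long addr, unsigned long size)
    {
        unsigned long start_pfn = PHYS_PFN(addr);
        unsigned long end_pfn = PHYS_PFN(addr + size - 1);

        return page_is_ram(start_pfn) && page_is_ram(end_pfn);
    }

    int main(void)
    {
        printf("%d\n", buffer_in_ram(0x200000, 0x1000));
        return 0;
    }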

/drivers/iommu/

iova.c
  76  unsigned long start_pfn)  [argument, in init_iova_domain()]
  90  iovad->start_pfn = start_pfn;  [in init_iova_domain()]
  270  unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;  [in __alloc_and_insert_iova_range()]
  295  if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {  [in __alloc_and_insert_iova_range()]
  362  gap = curr_iova->pfn_lo - iovad->start_pfn;  [in __alloc_and_insert_iova_best_fit()]
  363  if (limit_pfn >= size && new_pfn >= iovad->start_pfn &&  [in __alloc_and_insert_iova_best_fit()]
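
Here iovad->start_pfn is the floor of the allocatable IOVA space: the best-fit path measures the gap below a reservation as curr_iova->pfn_lo - iovad->start_pfn and rejects any candidate that would dip under the floor. A toy model of just that bounds check; the real allocator also handles alignment and walks an rbtree:

    #include <stdio.h>

    struct iova { unsigned long pfn_lo, pfn_hi; };

    /* The gap below an existing reservation runs from the domain floor
     * (start_pfn) up to pfn_lo; a candidate placed there must fit the
     * gap and must not fall below the floor. */
    static unsigned long fit_below(const struct iova *curr,
                                   unsigned long start_pfn,
                                   unsigned long size)
    {
        unsigned long gap = curr->pfn_lo - start_pfn;
        unsigned long new_pfn = curr->pfn_lo - size;   /* place just below */

        if (gap >= size && new_pfn >= start_pfn)
            return new_pfn;
        return 0;   /* no fit */
    }

    int main(void)
    {
        struct iova curr = { 0x100, 0x1ff };

        printf("%#lx\n", fit_below(&curr, 0x10, 0x40));    /* fits: 0xc0 */
        printf("%#lx\n", fit_below(&curr, 0x10, 0x400));   /* too big: 0 */
        return 0;
    }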

dma-iommu.c
  392  if (iovad->start_pfn) {  [in iommu_dma_init_domain()]
  394  base_pfn != iovad->start_pfn) {  [in iommu_dma_init_domain()]

/drivers/s390/char/

sclp_cmd.c
  182  int arch_get_memory_phys_device(unsigned long start_pfn)  [argument, in arch_get_memory_phys_device()]
  186  return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);  [in arch_get_memory_phys_device()]
  329  start = arg->start_pfn << PAGE_SHIFT;  [in sclp_mem_notifier()]
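
arch_get_memory_phys_device() maps a PFN to an s390 storage increment number by dividing its physical address by the increment size sclp.rzm, done as a shift because rzm is a power of two. A worked example with an assumed 256 MiB increment and a local ilog2 helper:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PFN_PHYS(pfn) ((unsigned long long)(pfn) << PAGE_SHIFT)

    /* Integer log2 of a power of two (the kernel's ilog2() does more). */
    static unsigned int ilog2_p2(unsigned long long v)
    {
        unsigned int r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        unsigned long long rzm = 256ULL << 20;  /* assumed 256 MiB increment */
        unsigned long start_pfn = 0x84000;      /* phys 0x84000000 */

        /* Which storage increment does this pfn fall into?
         * phys / rzm, computed as a right shift. */
        printf("increment %llu\n", PFN_PHYS(start_pfn) >> ilog2_p2(rzm));
        return 0;
    }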

/drivers/gpu/drm/gma500/

mmu.h
  71  uint32_t start_pfn,

mmu.c
  620  int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,  [argument, in psb_mmu_insert_pfn_sequence()]
  645  pte = psb_mmu_mask_pte(start_pfn++, type);  [in psb_mmu_insert_pfn_sequence()]
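
psb_mmu_insert_pfn_sequence() fills a run of PTEs from a post-incrementing start_pfn, with psb_mmu_mask_pte() folding type bits into each entry. A sketch with hypothetical flag bits standing in for the driver's type handling:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    /* Hypothetical PTE flag bits; the real psb_mmu_mask_pte() derives
     * them from its type argument. */
    #define PTE_VALID 0x1u
    #define PTE_WO    0x2u

    static uint32_t mask_pte(uint32_t pfn, uint32_t flags)
    {
        return (pfn << PAGE_SHIFT) | flags;
    }

    int main(void)
    {
        uint32_t start_pfn = 0x400, npages = 4;

        /* Map npages consecutive frames, post-incrementing start_pfn the
         * way psb_mmu_insert_pfn_sequence() fills its PTE run. */
        for (uint32_t i = 0; i < npages; i++) {
            uint32_t pte = mask_pte(start_pfn++, PTE_VALID | PTE_WO);
            printf("pte[%u] = %#x\n", i, pte);
        }
        return 0;
    }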