/arch/powerpc/platforms/powernv/
  memtrace.c
      91  static void memtrace_clear_range(unsigned long start_pfn,    in memtrace_clear_range() argument
      97  for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {    in memtrace_clear_range()
     106  flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),    in memtrace_clear_range()
     107  (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),    in memtrace_clear_range()
     114  unsigned long pfn, start_pfn;    in memtrace_alloc_node() local
     125  start_pfn = page_to_pfn(page);    in memtrace_alloc_node()
     132  memtrace_clear_range(start_pfn, nr_pages);    in memtrace_alloc_node()
     138  for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)    in memtrace_alloc_node()
     141  arch_remove_linear_mapping(PFN_PHYS(start_pfn), size);    in memtrace_alloc_node()
     143  return PFN_PHYS(start_pfn);    in memtrace_alloc_node()
    [all …]
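The memtrace path above switches freely between the two ways of naming a page: as a pfn (page_to_pfn(), the per-page loops) and as a physical address (PFN_PHYS(start_pfn)). A minimal userspace sketch of that conversion, assuming 4 KiB pages (PAGE_SHIFT = 12); the macro below is a simplified stand-in for the kernel's PFN_PHYS():

    /*
     * Sketch only: PAGE_SHIFT = 12 is an assumption for illustration,
     * and the real PFN_PHYS()/page_to_pfn() are more involved.
     */
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PFN_PHYS(pfn) ((unsigned long long)(pfn) << PAGE_SHIFT)

    int main(void)
    {
        unsigned long start_pfn = 0x100;   /* hypothetical first page of the range */
        unsigned long nr_pages  = 4;

        /* A pfn names a page; shifting by PAGE_SHIFT yields its physical address. */
        printf("range: %#llx - %#llx (%lu pages)\n",
               PFN_PHYS(start_pfn), PFN_PHYS(start_pfn + nr_pages), nr_pages);
        return 0;
    }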
/arch/x86/xen/
  setup.c
      94  static void __init xen_add_extra_mem(unsigned long start_pfn,    in xen_add_extra_mem() argument
     106  xen_extra_mem[i].start_pfn = start_pfn;    in xen_add_extra_mem()
     111  if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==    in xen_add_extra_mem()
     112  start_pfn) {    in xen_add_extra_mem()
     120  memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));    in xen_add_extra_mem()
     123  static void __init xen_del_extra_mem(unsigned long start_pfn,    in xen_del_extra_mem() argument
     130  start_r = xen_extra_mem[i].start_pfn;    in xen_del_extra_mem()
     134  if (start_r == start_pfn) {    in xen_del_extra_mem()
     136  xen_extra_mem[i].start_pfn += n_pfns;    in xen_del_extra_mem()
     141  if (start_r + size_r == start_pfn + n_pfns) {    in xen_del_extra_mem()
    [all …]
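xen_add_extra_mem() above either extends an existing entry when the new pfn range starts exactly where that entry ends, or appends a fresh entry. A sketch of that merge-or-append pattern; the fixed-size table and names mirror the excerpt but the code is illustrative, not Xen's:

    #include <stdio.h>

    struct extra_mem { unsigned long start_pfn, n_pfns; };

    static struct extra_mem extra[8];   /* hypothetical small table */
    static int nr_extra;

    static void add_extra_mem(unsigned long start_pfn, unsigned long n_pfns)
    {
        for (int i = 0; i < nr_extra; i++) {
            /* New range abuts this entry on the right: extend it. */
            if (extra[i].start_pfn + extra[i].n_pfns == start_pfn) {
                extra[i].n_pfns += n_pfns;
                return;
            }
        }
        if (nr_extra < 8)
            extra[nr_extra++] = (struct extra_mem){ start_pfn, n_pfns };
    }

    int main(void)
    {
        add_extra_mem(0x100, 0x10);
        add_extra_mem(0x110, 0x20);      /* abuts the first entry -> merged */
        printf("%d entr%s, first spans %#lx+%#lx\n", nr_extra,
               nr_extra == 1 ? "y" : "ies", extra[0].start_pfn, extra[0].n_pfns);
        return 0;
    }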
/arch/sparc/mm/
  init_32.c
      65  unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;    in calc_highpages() local
      71  if (start_pfn < max_low_pfn)    in calc_highpages()
      72  start_pfn = max_low_pfn;    in calc_highpages()
      74  nr += end_pfn - start_pfn;    in calc_highpages()
     134  unsigned long start_pfn, bytes_avail, size;    in bootmem_init() local
     172  start_pfn = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end));    in bootmem_init()
     175  start_pfn >>= PAGE_SHIFT;    in bootmem_init()
     193  size = (start_pfn << PAGE_SHIFT) - phys_base;    in bootmem_init()
     236  static void map_high_region(unsigned long start_pfn, unsigned long end_pfn)    in map_high_region() argument
     241  printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn);    in map_high_region()
    [all …]
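calc_highpages() counts only the part of each memory bank above max_low_pfn: banks entirely in lowmem contribute nothing, and straddling banks are clamped. A sketch of that clamp-and-count idiom with made-up bank values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long max_low_pfn = 0x38000;          /* assumed lowmem limit */
        struct { unsigned long start_pfn, end_pfn; } banks[] = {
            { 0x00000, 0x20000 },                     /* entirely lowmem */
            { 0x30000, 0x40000 },                     /* straddles the limit */
        };
        unsigned long nr = 0;

        for (int i = 0; i < 2; i++) {
            unsigned long start_pfn = banks[i].start_pfn;
            unsigned long end_pfn = banks[i].end_pfn;

            if (end_pfn <= max_low_pfn)
                continue;                             /* nothing above lowmem */
            if (start_pfn < max_low_pfn)
                start_pfn = max_low_pfn;              /* clamp to the limit */
            nr += end_pfn - start_pfn;
        }
        printf("highmem pages: %#lx\n", nr);          /* 0x40000 - 0x38000 */
        return 0;
    }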
/arch/sh/mm/
  numa.c
      28  unsigned long start_pfn, end_pfn;    in setup_bootmem_node() local
      33  start_pfn = PFN_DOWN(start);    in setup_bootmem_node()
      41  __add_active_range(nid, start_pfn, end_pfn);    in setup_bootmem_node()
      51  NODE_DATA(nid)->node_start_pfn = start_pfn;    in setup_bootmem_node()
      52  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;    in setup_bootmem_node()
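setup_bootmem_node() ends with the node span bookkeeping that recurs throughout this listing (sh, loongson64, sgi-ip27, powerpc numa.c): record the node's first pfn and the number of pfns it spans, holes included. A sketch with a made-up pfn range and a minimal stand-in for the kernel's pglist_data:

    #include <stdio.h>

    struct pglist_data { unsigned long node_start_pfn, node_spanned_pages; };

    int main(void)
    {
        struct pglist_data node;
        unsigned long start_pfn = 0x80000, end_pfn = 0xc0000;  /* made-up range */

        node.node_start_pfn     = start_pfn;
        node.node_spanned_pages = end_pfn - start_pfn;  /* spanned, not present */
        printf("node spans %#lx pages from pfn %#lx\n",
               node.node_spanned_pages, node.node_start_pfn);
        return 0;
    }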
  init.c
     210  unsigned long start_pfn, end_pfn;    in allocate_pgdat() local
     212  get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);    in allocate_pgdat()
     223  NODE_DATA(nid)->node_start_pfn = start_pfn;    in allocate_pgdat()
     224  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;    in allocate_pgdat()
     229  unsigned long start_pfn, end_pfn;    in do_init_bootmem() local
     233  for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)    in do_init_bootmem()
     234  __add_active_range(0, start_pfn, end_pfn);    in do_init_bootmem()
     247  unsigned long start_pfn;    in early_reserve_mem() local
     255  start_pfn = PFN_UP(__pa(_end));    in early_reserve_mem()
     263  memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);    in early_reserve_mem()
    [all …]
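early_reserve_mem() uses PFN_UP(__pa(_end)) where setup_bootmem_node() above used PFN_DOWN(start): reserving through a partial page must round up, while locating the page containing an address rounds down. A sketch of the two macros, assuming PAGE_SHIFT = 12:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

    int main(void)
    {
        unsigned long addr = 0x12345;  /* not page aligned */

        /* PFN_DOWN names the page containing addr; PFN_UP names the first
         * whole page at or above addr (as when reserving up to _end). */
        printf("PFN_DOWN(%#lx) = %#lx, PFN_UP(%#lx) = %#lx\n",
               addr, PFN_DOWN(addr), addr, PFN_UP(addr));
        return 0;
    }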
/arch/x86/mm/
  init.c
     345  unsigned long start_pfn, unsigned long end_pfn,    in save_mr() argument
     348  if (start_pfn < end_pfn) {    in save_mr()
     351  mr[nr_range].start = start_pfn<<PAGE_SHIFT;    in save_mr()
     423  unsigned long start_pfn, end_pfn, limit_pfn;    in split_mem_range() local
     430  pfn = start_pfn = PFN_DOWN(start);    in split_mem_range()
     447  if (start_pfn < end_pfn) {    in split_mem_range()
     448  nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);    in split_mem_range()
     453  start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));    in split_mem_range()
     462  if (start_pfn < end_pfn) {    in split_mem_range()
     463  nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,    in split_mem_range()
    [all …]
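split_mem_range() carves a byte range into an unaligned head, a large-page-aligned body, and an unaligned tail, so the body can be mapped with big pages. A simplified sketch of the 2 MiB (PMD) case only; the real function also handles 1 GiB pages and several boundary cases:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PMD_SIZE   (1UL << 21)             /* 2 MiB */
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    static unsigned long round_up_pfn(unsigned long pfn, unsigned long align)
    {
        return (pfn + align - 1) & ~(align - 1);   /* align is a power of two */
    }

    int main(void)
    {
        unsigned long start_pfn = PFN_DOWN(0x00123000UL);
        unsigned long limit_pfn = PFN_DOWN(0x00687000UL);
        unsigned long pmd_pfns  = PFN_DOWN(PMD_SIZE);

        unsigned long head_end = round_up_pfn(start_pfn, pmd_pfns);
        unsigned long body_end = limit_pfn & ~(pmd_pfns - 1);

        printf("head: [%#lx, %#lx) 4k pages\n", start_pfn, head_end);
        printf("body: [%#lx, %#lx) 2M pages\n", head_end, body_end);
        printf("tail: [%#lx, %#lx) 4k pages\n", body_end, limit_pfn);
        return 0;
    }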
  init_32.c
     265  unsigned long start_pfn, end_pfn;    in kernel_physical_mapping_init() local
     275  start_pfn = start >> PAGE_SHIFT;    in kernel_physical_mapping_init()
     299  pfn = start_pfn;    in kernel_physical_mapping_init()
     408  unsigned long start_pfn, unsigned long end_pfn)    in add_highpages_with_active_regions() argument
     415  start_pfn, end_pfn);    in add_highpages_with_active_regions()
     417  start_pfn, end_pfn);    in add_highpages_with_active_regions()
     786  unsigned long start_pfn = start >> PAGE_SHIFT;    in arch_add_memory() local
     801  return __add_pages(nid, start_pfn, nr_pages, params);    in arch_add_memory()
     806  unsigned long start_pfn = start >> PAGE_SHIFT;    in arch_remove_memory() local
     809  __remove_pages(start_pfn, nr_pages, altmap);    in arch_remove_memory()
  ioremap.c
      71  unsigned long start_pfn, stop_pfn;    in __ioremap_check_ram() local
      77  start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;    in __ioremap_check_ram()
      79  if (stop_pfn > start_pfn) {    in __ioremap_check_ram()
      80  for (i = 0; i < (stop_pfn - start_pfn); ++i)    in __ioremap_check_ram()
      81  if (pfn_valid(start_pfn + i) &&    in __ioremap_check_ram()
      82  !PageReserved(pfn_to_page(start_pfn + i)))    in __ioremap_check_ram()
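__ioremap_check_ram() rounds the start pfn up, so a partial page at the front of the resource is never misreported as RAM. A sketch of the inward-rounding walk; the stop computation (rounding the inclusive resource end down) is an assumption here, and is_ram() is a stand-in for the kernel's pfn_valid()/PageReserved() pair:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    static bool is_ram(unsigned long pfn)
    {
        return pfn >= 0x100 && pfn < 0x200;   /* hypothetical RAM window */
    }

    static bool range_has_ram(unsigned long res_start, unsigned long res_end)
    {
        /* Round start up and stop down: edge fragments don't count. */
        unsigned long start_pfn = (res_start + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long stop_pfn  = (res_end + 1) >> PAGE_SHIFT;

        for (unsigned long i = 0; start_pfn + i < stop_pfn; i++)
            if (is_ram(start_pfn + i))
                return true;
        return false;
    }

    int main(void)
    {
        printf("%d\n", range_has_ram(0x0ff800UL, 0x100fffUL));  /* overlaps pfn 0x100 */
        return 0;
    }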
/arch/parisc/mm/
  init.c
     132  if (pmem_ranges[j-1].start_pfn <    in setup_bootmem()
     133  pmem_ranges[j].start_pfn) {    in setup_bootmem()
     150  if (pmem_ranges[i].start_pfn -    in setup_bootmem()
     151  (pmem_ranges[i-1].start_pfn +    in setup_bootmem()
     156  pmem_ranges[i].start_pfn -    in setup_bootmem()
     157  (pmem_ranges[i-1].start_pfn +    in setup_bootmem()
     173  start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);    in setup_bootmem()
     227  end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;    in setup_bootmem()
     230  hole_pages = pmem_ranges[i].start_pfn - end_pfn;    in setup_bootmem()
     232  pmem_holes[npmem_holes].start_pfn = end_pfn;    in setup_bootmem()
    [all …]
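setup_bootmem() first sorts pmem_ranges by start_pfn, then records the gap between one range's end and the next range's start as a hole (lines 227-232 above). A sketch of that hole computation with illustrative ranges:

    #include <stdio.h>

    struct range { unsigned long start_pfn, pages; };

    int main(void)
    {
        /* Already sorted by start_pfn, as setup_bootmem() ensures. */
        struct range pmem_ranges[] = { { 0x000, 0x400 }, { 0x600, 0x200 } };
        unsigned long end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;

        for (int i = 1; i < 2; i++) {
            unsigned long hole_pages = pmem_ranges[i].start_pfn - end_pfn;

            if (hole_pages)
                printf("hole: start_pfn=%#lx pages=%#lx\n", end_pfn, hole_pages);
            end_pfn = pmem_ranges[i].start_pfn + pmem_ranges[i].pages;
        }
        return 0;
    }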
/arch/mips/loongson64/
  numa.c
      88  unsigned long start_pfn, end_pfn;    in node_mem_init() local
      97  get_pfn_range_for_nid(node, &start_pfn, &end_pfn);    in node_mem_init()
      99  node, start_pfn, end_pfn);    in node_mem_init()
     111  NODE_DATA(node)->node_start_pfn = start_pfn;    in node_mem_init()
     112  NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;    in node_mem_init()
     134  memblock_reserve(0, PAGE_SIZE * start_pfn);    in node_mem_init()
/arch/powerpc/mm/
  mem.c
     122  int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,    in add_pages() argument
     127  ret = __add_pages(nid, start_pfn, nr_pages, params);    in add_pages()
     132  update_end_of_memory_vars(start_pfn << PAGE_SHIFT,    in add_pages()
     141  unsigned long start_pfn = start >> PAGE_SHIFT;    in arch_add_memory() local
     148  rc = add_pages(nid, start_pfn, nr_pages, params);    in arch_add_memory()
     156  unsigned long start_pfn = start >> PAGE_SHIFT;    in arch_remove_memory() local
     159  __remove_pages(start_pfn, nr_pages, altmap);    in arch_remove_memory()
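Every arch_add_memory()/arch_remove_memory() in this listing (powerpc here, plus the x86, s390, and ia64 variants elsewhere) performs the same byte-range to pfn-range conversion before handing off to __add_pages()/__remove_pages(). A sketch of that conversion, assuming PAGE_SHIFT = 12 and page-aligned inputs:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long long start = 0x100000000ULL;   /* hypothetical hotplug base */
        unsigned long long size  = 0x10000000ULL;    /* 256 MiB */

        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages  = size >> PAGE_SHIFT;

        /* start and size are page aligned here, so the shifts lose nothing;
         * the kernel checks this earlier in the hotplug path. */
        printf("start_pfn=%#lx nr_pages=%#lx\n", start_pfn, nr_pages);
        return 0;
    }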
  init_64.c
      74  unsigned long start_pfn;    in vmemmap_subsection_start() local
      78  start_pfn = (offset / sizeof(struct page)) & PAGE_SUBSECTION_MASK;    in vmemmap_subsection_start()
      79  return pfn_to_page(start_pfn);    in vmemmap_subsection_start()
     189  unsigned long start_pfn = page_to_pfn((struct page *)start);    in altmap_cross_boundary() local
     191  if ((start_pfn + nr_pfn - 1) > altmap->end_pfn)    in altmap_cross_boundary()
     194  if (start_pfn < altmap->base_pfn)    in altmap_cross_boundary()
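vmemmap_subsection_start() aligns a pfn down to the start of its subsection by masking off the low bits. A sketch of the idiom; the 512-pfn subsection (2 MiB with 4 KiB pages) is an assumption for illustration, not a statement of the kernel's configured value:

    #include <stdio.h>

    #define PAGES_PER_SUBSECTION 512UL
    #define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION - 1))

    int main(void)
    {
        unsigned long pfn = 0x1234;

        /* Clearing the low bits rounds down to the subsection boundary. */
        printf("%#lx -> %#lx\n", pfn, pfn & PAGE_SUBSECTION_MASK);
        return 0;
    }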
  numa.c
    1030  unsigned long start_pfn, end_pfn;    in setup_nonnuma() local
    1037  for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {    in setup_nonnuma()
    1039  memblock_set_node(PFN_PHYS(start_pfn),    in setup_nonnuma()
    1040  PFN_PHYS(end_pfn - start_pfn),    in setup_nonnuma()
    1082  static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)    in setup_node_data() argument
    1084  u64 spanned_pages = end_pfn - start_pfn;    in setup_node_data()
    1107  NODE_DATA(nid)->node_start_pfn = start_pfn;    in setup_node_data()
    1215  unsigned long start_pfn, end_pfn;    in initmem_init() local
    1217  get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);    in initmem_init()
    1218  setup_node_data(nid, start_pfn, end_pfn);    in initmem_init()
/arch/mips/mm/
  ioremap.c
      25  static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,    in __ioremap_check_ram() argument
      31  if (pfn_valid(start_pfn + i) &&    in __ioremap_check_ram()
      32  !PageReserved(pfn_to_page(start_pfn + i)))    in __ioremap_check_ram()
/arch/x86/platform/efi/
  efi_32.c
      38  u64 start_pfn, end_pfn, end;    in efi_map_region() local
      42  start_pfn = PFN_DOWN(md->phys_addr);    in efi_map_region()
      47  if (pfn_range_is_mapped(start_pfn, end_pfn)) {    in efi_map_region()
/arch/sh/kernel/
  setup.c
     198  void __init __add_active_range(unsigned int nid, unsigned long start_pfn,    in __add_active_range() argument
     206  start = start_pfn << PAGE_SHIFT;    in __add_active_range()
     216  start_pfn, end_pfn);    in __add_active_range()
     240  memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),    in __add_active_range()
/arch/s390/mm/
  init.c
     267  mem_data.start = arg->start_pfn << PAGE_SHIFT;    in s390_cma_mem_notifier()
     289  unsigned long start_pfn = PFN_DOWN(start);    in arch_add_memory() local
     304  rc = __add_pages(nid, start_pfn, size_pages, params);    in arch_add_memory()
     312  unsigned long start_pfn = start >> PAGE_SHIFT;    in arch_remove_memory() local
     315  __remove_pages(start_pfn, nr_pages, altmap);    in arch_remove_memory()
/arch/x86/include/asm/
  highmem.h
      71  extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
  page_types.h
      72  bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
/arch/mips/sgi-ip27/
  ip27-memory.c
     354  unsigned long start_pfn, end_pfn;    in node_mem_init() local
     356  get_pfn_range_for_nid(node, &start_pfn, &end_pfn);    in node_mem_init()
     364  NODE_DATA(node)->node_start_pfn = start_pfn;    in node_mem_init()
     365  NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;    in node_mem_init()
/arch/ia64/mm/
  init.c
     472  unsigned long start_pfn = start >> PAGE_SHIFT;    in arch_add_memory() local
     479  ret = __add_pages(nid, start_pfn, nr_pages, params);    in arch_add_memory()
     489  unsigned long start_pfn = start >> PAGE_SHIFT;    in arch_remove_memory() local
     492  __remove_pages(start_pfn, nr_pages, altmap);    in arch_remove_memory()
/arch/sh/include/asm/
  mmzone.h
      40  void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
/arch/s390/include/asm/
  diag.h
      46  static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn)    in diag10_range() argument
      50  start_addr = start_pfn << PAGE_SHIFT;    in diag10_range()
      51  end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT;    in diag10_range()
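diag10_range() converts a pfn count into an address pair where start_addr is the first byte of the first page and end_addr the first byte of the last page in the range, not one past the end. A sketch of that arithmetic, assuming PAGE_SHIFT = 12:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long start_pfn = 0x10, num_pfn = 4;
        unsigned long start_addr = start_pfn << PAGE_SHIFT;
        unsigned long end_addr   = (start_pfn + num_pfn - 1) << PAGE_SHIFT;

        /* end_addr points at the last page, so a page-granular consumer
         * covers the whole range inclusively. */
        printf("pages %#lx..%#lx -> addrs %#lx and %#lx\n",
               start_pfn, start_pfn + num_pfn - 1, start_addr, end_addr);
        return 0;
    }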
/arch/x86/mm/pat/
  memtype.c
     459  unsigned long start_pfn = start >> PAGE_SHIFT;    in pat_pagerange_is_ram() local
     461  struct pagerange_state state = {start_pfn, 0, 0};    in pat_pagerange_is_ram()
     470  if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)    in pat_pagerange_is_ram()
     471  start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;    in pat_pagerange_is_ram()
     473  if (start_pfn < end_pfn) {    in pat_pagerange_is_ram()
     474  ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,    in pat_pagerange_is_ram()
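pat_pagerange_is_ram() lifts start_pfn past the legacy ISA window (the first 1 MiB, ISA_END_ADDRESS) before walking RAM ranges, and walks only if a non-empty range remains. A sketch of the clamp, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define ISA_END_ADDRESS 0x100000UL          /* 1 MiB */

    int main(void)
    {
        unsigned long start_pfn = 0x80;         /* below the 1 MiB mark */
        unsigned long end_pfn   = 0x200;

        /* Skip the ISA window entirely. */
        if (start_pfn < (ISA_END_ADDRESS >> PAGE_SHIFT))
            start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

        if (start_pfn < end_pfn)
            printf("walk RAM over [%#lx, %#lx)\n", start_pfn, end_pfn);
        return 0;
    }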
/arch/alpha/kernel/
  setup.c
     319  i, cluster->usage, cluster->start_pfn,    in setup_memory()
     320  cluster->start_pfn + cluster->numpages);    in setup_memory()
     322  end = cluster->start_pfn + cluster->numpages;    in setup_memory()
     326  memblock_add(PFN_PHYS(cluster->start_pfn),    in setup_memory()
     333  memblock_reserve(PFN_PHYS(cluster->start_pfn),    in setup_memory()
     398  if (pfn >= cluster->start_pfn &&    in page_is_ram()
     399  pfn < cluster->start_pfn + cluster->numpages) {    in page_is_ram()
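page_is_ram() on alpha is a plain membership test: a pfn is RAM iff it falls inside some cluster's [start_pfn, start_pfn + numpages) window. A sketch with two made-up clusters:

    #include <stdio.h>

    struct cluster { unsigned long start_pfn, numpages; };

    static int page_is_ram(const struct cluster *c, int n, unsigned long pfn)
    {
        for (int i = 0; i < n; i++)
            if (pfn >= c[i].start_pfn && pfn < c[i].start_pfn + c[i].numpages)
                return 1;       /* half-open interval: end pfn is excluded */
        return 0;
    }

    int main(void)
    {
        struct cluster clusters[] = { { 0x000, 0x100 }, { 0x400, 0x100 } };

        printf("%d %d\n", page_is_ram(clusters, 2, 0x0ff),
                          page_is_ram(clusters, 2, 0x200));
        return 0;
    }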