/arch/x86/mm/

init.c   (range-splitting sketch after this directory's entries)
    190  unsigned long start_pfn, unsigned long end_pfn,  in save_mr() argument
    193  if (start_pfn < end_pfn) {  in save_mr()
    197  mr[nr_range].end = end_pfn<<PAGE_SHIFT;  in save_mr()
    268  unsigned long start_pfn, end_pfn, limit_pfn;  in split_mem_range() local
    284  end_pfn = PFN_DOWN(PMD_SIZE);  in split_mem_range()
    286  end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    288  end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    290  if (end_pfn > limit_pfn)  in split_mem_range()
    291  end_pfn = limit_pfn;  in split_mem_range()
    292  if (start_pfn < end_pfn) {  in split_mem_range()
    [all …]

numa_32.c
    65  unsigned long end_pfn)  in node_memmap_size_bytes() argument
    67  unsigned long nr_pages = end_pfn - start_pfn;  in node_memmap_size_bytes()

init_32.c
    258  unsigned long start_pfn, end_pfn;  in kernel_physical_mapping_init() local
    269  end_pfn = end >> PAGE_SHIFT;  in kernel_physical_mapping_init()
    298  if (pfn >= end_pfn)  in kernel_physical_mapping_init()
    306  for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;  in kernel_physical_mapping_init()
    346  for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;  in kernel_physical_mapping_init()
    432  unsigned long start_pfn, unsigned long end_pfn)  in add_highpages_with_active_regions() argument
    439  start_pfn, end_pfn);  in add_highpages_with_active_regions()
    441  start_pfn, end_pfn);  in add_highpages_with_active_regions()

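The init.c matches above (save_mr(), split_mem_range()) all turn on one idiom: carve a half-open page-frame range [start_pfn, end_pfn) into a head and tail of base pages around a large-page-aligned middle, clamp it, and only record non-empty pieces. Below is a minimal standalone sketch of that arithmetic, not the kernel code itself; the PAGE_SHIFT/PMD_SHIFT values, the rounding helpers and the example frame numbers are assumptions chosen for illustration.

#include <stdio.h>

#define PAGE_SHIFT 12UL                                 /* assume 4 KiB base pages  */
#define PMD_SHIFT  21UL                                 /* assume 2 MiB large pages */
#define PMD_PAGES  (1UL << (PMD_SHIFT - PAGE_SHIFT))    /* base frames per PMD      */

/* round a pfn up/down to a power-of-two alignment (hypothetical helpers) */
static unsigned long round_up_pfn(unsigned long pfn, unsigned long align)
{
	return (pfn + align - 1) & ~(align - 1);
}

static unsigned long round_down_pfn(unsigned long pfn, unsigned long align)
{
	return pfn & ~(align - 1);
}

/* record a range only if it is non-empty, like save_mr() does */
static void save_range(const char *what, unsigned long s, unsigned long e)
{
	if (s < e)
		printf("%-11s pfns [%#lx, %#lx)\n", what, s, e);
}

int main(void)
{
	unsigned long start_pfn = 0x103;        /* made-up, deliberately unaligned */
	unsigned long end_pfn   = 0x7f5;

	/* head: base pages up to the first 2 MiB-aligned frame */
	unsigned long mid_start = round_up_pfn(start_pfn, PMD_PAGES);
	/* tail: base pages after the last 2 MiB-aligned frame  */
	unsigned long mid_end   = round_down_pfn(end_pfn, PMD_PAGES);

	/* keep the pieces inside [start_pfn, end_pfn) */
	if (mid_start > end_pfn)
		mid_start = end_pfn;
	if (mid_end < mid_start)
		mid_end = mid_start;

	save_range("head (4k)", start_pfn, mid_start);
	save_range("middle (2M)", mid_start, mid_end);
	save_range("tail (4k)", mid_end, end_pfn);
	return 0;
}

Half-open ranges keep the bookkeeping simple: the page count of any piece is end minus start, and adjacent pieces never overlap.
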
/arch/mips/loongson64/loongson-3/

numa.c
    128  u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size;  in szmem() local
    144  end_pfn = start_pfn + node_psize;  in szmem()
    149  start_pfn, end_pfn, num_physpages);  in szmem()
    153  PFN_PHYS(end_pfn - start_pfn), node);  in szmem()
    158  end_pfn = start_pfn + node_psize;  in szmem()
    163  start_pfn, end_pfn, num_physpages);  in szmem()
    167  PFN_PHYS(end_pfn - start_pfn), node);  in szmem()
    185  unsigned long start_pfn, end_pfn, freepfn;  in node_mem_init() local
    191  get_pfn_range_for_nid(node, &start_pfn, &end_pfn);  in node_mem_init()
    196  node, start_pfn, end_pfn, freepfn);  in node_mem_init()
    [all …]

/arch/sh/mm/

numa.c
    30  unsigned long start_pfn, end_pfn;  in setup_bootmem_node() local
    37  end_pfn = PFN_DOWN(end);  in setup_bootmem_node()
    44  __add_active_range(nid, start_pfn, end_pfn);  in setup_bootmem_node()
    53  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;  in setup_bootmem_node()
    56  bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);  in setup_bootmem_node()
    60  start_pfn, end_pfn);  in setup_bootmem_node()
    62  free_bootmem_with_active_regions(nid, end_pfn);  in setup_bootmem_node()

init.c
    195  unsigned long start_pfn, end_pfn;  in allocate_pgdat() local
    200  get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);  in allocate_pgdat()
    204  SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);  in allocate_pgdat()
    219  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;  in allocate_pgdat()
    225  unsigned long end_pfn;  in bootmem_init_one_node() local
    234  end_pfn = pgdat_end_pfn(p);  in bootmem_init_one_node()
    242  init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);  in bootmem_init_one_node()
    244  free_bootmem_with_active_regions(nid, end_pfn);  in bootmem_init_one_node()
    270  unsigned long start_pfn, end_pfn;  in do_init_bootmem() local
    272  end_pfn = memblock_region_memory_end_pfn(reg);  in do_init_bootmem()
    [all …]

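The numa.c and init.c matches above show the usual NUMA bring-up pattern: a node's byte extent is converted to page frames, with the start rounded up and the end rounded down so partial pages are dropped, and the pgdat span is then just end_pfn - start_pfn. Below is a standalone sketch of that conversion in plain C; the PFN_* macros mirror the helpers in <linux/pfn.h>, and the example node extent is made up.

#include <stdio.h>

#define PAGE_SHIFT 12UL
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* byte address -> page frame number */
#define PFN_UP(x)    (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)   /* round up to the next frame   */
#define PFN_DOWN(x)  ((x) >> PAGE_SHIFT)                     /* round down to this frame     */

int main(void)
{
	/* hypothetical node extent in bytes, deliberately not page aligned */
	unsigned long long node_start = 0x40000800ULL;
	unsigned long long node_end   = 0x7fe00000ULL;

	/* rounding the start up and the end down keeps only fully covered frames */
	unsigned long start_pfn = (unsigned long)PFN_UP(node_start);
	unsigned long end_pfn   = (unsigned long)PFN_DOWN(node_end);

	/* what allocate_pgdat()/setup_bootmem_node() record in the pgdat */
	unsigned long node_start_pfn     = start_pfn;
	unsigned long node_spanned_pages = end_pfn - start_pfn;
	unsigned long long spanned_bytes =
		(unsigned long long)node_spanned_pages << PAGE_SHIFT;

	printf("node_start_pfn=%#lx spanned=%lu pages (%llu bytes)\n",
	       node_start_pfn, node_spanned_pages, spanned_bytes);
	return 0;
}
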
/arch/metag/mm/

numa.c
    33  unsigned long start_pfn, end_pfn;  in setup_bootmem_node() local
    40  end_pfn = end >> PAGE_SHIFT;  in setup_bootmem_node()
    45  PFN_PHYS(end_pfn - start_pfn),  in setup_bootmem_node()
    56  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;  in setup_bootmem_node()
    59  bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);  in setup_bootmem_node()
    63  start_pfn, end_pfn);  in setup_bootmem_node()
    65  free_bootmem_with_active_regions(nid, end_pfn);  in setup_bootmem_node()

init.c
    111  unsigned long start_pfn, end_pfn;  in allocate_pgdat() local
    116  get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);  in allocate_pgdat()
    120  SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);  in allocate_pgdat()
    136  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;  in allocate_pgdat()
    142  unsigned long end_pfn;  in bootmem_init_one_node() local
    151  end_pfn = pgdat_end_pfn(p);  in bootmem_init_one_node()
    153  if (end_pfn > max_low_pfn)  in bootmem_init_one_node()
    154  end_pfn = max_low_pfn;  in bootmem_init_one_node()
    157  total_pages = bootmem_bootmap_pages(end_pfn - p->node_start_pfn);  in bootmem_init_one_node()
    163  init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);  in bootmem_init_one_node()
    [all …]

/arch/s390/numa/

numa.c
    107  unsigned long start_pfn, end_pfn;  in numa_setup_memory() local
    112  end_pfn = 0;  in numa_setup_memory()
    116  if (t_end > end_pfn)  in numa_setup_memory()
    117  end_pfn = t_end;  in numa_setup_memory()
    119  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;  in numa_setup_memory()

/arch/sparc/mm/

init_32.c
    78  unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;  in calc_highpages() local
    80  if (end_pfn <= max_low_pfn)  in calc_highpages()
    86  nr += end_pfn - start_pfn;  in calc_highpages()
    276  static void map_high_region(unsigned long start_pfn, unsigned long end_pfn)  in map_high_region() argument
    281  printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn);  in map_high_region()
    284  for (tmp = start_pfn; tmp < end_pfn; tmp++)  in map_high_region()
    325  unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;  in mem_init() local
    327  if (end_pfn <= highstart_pfn)  in mem_init()
    333  map_high_region(start_pfn, end_pfn);  in mem_init()

/arch/unicore32/mm/

init.c
    132  unsigned long end_pfn)  in uc32_bootmem_init() argument
    143  boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);  in uc32_bootmem_init()
    145  __pfn_to_phys(end_pfn));  in uc32_bootmem_init()
    153  init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);  in uc32_bootmem_init()
    160  if (end >= end_pfn)  in uc32_bootmem_init()
    161  end = end_pfn;  in uc32_bootmem_init()
    173  if (end >= end_pfn)  in uc32_bootmem_init()
    174  end = end_pfn;  in uc32_bootmem_init()
    317  free_memmap(unsigned long start_pfn, unsigned long end_pfn)  in free_memmap() argument
    326  end_pg = pfn_to_page(end_pfn);  in free_memmap()

/arch/sh/kernel/

swsusp.c   (pfn_is_nosave() sketch after this directory's entries)
    25  unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;  in pfn_is_nosave() local
    27  return (pfn >= begin_pfn) && (pfn < end_pfn);  in pfn_is_nosave()

setup.c
    192  unsigned long end_pfn)  in __add_active_range() argument
    200  end = end_pfn << PAGE_SHIFT;  in __add_active_range()
    209  start_pfn, end_pfn);  in __add_active_range()
    233  memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),  in __add_active_range()

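The swsusp.c matches above (the same two lines appear in /arch/unicore32/kernel/hibernate.c further down) are essentially the whole of pfn_is_nosave(): a frame is left out of the hibernation image when it falls in the half-open pfn range covering the __nosave section. A small standalone sketch of that range test; the physical addresses here are made-up stand-ins for __pa(&__nosave_begin) and __pa(&__nosave_end).

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SHIFT 12UL
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* stand-ins for the physical addresses of the __nosave section bounds */
static const unsigned long nosave_begin_pa = 0x01234000;
static const unsigned long nosave_end_pa   = 0x01236789;

static bool pfn_is_nosave(unsigned long pfn)
{
	unsigned long begin_pfn = nosave_begin_pa >> PAGE_SHIFT;
	/* round the end up so a partially used last page is skipped too */
	unsigned long end_pfn = (nosave_end_pa + PAGE_SIZE - 1) >> PAGE_SHIFT;

	return (pfn >= begin_pfn) && (pfn < end_pfn);
}

int main(void)
{
	printf("0x1233: %d  0x1234: %d  0x1236: %d  0x1237: %d\n",
	       pfn_is_nosave(0x1233), pfn_is_nosave(0x1234),
	       pfn_is_nosave(0x1236), pfn_is_nosave(0x1237));
	return 0;
}
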
/arch/mips/sgi-ip27/

ip27-memory.c
    393  unsigned long start_pfn, end_pfn;  in node_mem_init() local
    395  get_pfn_range_for_nid(node, &start_pfn, &end_pfn);  in node_mem_init()
    405  NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;  in node_mem_init()
    413  start_pfn, end_pfn);  in node_mem_init()
    414  free_bootmem_with_active_regions(node, end_pfn);  in node_mem_init()
    467  unsigned long start_pfn, end_pfn;  in paging_init() local
    469  get_pfn_range_for_nid(node, &start_pfn, &end_pfn);  in paging_init()
    471  if (end_pfn > max_low_pfn)  in paging_init()
    472  max_low_pfn = end_pfn;  in paging_init()

/arch/s390/kernel/

early.c
    83  unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;  in create_kernel_nss() local
    116  end_pfn = PFN_UP(__pa(&_end));  in create_kernel_nss()
    117  min_size = end_pfn << 2;  in create_kernel_nss()
    122  eshared_pfn - 1, eshared_pfn, end_pfn);  in create_kernel_nss()
    202  unsigned long end_pfn, init_pfn;  in init_kernel_storage_key() local
    204  end_pfn = PFN_UP(__pa(&_end));  in init_kernel_storage_key()
    206  for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)  in init_kernel_storage_key()

/arch/x86/xen/

setup.c
    254  unsigned long end_pfn, unsigned long nr_pages)  in xen_set_identity_and_release_chunk() argument
    259  WARN_ON(start_pfn > end_pfn);  in xen_set_identity_and_release_chunk()
    262  end = min(end_pfn, nr_pages);  in xen_set_identity_and_release_chunk()
    281  set_phys_range_identity(start_pfn, end_pfn);  in xen_set_identity_and_release_chunk()
    389  unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,  in xen_set_identity_and_remap_chunk() argument
    394  unsigned long n = end_pfn - start_pfn;  in xen_set_identity_and_remap_chunk()
    436  for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)  in xen_set_identity_and_remap_chunk()
    445  unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,  in xen_count_remap_pages() argument
    451  return remap_pages + min(end_pfn, nr_pages) - start_pfn;  in xen_count_remap_pages()
    455  unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,  in xen_foreach_remap_area() argument
    [all …]

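The Xen setup.c matches above keep clamping a chunk's [start_pfn, end_pfn) against the domain's nr_pages with min() before counting or remapping frames; xen_count_remap_pages() at line 451 is the counting case. The sketch below mirrors only that clamp-and-count arithmetic with made-up values, not Xen's actual identity-mapping or remapping logic.

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* number of frames of [start_pfn, end_pfn) that lie below nr_pages */
static unsigned long count_clamped(unsigned long start_pfn,
				   unsigned long end_pfn,
				   unsigned long nr_pages)
{
	unsigned long end = min_ul(end_pfn, nr_pages);

	return end > start_pfn ? end - start_pfn : 0;
}

int main(void)
{
	unsigned long nr_pages = 0x40000;       /* hypothetical domain size in frames */

	/* one chunk entirely below the limit, one straddling it */
	printf("%lu\n", count_clamped(0x10000, 0x20000, nr_pages));   /* 65536 */
	printf("%lu\n", count_clamped(0x3f000, 0x48000, nr_pages));   /*  4096 */
	return 0;
}
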
/arch/mn10300/kernel/

setup.c
    95  unsigned long kstart_pfn, start_pfn, free_pfn, end_pfn;  in setup_arch() local
    128  end_pfn = PFN_DOWN(__pa(memory_end));  in setup_arch()
    133  end_pfn);  in setup_arch()
    140  PFN_PHYS(end_pfn - free_pfn));  in setup_arch()

/arch/x86/kernel/

e820.c
    766  unsigned long end_pfn;  in e820_end_pfn() local
    772  end_pfn = (ei->addr + ei->size) >> PAGE_SHIFT;  in e820_end_pfn()
    776  if (end_pfn > limit_pfn) {  in e820_end_pfn()
    780  if (end_pfn > last_pfn)  in e820_end_pfn()
    781  last_pfn = end_pfn;  in e820_end_pfn()
    1127  unsigned long start_pfn, end_pfn;  in memblock_find_dma_reserve() local
    1137  for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {  in memblock_find_dma_reserve()
    1139  end_pfn = min(end_pfn, MAX_DMA_PFN);  in memblock_find_dma_reserve()
    1140  nr_pages += end_pfn - start_pfn;  in memblock_find_dma_reserve()
    1146  end_pfn = min_t(unsigned long, PFN_DOWN(end), MAX_DMA_PFN);  in memblock_find_dma_reserve()
    [all …]

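The e820.c matches above come from two loops with the same shape: e820_end_pfn() (lines 766-781) walks the memory map, derives each entry's end_pfn from addr + size, clamps it to a limit, and keeps the maximum as last_pfn; memblock_find_dma_reserve() does the analogous clamp against MAX_DMA_PFN while summing pages. Below is a standalone sketch of the first loop over a toy memory map; the struct, the layout and the 4 GiB limit are assumptions for illustration.

#include <stdio.h>

#define PAGE_SHIFT 12UL

/* a toy memory-map entry: base address and size in bytes */
struct mem_entry {
	unsigned long long addr;
	unsigned long long size;
};

/* highest page frame covered by any entry, clamped to limit_pfn */
static unsigned long end_pfn_of_map(const struct mem_entry *map, int n,
				    unsigned long limit_pfn)
{
	unsigned long last_pfn = 0;
	int i;

	for (i = 0; i < n; i++) {
		unsigned long end_pfn =
			(unsigned long)((map[i].addr + map[i].size) >> PAGE_SHIFT);

		if (end_pfn > limit_pfn)
			end_pfn = limit_pfn;
		if (end_pfn > last_pfn)
			last_pfn = end_pfn;
	}
	return last_pfn;
}

int main(void)
{
	/* hypothetical RAM layout: low memory, a hole, then high memory */
	struct mem_entry map[] = {
		{ 0x00000000ULL, 0x0009f000ULL },
		{ 0x00100000ULL, 0x7fe00000ULL },
		{ 0x100000000ULL, 0x80000000ULL },
	};
	unsigned long limit_pfn = 1UL << (32 - PAGE_SHIFT);   /* 4 GiB limit */

	printf("last_pfn=%#lx\n",
	       end_pfn_of_map(map, (int)(sizeof(map) / sizeof(map[0])), limit_pfn));
	return 0;
}
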
/arch/unicore32/kernel/

hibernate.c
    148  unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;  in pfn_is_nosave() local
    150  return (pfn >= begin_pfn) && (pfn < end_pfn);  in pfn_is_nosave()

/arch/x86/include/asm/

mtrr.h
    50  extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
    80  static inline int mtrr_trim_uncached_memory(unsigned long end_pfn)  in mtrr_trim_uncached_memory() argument

highmem.h
    75  unsigned long end_pfn);

/arch/alpha/mm/

numa.c
    307  unsigned long end_pfn = bdata->node_low_pfn;  in paging_init() local
    309  if (dma_local_pfn >= end_pfn - start_pfn)  in paging_init()
    310  zones_size[ZONE_DMA] = end_pfn - start_pfn;  in paging_init()
    313  zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn;  in paging_init()

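The numa.c match above sizes a node's zones from its pfn span: if the whole span fits under dma_local_pfn the node is all ZONE_DMA, otherwise the first dma_local_pfn frames go to ZONE_DMA and the remainder to ZONE_NORMAL (lines 309-313). A standalone sketch of that split with made-up frame counts; the else-branch ZONE_DMA assignment is an assumption filling in the elided line.

#include <stdio.h>

int main(void)
{
	/* hypothetical node span and per-node DMA boundary, in frames */
	unsigned long start_pfn     = 0x00000;
	unsigned long end_pfn       = 0x30000;
	unsigned long dma_local_pfn = 0x04000;   /* e.g. 16 MiB / 4 KiB */

	unsigned long zone_dma, zone_normal;

	if (dma_local_pfn >= end_pfn - start_pfn) {
		/* the whole node sits below the DMA limit */
		zone_dma    = end_pfn - start_pfn;
		zone_normal = 0;
	} else {
		zone_dma    = dma_local_pfn;
		zone_normal = (end_pfn - start_pfn) - dma_local_pfn;
	}

	printf("ZONE_DMA=%lu pages, ZONE_NORMAL=%lu pages\n",
	       zone_dma, zone_normal);
	return 0;
}
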
/arch/sh/include/asm/

mmzone.h
    42  unsigned long end_pfn);

/arch/powerpc/mm/

numa.c
    90  static int __init fake_numa_create_new_node(unsigned long end_pfn,  in fake_numa_create_new_node() argument
    121  if ((end_pfn << PAGE_SHIFT) > mem) {  in fake_numa_create_new_node()
    826  unsigned long start_pfn, end_pfn;  in setup_nonnuma() local
    837  end_pfn = memblock_region_memory_end_pfn(reg);  in setup_nonnuma()
    839  fake_numa_create_new_node(end_pfn, &nid);  in setup_nonnuma()
    841  PFN_PHYS(end_pfn - start_pfn),  in setup_nonnuma()
    922  static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)  in setup_node_data() argument
    924  u64 spanned_pages = end_pfn - start_pfn;  in setup_node_data()
    933  (end_pfn << PAGE_SHIFT) - 1);  in setup_node_data()
    1005  unsigned long start_pfn, end_pfn;  in initmem_init() local
    [all …]

/arch/microblaze/mm/

init.c
    190  unsigned long start_pfn, end_pfn;  in setup_memory() local
    193  end_pfn = memblock_region_memory_end_pfn(reg);  in setup_memory()
    195  (end_pfn - start_pfn) << PAGE_SHIFT,  in setup_memory()