References to "pfn" under /mm/ (source line, excerpt, enclosing function):

/mm/page_isolation.c
      15  unsigned long flags, pfn;   in set_migratetype_isolate() local
      24  pfn = page_to_pfn(page);   in set_migratetype_isolate()
      25  arg.start_pfn = pfn;   in set_migratetype_isolate()
     131  __first_valid_page(unsigned long pfn, unsigned long nr_pages)   in __first_valid_page() argument
     135  if (pfn_valid_within(pfn + i))   in __first_valid_page()
     139  return pfn_to_page(pfn + i);   in __first_valid_page()
     159  unsigned long pfn;   in start_isolate_page_range() local
     166  for (pfn = start_pfn;   in start_isolate_page_range()
     167  pfn < end_pfn;   in start_isolate_page_range()
     168  pfn += pageblock_nr_pages) {   in start_isolate_page_range()
    [all …]

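The start_isolate_page_range() excerpts show the canonical mm walk: step a pfn cursor through a range one pageblock at a time and probe each block for a valid page. A minimal userspace model, with PAGEBLOCK_NR_PAGES and mock_pfn_valid() as made-up stand-ins for the kernel's pageblock_nr_pages and pfn_valid():

    /* Userspace model of the pageblock walk; constants are assumptions. */
    #include <stdio.h>
    #include <stdbool.h>

    #define PAGEBLOCK_NR_PAGES 512UL   /* assumed: 2 MiB blocks of 4 KiB pages */

    static bool mock_pfn_valid(unsigned long pfn)
    {
        return pfn < 4096;             /* pretend the first 16 MiB has a memmap */
    }

    /* Return the first valid pfn in [pfn, pfn + nr), or -1UL: the same idea
     * as __first_valid_page(), which returns a struct page instead. */
    static unsigned long first_valid_pfn(unsigned long pfn, unsigned long nr)
    {
        for (unsigned long i = 0; i < nr; i++)
            if (mock_pfn_valid(pfn + i))
                return pfn + i;
        return -1UL;
    }

    int main(void)
    {
        unsigned long start_pfn = 0, end_pfn = 2048;

        /* Visit the range one pageblock at a time, as the isolation loop does. */
        for (unsigned long pfn = start_pfn; pfn < end_pfn; pfn += PAGEBLOCK_NR_PAGES)
            printf("block at pfn %#lx, first valid pfn %#lx\n",
                   pfn, first_valid_pfn(pfn, PAGEBLOCK_NR_PAGES));
        return 0;
    }
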
/mm/memory-failure.c
     191  unsigned long pfn, struct page *page, int flags)   in kill_proc() argument
     198  pfn, t->comm, t->pid);   in kill_proc()
     347  int fail, struct page *page, unsigned long pfn,   in kill_procs() argument
     362  pfn, tk->tsk->comm, tk->tsk->pid);   in kill_procs()
     373  pfn, page, flags) < 0)   in kill_procs()
     376  pfn, tk->tsk->comm, tk->tsk->pid);   in kill_procs()
     572  static int me_kernel(struct page *p, unsigned long pfn)   in me_kernel() argument
     580  static int me_unknown(struct page *p, unsigned long pfn)   in me_unknown() argument
     582  printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn);   in me_unknown()
     589  static int me_pagecache_clean(struct page *p, unsigned long pfn)   in me_pagecache_clean() argument
    [all …]

/mm/memory_hotplug.c
     247  unsigned long i, pfn, end_pfn, nr_pages;   in register_page_bootmem_info_node() local
     271  pfn = pgdat->node_start_pfn;   in register_page_bootmem_info_node()
     275  for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {   in register_page_bootmem_info_node()
     282  if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))   in register_page_bootmem_info_node()
     283  register_page_bootmem_info_section(pfn);   in register_page_bootmem_info_node()
     330  unsigned long pfn;   in fix_zone_id() local
     332  for (pfn = start_pfn; pfn < end_pfn; pfn++)   in fix_zone_id()
     333  set_page_links(pfn_to_page(pfn), zid, nid, pfn);   in fix_zone_id()
     553  unsigned long pfn;   in find_biggest_section_pfn() local
     556  pfn = end_pfn - 1;   in find_biggest_section_pfn()
    [all …]

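The find_biggest_section_pfn() excerpt starts at end_pfn - 1 and works downward; the sketch below models that top-down, section-granular probe. PAGES_PER_SECTION and mock_section_valid() are assumptions, and the real function returns the highest valid pfn rather than a section start:

    #include <stdio.h>
    #include <stdbool.h>

    #define PAGES_PER_SECTION 32768UL   /* assumed: 128 MiB sections of 4 KiB pages */

    static bool mock_section_valid(unsigned long pfn) { return pfn < 100000; }

    /* Walk down from end_pfn - 1 in whole-section steps and return the start
     * pfn of the highest usable section, or 0 if none: the shape of the scan
     * used when shrinking a zone from the top. */
    static unsigned long biggest_section_pfn(unsigned long start_pfn,
                                             unsigned long end_pfn)
    {
        unsigned long pfn = end_pfn - 1;

        for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
            if (mock_section_valid(pfn))
                return pfn & ~(PAGES_PER_SECTION - 1);  /* section-align down */
            if (pfn < PAGES_PER_SECTION)                /* avoid unsigned wrap */
                break;
        }
        return 0;
    }

    int main(void)
    {
        printf("%#lx\n", biggest_section_pfn(0, 262144));
        return 0;
    }
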
/mm/page_cgroup.c
      26  unsigned long pfn = page_to_pfn(page);   in lookup_page_cgroup() local
      41  offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;   in lookup_page_cgroup()
      94  unsigned long pfn = page_to_pfn(page);   in lookup_page_cgroup() local
      95  struct mem_section *section = __pfn_to_section(pfn);   in lookup_page_cgroup()
     106  return section->page_cgroup + pfn;   in lookup_page_cgroup()
     128  static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)   in init_section_page_cgroup() argument
     134  section = __pfn_to_section(pfn);   in init_section_page_cgroup()
     158  pfn &= PAGE_SECTION_MASK;   in init_section_page_cgroup()
     159  section->page_cgroup = base - pfn;   in init_section_page_cgroup()
     179  static void __free_page_cgroup(unsigned long pfn)   in __free_page_cgroup() argument
    [all …]

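Lines 158-159 and 106 show a pointer-biasing trick: the per-section array base is stored pre-offset by the section's first pfn, so lookup is a bare base + pfn with no subtraction on the fast path. A userspace model (formally this is out-of-range pointer arithmetic in ISO C, but it is exactly the flat-address-space trick the excerpts rely on; names and sizes here are made up):

    #include <stdio.h>

    #define PAGE_SECTION_MASK (~(32768UL - 1))   /* assumed 32768-page sections */

    struct mock_page_cgroup { unsigned long flags; };

    struct mock_section {
        struct mock_page_cgroup *page_cgroup;    /* biased: base - section_start_pfn */
    };

    static struct mock_page_cgroup storage[32768];
    static struct mock_section section;

    static void init_section(unsigned long pfn)
    {
        pfn &= PAGE_SECTION_MASK;                /* round down to section start */
        section.page_cgroup = storage - pfn;     /* bias the base pointer */
    }

    static struct mock_page_cgroup *lookup(unsigned long pfn)
    {
        return section.page_cgroup + pfn;        /* direct indexing by pfn */
    }

    int main(void)
    {
        init_section(40000);                     /* a pfn inside section [32768, 65536) */
        lookup(40000)->flags = 1;
        printf("storage[%lu].flags = %lu\n",
               40000UL - 32768, storage[40000 - 32768].flags);
        return 0;
    }
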
/mm/cma.c
      90  static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,   in cma_clear_bitmap() argument
      95  bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;   in cma_clear_bitmap()
     106  unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;   in cma_activate_area() local
     115  WARN_ON_ONCE(!pfn_valid(pfn));   in cma_activate_area()
     116  zone = page_zone(pfn_to_page(pfn));   in cma_activate_area()
     121  base_pfn = pfn;   in cma_activate_area()
     122  for (j = pageblock_nr_pages; j; --j, pfn++) {   in cma_activate_area()
     123  WARN_ON_ONCE(!pfn_valid(pfn));   in cma_activate_area()
     130  if (page_zone(pfn_to_page(pfn)) != zone)   in cma_activate_area()
     365  unsigned long mask, offset, pfn, start = 0;   in cma_alloc() local
    [all …]

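Line 95 maps a pfn to a bitmap bit: each bit covers 2^order_per_bit pages, so the bit index is (pfn - base_pfn) >> order_per_bit. A self-contained sketch with a one-word bitmap standing in for the kernel's bitmap helpers (the count-to-bits conversion is an assumption here):

    #include <stdio.h>

    struct mock_cma {
        unsigned long base_pfn;
        unsigned int order_per_bit;
        unsigned long bitmap;        /* one word is enough for the demo */
    };

    static void cma_clear(struct mock_cma *cma, unsigned long pfn, unsigned long count)
    {
        unsigned long bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
        unsigned long bitmap_count = count >> cma->order_per_bit;

        for (unsigned long i = 0; i < bitmap_count; i++)
            cma->bitmap &= ~(1UL << (bitmap_no + i));
    }

    int main(void)
    {
        struct mock_cma cma = { .base_pfn = 0x80000, .order_per_bit = 4,
                                .bitmap = ~0UL };

        /* Free 64 pages starting 32 pages into the area: clears bits 2..5. */
        cma_clear(&cma, 0x80000 + 32, 64);
        printf("bitmap = %#lx\n", cma.bitmap);
        return 0;
    }
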
/mm/compaction.c
     133  unsigned long pfn;   in __reset_isolation_suitable() local
     141  for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {   in __reset_isolation_suitable()
     146  if (!pfn_valid(pfn))   in __reset_isolation_suitable()
     149  page = pfn_to_page(pfn);   in __reset_isolation_suitable()
     181  unsigned long pfn;   in update_pageblock_skip() local
     194  pfn = page_to_pfn(page);   in update_pageblock_skip()
     200  if (pfn > zone->compact_cached_migrate_pfn[0])   in update_pageblock_skip()
     201  zone->compact_cached_migrate_pfn[0] = pfn;   in update_pageblock_skip()
     203  pfn > zone->compact_cached_migrate_pfn[1])   in update_pageblock_skip()
     204  zone->compact_cached_migrate_pfn[1] = pfn;   in update_pageblock_skip()
    [all …]

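Lines 200-204 cache scanner progress on the zone: the cached migrate pfn only ever moves forward, so later compaction passes can resume where earlier ones left off instead of rescanning. A sketch of that monotonic update (which slot is async versus sync is assumed):

    #include <stdio.h>

    struct mock_zone {
        unsigned long compact_cached_migrate_pfn[2];   /* [0]=async, [1]=sync */
    };

    static void update_migrate_hint(struct mock_zone *zone, unsigned long pfn,
                                    int sync)
    {
        if (pfn > zone->compact_cached_migrate_pfn[0])
            zone->compact_cached_migrate_pfn[0] = pfn;
        if (sync && pfn > zone->compact_cached_migrate_pfn[1])
            zone->compact_cached_migrate_pfn[1] = pfn;
    }

    int main(void)
    {
        struct mock_zone zone = { { 0x1000, 0x1000 } };

        update_migrate_hint(&zone, 0x1800, 0);   /* async pass advances slot 0 */
        update_migrate_hint(&zone, 0x1400, 1);   /* sync pass advances only slot 1 */
        printf("async=%#lx sync=%#lx\n",
               zone.compact_cached_migrate_pfn[0],
               zone.compact_cached_migrate_pfn[1]);
        return 0;
    }
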
/mm/page_alloc.c
     266  unsigned long pfn = page_to_pfn(page);   in page_outside_zone_boundaries() local
     273  if (!zone_spans_pfn(zone, pfn))   in page_outside_zone_boundaries()
     279  pfn, zone_to_nid(zone), zone->name,   in page_outside_zone_boundaries()
     555  unsigned long pfn,   in __free_one_page() argument
     577  page_idx = pfn & ((1 << MAX_ORDER) - 1);   in __free_one_page()
     755  struct page *page, unsigned long pfn,   in free_one_page() argument
     767  migratetype = get_pfnblock_migratetype(page, pfn);   in free_one_page()
     769  __free_one_page(page, pfn, zone, order, migratetype);   in free_one_page()
     804  unsigned long pfn = page_to_pfn(page);   in __free_pages_ok() local
     809  migratetype = get_pfnblock_migratetype(page, pfn);   in __free_pages_ok()
    [all …]

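Line 577 is the entry point to the buddy arithmetic: the low MAX_ORDER bits of the pfn index the page within its maximal buddy region, and the buddy of a 2^order block differs only in bit `order`, so it is found by an XOR. A standalone demo, assuming the common MAX_ORDER = 11:

    #include <stdio.h>

    #define MAX_ORDER 11

    int main(void)
    {
        unsigned long pfn = 0x12345;
        unsigned long page_idx = pfn & ((1 << MAX_ORDER) - 1);   /* as in line 577 */

        for (unsigned int order = 0; order < 3; order++) {
            unsigned long buddy_idx = page_idx ^ (1UL << order); /* flip bit `order` */
            printf("order %u: idx %#lx buddy %#lx combined %#lx\n",
                   order, page_idx, buddy_idx, buddy_idx & page_idx);
            page_idx &= buddy_idx;   /* index of the merged 2^(order+1) block */
        }
        return 0;
    }
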
/mm/hwpoison-inject.c
      15  unsigned long pfn = val;   in hwpoison_inject() local
      23  if (!pfn_valid(pfn))   in hwpoison_inject()
      26  p = pfn_to_page(pfn);   in hwpoison_inject()
      58  pr_info("Injecting memory failure at pfn %#lx\n", pfn);   in hwpoison_inject()
      59  return memory_failure(pfn, 18, MF_COUNT_INCREASED);   in hwpoison_inject()

/mm/sparse.c
     172  unsigned long pfn;   in memory_present() local
     176  for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {   in memory_present()
     177  unsigned long section = pfn_to_section_nr(pfn);   in memory_present()
     197  unsigned long pfn;   in node_memmap_size_bytes() local
     201  for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {   in node_memmap_size_bytes()
     202  if (nid != early_pfn_to_nid(pfn))   in node_memmap_size_bytes()
     205  if (pfn_present(pfn))   in node_memmap_size_bytes()

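The memory_present() loop advances by PAGES_PER_SECTION and derives a section number from the pfn; with SPARSEMEM that conversion is a plain shift. A sketch assuming 4 KiB pages and 128 MiB sections (PFN_SECTION_SHIFT = 15, typical for x86_64 but configuration-dependent):

    #include <stdio.h>

    #define PFN_SECTION_SHIFT 15
    #define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
    #define PAGE_SECTION_MASK (~(PAGES_PER_SECTION - 1))

    static unsigned long pfn_to_section_nr(unsigned long pfn)
    {
        return pfn >> PFN_SECTION_SHIFT;         /* section index of this pfn */
    }

    int main(void)
    {
        unsigned long start = 0x100000, end = 0x118000;

        /* Same stride as the loop in memory_present(). */
        for (unsigned long pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
            printf("pfn %#lx -> section %lu (section start %#lx)\n",
                   pfn, pfn_to_section_nr(pfn), pfn & PAGE_SECTION_MASK);
        return 0;
    }
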
/mm/memory.c
     751  unsigned long pfn = pte_pfn(pte);   in vm_normal_page() local
     758  if (!is_zero_pfn(pfn))   in vm_normal_page()
     767  if (!pfn_valid(pfn))   in vm_normal_page()
     773  if (pfn == vma->vm_pgoff + off)   in vm_normal_page()
     780  if (is_zero_pfn(pfn))   in vm_normal_page()
     783  if (unlikely(pfn > highest_memmap_pfn)) {   in vm_normal_page()
     793  return pfn_to_page(pfn);   in vm_normal_page()
    1543  unsigned long pfn, pgprot_t prot)   in insert_pfn() argument
    1559  entry = pte_mkspecial(pfn_pte(pfn, prot));   in insert_pfn()
    1588  unsigned long pfn)   in vm_insert_pfn() argument
    [all …]

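Line 773's pfn == vma->vm_pgoff + off is the linearity test for COW-mapped VM_PFNMAP areas: vm_pgoff holds the mapping's base pfn, so a pte whose pfn still sits at base plus page offset is raw mapped memory with no struct page behind it, while a mismatch indicates a COWed normal page. A plain-C model (the off computation comes from vm_normal_page()'s surrounding code, not shown in the excerpt; 4 KiB pages assumed):

    #include <stdio.h>
    #include <stdbool.h>

    #define PAGE_SHIFT 12

    struct mock_vma {
        unsigned long vm_start;
        unsigned long vm_pgoff;   /* base pfn of a linear pfn mapping */
    };

    static bool is_raw_pfnmap_page(const struct mock_vma *vma,
                                   unsigned long addr, unsigned long pte_pfn)
    {
        unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
        return pte_pfn == vma->vm_pgoff + off;   /* still the linear mapping? */
    }

    int main(void)
    {
        struct mock_vma vma = { .vm_start = 0x10000000, .vm_pgoff = 0x90000 };
        unsigned long addr = vma.vm_start + 5 * 4096;

        printf("linear pte: %d\n", is_raw_pfnmap_page(&vma, addr, 0x90005));
        printf("COWed pte:  %d\n", is_raw_pfnmap_page(&vma, addr, 0x12345));
        return 0;
    }
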
/mm/internal.h
     136  extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
     302  unsigned long pfn = page_to_pfn(base) + offset;   in mem_map_next() local
     303  if (!pfn_valid(pfn))   in mem_map_next()
     305  return pfn_to_page(pfn);   in mem_map_next()
     343  enum zone_type zone, unsigned long nid, unsigned long pfn);
     358  enum zone_type zone, unsigned long nid, unsigned long pfn)   in mminit_verify_page_links() argument

/mm/mmzone.c
      77  int memmap_valid_within(unsigned long pfn,   in memmap_valid_within() argument
      80  if (page_to_pfn(page) != pfn)   in memmap_valid_within()

/mm/vmstat.c
     977  unsigned long pfn;   in pagetypeinfo_showblockcount_print() local
     982  for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {   in pagetypeinfo_showblockcount_print()
     985  if (!pfn_valid(pfn))   in pagetypeinfo_showblockcount_print()
     988  page = pfn_to_page(pfn);   in pagetypeinfo_showblockcount_print()
     991  if (!memmap_valid_within(pfn, page, zone))   in pagetypeinfo_showblockcount_print()

/mm/mm_init.c
     134  unsigned long nid, unsigned long pfn)   in mminit_verify_page_links() argument
     138  BUG_ON(page_to_pfn(page) != pfn);   in mminit_verify_page_links()

/mm/sparse-vmemmap.c
      93  unsigned long pfn = pte_pfn(*pte);   in vmemmap_verify() local
      94  int actual_node = early_pfn_to_nid(pfn);   in vmemmap_verify()

/mm/kmemleak.c
    1344  unsigned long pfn;   in kmemleak_scan() local
    1346  for (pfn = start_pfn; pfn < end_pfn; pfn++) {   in kmemleak_scan()
    1349  if (!pfn_valid(pfn))   in kmemleak_scan()
    1351  page = pfn_to_page(pfn);   in kmemleak_scan()

/mm/nommu.c
     227  unsigned long *pfn)   in follow_pfn() argument
     232  *pfn = address >> PAGE_SHIFT;   in follow_pfn()
    1842  unsigned long pfn, unsigned long size, pgprot_t prot)   in remap_pfn_range() argument
    1844  if (addr != (pfn << PAGE_SHIFT))   in remap_pfn_range()
    1854  unsigned long pfn = start >> PAGE_SHIFT;   in vm_iomap_memory() local
    1857  pfn += vma->vm_pgoff;   in vm_iomap_memory()
    1858  return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);   in vm_iomap_memory()

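These excerpts are pure address/pfn shift arithmetic: follow_pfn() divides the address by the page size, remap_pfn_range() on nommu only permits the identity mapping, and vm_iomap_memory() adds vm_pgoff as a page-granular offset. A demo assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long address = 0x40021000;
        unsigned long pfn = address >> PAGE_SHIFT;      /* as in follow_pfn() */

        /* remap_pfn_range() on nommu: only the identity mapping is allowed. */
        printf("pfn %#lx, identity ok: %d\n", pfn, address == (pfn << PAGE_SHIFT));

        /* vm_iomap_memory(): start pfn of the region plus the file offset
         * expressed in pages (vm_pgoff). */
        unsigned long start = 0x40000000, vm_pgoff = 0x21;
        printf("iomap pfn %#lx\n", (start >> PAGE_SHIFT) + vm_pgoff);
        return 0;
    }
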
/mm/hugetlb.c
     747  unsigned long ret, pfn, flags;   in alloc_gigantic_page() local
     754  pfn = ALIGN(z->zone_start_pfn, nr_pages);   in alloc_gigantic_page()
     755  while (zone_spans_last_pfn(z, pfn, nr_pages)) {   in alloc_gigantic_page()
     756  if (pfn_range_valid_gigantic(pfn, nr_pages)) {   in alloc_gigantic_page()
     765  ret = __alloc_gigantic_page(pfn, nr_pages);   in alloc_gigantic_page()
     767  return pfn_to_page(pfn);   in alloc_gigantic_page()
     770  pfn += nr_pages;   in alloc_gigantic_page()
    1114  unsigned long pfn;   in dissolve_free_huge_pages() local
    1125  for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)   in dissolve_free_huge_pages()
    1126  dissolve_free_huge_page(pfn_to_page(pfn));   in dissolve_free_huge_pages()

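alloc_gigantic_page() rounds the zone's first pfn up to the gigantic-page size and then tests one aligned candidate range at a time, jumping by nr_pages on failure. A model with a made-up validity predicate, taking nr_pages = 262144 for a 1 GiB page of 4 KiB base pages:

    #include <stdio.h>
    #include <stdbool.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    static bool range_valid(unsigned long pfn, unsigned long nr)
    {
        return pfn + nr <= 0x200000;   /* pretend memory ends at pfn 0x200000 */
    }

    int main(void)
    {
        unsigned long zone_start_pfn = 0x10123, zone_end_pfn = 0x280000;
        unsigned long nr_pages = 262144;                 /* 1 GiB / 4 KiB */
        unsigned long pfn = ALIGN_UP(zone_start_pfn, nr_pages);

        while (pfn + nr_pages <= zone_end_pfn) {         /* zone_spans_last_pfn() */
            if (range_valid(pfn, nr_pages)) {
                printf("candidate gigantic page at pfn %#lx\n", pfn);
                break;
            }
            pfn += nr_pages;                             /* next aligned slot */
        }
        return 0;
    }
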
/mm/memblock.c
    1405  int __init_memblock memblock_search_pfn_nid(unsigned long pfn,   in memblock_search_pfn_nid() argument
    1409  int mid = memblock_search(type, PFN_PHYS(pfn));   in memblock_search_pfn_nid()

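Line 1409 converts the pfn to a physical address with PFN_PHYS() and hands it to memblock_search(), a search over the sorted region table (that the search is binary is inferred from the `mid` variable; the region table below is invented):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PFN_PHYS(pfn) ((unsigned long long)(pfn) << PAGE_SHIFT)

    struct region { unsigned long long base, size; int nid; };

    static struct region regions[] = {
        { 0x00000000ULL, 0x40000000ULL, 0 },   /* node 0: first 1 GiB */
        { 0x40000000ULL, 0x40000000ULL, 1 },   /* node 1: second 1 GiB */
    };

    /* Binary-search the sorted, non-overlapping regions for the one that
     * contains the pfn's physical address; return its node id, or -1. */
    static int search_pfn_nid(unsigned long pfn)
    {
        unsigned long long addr = PFN_PHYS(pfn);
        int lo = 0, hi = (int)(sizeof(regions) / sizeof(regions[0])) - 1;

        while (lo <= hi) {
            int mid = lo + (hi - lo) / 2;
            if (addr < regions[mid].base)
                hi = mid - 1;
            else if (addr >= regions[mid].base + regions[mid].size)
                lo = mid + 1;
            else
                return regions[mid].nid;
        }
        return -1;
    }

    int main(void)
    {
        printf("pfn 0x50000 -> nid %d\n", search_pfn_nid(0x50000));
        return 0;
    }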