Searched refs:pfn (Results 1 – 25 of 29) sorted by relevance


/mm/
page_isolation.c
21 unsigned long flags, pfn; in set_migratetype_isolate() local
38 pfn = page_to_pfn(page); in set_migratetype_isolate()
39 arg.start_pfn = pfn; in set_migratetype_isolate()
96 unsigned long pfn, buddy_pfn; in unset_migratetype_isolate() local
115 pfn = page_to_pfn(page); in unset_migratetype_isolate()
116 buddy_pfn = __find_buddy_pfn(pfn, order); in unset_migratetype_isolate()
117 buddy = page + (buddy_pfn - pfn); in unset_migratetype_isolate()
147 __first_valid_page(unsigned long pfn, unsigned long nr_pages) in __first_valid_page() argument
154 page = pfn_to_online_page(pfn + i); in __first_valid_page()
196 unsigned long pfn; in start_isolate_page_range() local
[all …]
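
The unset_migratetype_isolate() lines above locate the buddy of an order-sized block and then reach its struct page by pointer arithmetic (buddy = page + (buddy_pfn - pfn)). A minimal standalone sketch of the pfn arithmetic, assuming the usual flip-the-order-bit derivation for the buddy frame number; the sample values and the main() harness are illustrative only:

#include <stdio.h>

/* Assumed derivation used by __find_buddy_pfn(): the buddy of a
 * 2^order-aligned block is found by flipping bit 'order' of the pfn. */
static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
{
    return pfn ^ (1UL << order);
}

int main(void)
{
    unsigned long pfn = 0x1200;     /* sample page frame number */
    unsigned int order = 3;         /* 8-page block */
    unsigned long buddy_pfn = find_buddy_pfn(pfn, order);

    /* In the snippet the buddy page is then reached relative to the
     * known page: buddy = page + (buddy_pfn - pfn). */
    printf("pfn %#lx, order %u -> buddy pfn %#lx (offset %ld pages)\n",
           pfn, order, buddy_pfn, (long)(buddy_pfn - pfn));
    return 0;
}
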
memory_hotplug.c
227 unsigned long i, pfn, end_pfn, nr_pages; in register_page_bootmem_info_node() local
237 pfn = pgdat->node_start_pfn; in register_page_bootmem_info_node()
241 for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in register_page_bootmem_info_node()
248 if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node)) in register_page_bootmem_info_node()
249 register_page_bootmem_info_section(pfn); in register_page_bootmem_info_node()
254 static int check_pfn_span(unsigned long pfn, unsigned long nr_pages, in check_pfn_span() argument
272 if (!IS_ALIGNED(pfn, min_align) in check_pfn_span()
275 reason, pfn, pfn + nr_pages - 1); in check_pfn_span()
287 int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, in __add_pages() argument
298 if (altmap->base_pfn != pfn in __add_pages()
[all …]
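
check_pfn_span() above rejects ranges whose start pfn or length is not aligned to the minimum hot(un)plug granularity. A small sketch of that test, with IS_ALIGNED() expanded to the usual power-of-two mask check; the min_align value below is an assumed example, not a value taken from the kernel:

#include <stdbool.h>
#include <stdio.h>

/* Kernel-style IS_ALIGNED(): true when x is a multiple of the power-of-two a. */
#define IS_ALIGNED(x, a)    (((x) & ((a) - 1)) == 0)

/* Both the start pfn and the length must be multiples of min_align. */
static bool pfn_span_ok(unsigned long pfn, unsigned long nr_pages,
                        unsigned long min_align)
{
    return IS_ALIGNED(pfn, min_align) && IS_ALIGNED(nr_pages, min_align);
}

int main(void)
{
    unsigned long min_align = 2048;    /* assumed granularity, in pages */

    printf("%d\n", pfn_span_ok(0x10000, 4096, min_align));    /* 1: aligned */
    printf("%d\n", pfn_span_ok(0x10001, 4096, min_align));    /* 0: misaligned start */
    return 0;
}
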
memory-failure.c
209 static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags) in kill_proc() argument
216 pfn, t->comm, t->pid); in kill_proc()
362 unsigned long pfn, int flags) in kill_procs() argument
375 pfn, tk->tsk->comm, tk->tsk->pid); in kill_procs()
386 else if (kill_proc(tk, pfn, flags) < 0) in kill_procs()
388 pfn, tk->tsk->comm, tk->tsk->pid); in kill_procs()
593 static int truncate_error_page(struct page *p, unsigned long pfn, in truncate_error_page() argument
603 pfn, err); in truncate_error_page()
607 pfn); in truncate_error_page()
620 pfn); in truncate_error_page()
[all …]
page_ext.c
117 unsigned long pfn = page_to_pfn(page); in lookup_page_ext() local
130 index = pfn - round_down(node_start_pfn(page_to_nid(page)), in lookup_page_ext()
192 unsigned long pfn = page_to_pfn(page); in lookup_page_ext() local
193 struct mem_section *section = __pfn_to_section(pfn); in lookup_page_ext()
202 return get_entry(section->page_ext, pfn); in lookup_page_ext()
221 static int __meminit init_section_page_ext(unsigned long pfn, int nid) in init_section_page_ext() argument
227 section = __pfn_to_section(pfn); in init_section_page_ext()
251 pfn &= PAGE_SECTION_MASK; in init_section_page_ext()
252 section->page_ext = (void *)base - page_ext_size * pfn; in init_section_page_ext()
273 static void __free_page_ext(unsigned long pfn) in __free_page_ext() argument
[all …]
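
The init_section_page_ext() lines above (pfn &= PAGE_SECTION_MASK; section->page_ext = base - page_ext_size * pfn) show the trick behind the sparsemem lookup: the stored pointer is pre-biased by the section's first pfn, so get_entry() can index with the absolute pfn. A standalone sketch of that biasing; page_ext_size and PAGES_PER_SECTION below are placeholder values, not the kernel's:

#include <stdio.h>
#include <stdlib.h>

#define PAGES_PER_SECTION   32768UL                      /* placeholder value */
#define PAGE_SECTION_MASK   (~(PAGES_PER_SECTION - 1))

static size_t page_ext_size = 24;                        /* placeholder per-page size */

/* get_entry(base, pfn): because base was pre-biased at init time,
 * the absolute pfn indexes the array directly. */
static void *get_entry(void *base, unsigned long pfn)
{
    return (char *)base + page_ext_size * pfn;
}

int main(void)
{
    unsigned long pfn = 0x8123;
    unsigned long section_start = pfn & PAGE_SECTION_MASK;

    /* Metadata for one section; bias the stored pointer by the section's
     * first pfn, as the snippet does.  (The intermediate out-of-range
     * pointer is fine in the kernel's flat mapping; it is kept here only
     * to mirror the trick.) */
    char *base = calloc(PAGES_PER_SECTION, page_ext_size);
    char *section_page_ext = base - page_ext_size * section_start;

    printf("pfn %#lx -> offset %zu bytes into the section's array\n",
           pfn, (size_t)((char *)get_entry(section_page_ext, pfn) - base));
    free(base);
    return 0;
}
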
page_owner.c
259 unsigned long pfn = zone->zone_start_pfn, block_end_pfn; in pagetypeinfo_showmixedcount_print() local
260 unsigned long end_pfn = pfn + zone->spanned_pages; in pagetypeinfo_showmixedcount_print()
266 pfn = zone->zone_start_pfn; in pagetypeinfo_showmixedcount_print()
273 for (; pfn < end_pfn; ) { in pagetypeinfo_showmixedcount_print()
274 page = pfn_to_online_page(pfn); in pagetypeinfo_showmixedcount_print()
276 pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES); in pagetypeinfo_showmixedcount_print()
280 block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); in pagetypeinfo_showmixedcount_print()
285 for (; pfn < block_end_pfn; pfn++) { in pagetypeinfo_showmixedcount_print()
286 if (!pfn_valid_within(pfn)) in pagetypeinfo_showmixedcount_print()
290 page = pfn_to_page(pfn); in pagetypeinfo_showmixedcount_print()
[all …]
sparse.c
222 static void subsection_mask_set(unsigned long *map, unsigned long pfn, in subsection_mask_set() argument
225 int idx = subsection_map_index(pfn); in subsection_mask_set()
226 int end = subsection_map_index(pfn + nr_pages - 1); in subsection_mask_set()
231 void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages) in subsection_map_init() argument
233 int end_sec = pfn_to_section_nr(pfn + nr_pages - 1); in subsection_map_init()
234 unsigned long nr, start_sec = pfn_to_section_nr(pfn); in subsection_map_init()
244 - (pfn & ~PAGE_SECTION_MASK)); in subsection_map_init()
246 subsection_mask_set(ms->usage->subsection_map, pfn, pfns); in subsection_map_init()
249 pfns, subsection_map_index(pfn), in subsection_map_init()
250 subsection_map_index(pfn + pfns - 1)); in subsection_map_init()
[all …]
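
subsection_mask_set() above converts a pfn range into bit positions in a per-section subsection bitmap. A sketch of the index arithmetic; the section and subsection sizes below are assumptions (they are architecture- and config-dependent), and subsection_map_index() is reconstructed from its usual definition rather than from the snippet:

#include <stdio.h>

#define PFN_SECTION_SHIFT     15                          /* assumed: 128 MiB sections, 4 KiB pages */
#define PAGES_PER_SECTION     (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK     (~(PAGES_PER_SECTION - 1))
#define PAGES_PER_SUBSECTION  512UL                       /* assumed: 2 MiB subsections */

static unsigned long pfn_to_section_nr(unsigned long pfn)
{
    return pfn >> PFN_SECTION_SHIFT;
}

static unsigned int subsection_map_index(unsigned long pfn)
{
    /* Offset of the pfn inside its section, counted in subsections. */
    return (pfn & ~PAGE_SECTION_MASK) / PAGES_PER_SUBSECTION;
}

int main(void)
{
    unsigned long pfn = 0x10300;

    printf("pfn %#lx -> section %lu, subsection bit %u\n",
           pfn, pfn_to_section_nr(pfn), subsection_map_index(pfn));
    return 0;
}
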
compaction.c
48 #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) argument
49 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) argument
50 #define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order) argument
51 #define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order) argument
59 unsigned long pfn = page_to_pfn(page); in release_freepages() local
62 if (pfn > high_pfn) in release_freepages()
63 high_pfn = pfn; in release_freepages()
241 __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source, in __reset_isolation_pfn() argument
244 struct page *page = pfn_to_online_page(pfn); in __reset_isolation_pfn()
272 block_pfn = pageblock_start_pfn(pfn); in __reset_isolation_pfn()
[all …]
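
The block_start_pfn()/block_end_pfn() macros shown at the top of compaction.c are plain power-of-two rounding. A standalone sketch using the same expressions; pageblock_order = 9 (512-page, i.e. 2 MiB, blocks with 4 KiB pages) is assumed for the example:

#include <stdio.h>

/* Power-of-two rounding helpers, matching how the macros above use them. */
#define round_down(x, y)    ((x) & ~((y) - 1))
#define ALIGN(x, a)         (((x) + (a) - 1) & ~((a) - 1))

#define block_start_pfn(pfn, order)    round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)      ALIGN((pfn) + 1, 1UL << (order))

int main(void)
{
    unsigned int pageblock_order = 9;    /* assumed: 512-page blocks */
    unsigned long pfn = 0x12345;

    printf("pfn %#lx -> pageblock [%#lx, %#lx)\n", pfn,
           block_start_pfn(pfn, pageblock_order),
           block_end_pfn(pfn, pageblock_order));
    return 0;
}
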
page_idle.c
31 static struct page *page_idle_get_page(unsigned long pfn) in page_idle_get_page() argument
36 if (!pfn_valid(pfn)) in page_idle_get_page()
39 page = pfn_to_page(pfn); in page_idle_get_page()
127 unsigned long pfn, end_pfn; in page_idle_bitmap_read() local
133 pfn = pos * BITS_PER_BYTE; in page_idle_bitmap_read()
134 if (pfn >= max_pfn) in page_idle_bitmap_read()
137 end_pfn = pfn + count * BITS_PER_BYTE; in page_idle_bitmap_read()
141 for (; pfn < end_pfn; pfn++) { in page_idle_bitmap_read()
142 bit = pfn % BITMAP_CHUNK_BITS; in page_idle_bitmap_read()
145 page = page_idle_get_page(pfn); in page_idle_bitmap_read()
[all …]
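
page_idle_bitmap_read() above maps a byte offset in the bitmap file to a pfn range, one bit per page frame. A sketch of that mapping; BITMAP_CHUNK_BITS = 64 is an assumption matching an 8-byte chunk:

#include <stdio.h>

#define BITS_PER_BYTE       8
#define BITMAP_CHUNK_BITS   64    /* assumed: one 8-byte chunk per 64 frames */

int main(void)
{
    size_t pos = 24;      /* byte offset into the bitmap file */
    size_t count = 16;    /* bytes requested by the reader */

    unsigned long pfn = pos * BITS_PER_BYTE;               /* first frame covered */
    unsigned long end_pfn = pfn + count * BITS_PER_BYTE;   /* one past the last */
    unsigned int bit = pfn % BITMAP_CHUNK_BITS;            /* bit within its chunk */

    printf("bytes [%zu, %zu) cover pfns [%lu, %lu), starting at chunk bit %u\n",
           pos, pos + count, pfn, end_pfn, bit);
    return 0;
}
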
cma.c
85 static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, in cma_clear_bitmap() argument
90 bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit; in cma_clear_bitmap()
101 unsigned long base_pfn = cma->base_pfn, pfn = base_pfn; in cma_activate_area() local
112 WARN_ON_ONCE(!pfn_valid(pfn)); in cma_activate_area()
113 zone = page_zone(pfn_to_page(pfn)); in cma_activate_area()
118 base_pfn = pfn; in cma_activate_area()
119 for (j = pageblock_nr_pages; j; --j, pfn++) { in cma_activate_area()
120 WARN_ON_ONCE(!pfn_valid(pfn)); in cma_activate_area()
127 if (page_zone(pfn_to_page(pfn)) != zone) in cma_activate_area()
423 unsigned long pfn = -1; in cma_alloc() local
[all …]
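
cma_clear_bitmap() above converts a pfn back into a position in the CMA allocation bitmap, where each bit covers 2^order_per_bit pages. A sketch with made-up region parameters; the struct cma here carries only the two fields the calculation needs:

#include <stdio.h>

/* Only the fields used by the bitmap index calculation. */
struct cma {
    unsigned long base_pfn;       /* first pfn of the reserved region */
    unsigned int order_per_bit;   /* pages per bitmap bit = 1 << order_per_bit */
};

static unsigned long cma_bitmap_no(const struct cma *cma, unsigned long pfn)
{
    return (pfn - cma->base_pfn) >> cma->order_per_bit;
}

int main(void)
{
    struct cma cma = { .base_pfn = 0x80000, .order_per_bit = 2 };    /* made up */
    unsigned long pfn = 0x80130;

    printf("pfn %#lx -> bitmap bit %lu\n", pfn, cma_bitmap_no(&cma, pfn));
    return 0;
}
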
page_alloc.c
402 static inline bool __meminit early_page_uninitialised(unsigned long pfn) in early_page_uninitialised() argument
404 int nid = early_pfn_to_nid(pfn); in early_page_uninitialised()
406 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn) in early_page_uninitialised()
417 defer_init(int nid, unsigned long pfn, unsigned long end_pfn) in defer_init() argument
440 (pfn & (PAGES_PER_SECTION - 1)) == 0) { in defer_init()
441 NODE_DATA(nid)->first_deferred_pfn = pfn; in defer_init()
449 static inline bool early_page_uninitialised(unsigned long pfn) in early_page_uninitialised() argument
454 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn) in defer_init() argument
462 unsigned long pfn) in get_pageblock_bitmap() argument
465 return section_to_usemap(__pfn_to_section(pfn)); in get_pageblock_bitmap()
[all …]
sparse-vmemmap.c
108 unsigned long pfn, nr_pfns, nr_align; in altmap_alloc_block_buf() local
116 pfn = vmem_altmap_next_pfn(altmap); in altmap_alloc_block_buf()
119 nr_align = ALIGN(pfn, nr_align) - pfn; in altmap_alloc_block_buf()
125 pfn += nr_align; in altmap_alloc_block_buf()
128 __func__, pfn, altmap->alloc, altmap->align, nr_pfns); in altmap_alloc_block_buf()
129 return __va(__pfn_to_phys(pfn)); in altmap_alloc_block_buf()
135 unsigned long pfn = pte_pfn(*pte); in vmemmap_verify() local
136 int actual_node = early_pfn_to_nid(pfn); in vmemmap_verify()
248 struct page * __meminit __populate_section_memmap(unsigned long pfn, in __populate_section_memmap() argument
259 end = ALIGN(pfn + nr_pages, PAGES_PER_SUBSECTION); in __populate_section_memmap()
[all …]
memremap.c
69 static unsigned long pfn_next(unsigned long pfn) in pfn_next() argument
71 if (pfn % 1024 == 0) in pfn_next()
73 return pfn + 1; in pfn_next()
76 #define for_each_device_pfn(pfn, map) \ argument
77 for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
107 unsigned long pfn; in memunmap_pages() local
111 for_each_device_pfn(pfn, pgmap) in memunmap_pages()
112 put_page(pfn_to_page(pfn)); in memunmap_pages()
387 struct dev_pagemap *get_dev_pagemap(unsigned long pfn, in get_dev_pagemap() argument
390 resource_size_t phys = PFN_PHYS(pfn); in get_dev_pagemap()
hmm.c
224 bool write_fault, uint64_t *pfn) in hmm_vma_do_fault() argument
251 *pfn = range->values[HMM_PFN_ERROR]; in hmm_vma_do_fault()
410 unsigned long pfn, npages, i; in hmm_vma_handle_pmd() local
422 pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); in hmm_vma_handle_pmd()
423 for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) { in hmm_vma_handle_pmd()
425 hmm_vma_walk->pgmap = get_dev_pagemap(pfn, in hmm_vma_handle_pmd()
430 pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags; in hmm_vma_handle_pmd()
456 uint64_t *pfn) in hmm_vma_handle_pte() argument
463 uint64_t orig_pfn = *pfn; in hmm_vma_handle_pte()
465 *pfn = range->values[HMM_PFN_NONE]; in hmm_vma_handle_pte()
[all …]
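
hmm_vma_handle_pmd() above recovers the pfn of one page inside a huge PMD mapping: the PMD's base pfn plus the page offset of the address within the PMD region. A sketch of that arithmetic; PAGE_SHIFT and PMD_SHIFT below are the common x86-64 values, assumed for the example:

#include <stdio.h>

#define PAGE_SHIFT  12                             /* assumed 4 KiB pages */
#define PMD_SHIFT   21                             /* assumed 2 MiB PMD mappings */
#define PMD_MASK    (~((1ULL << PMD_SHIFT) - 1))

int main(void)
{
    unsigned long long pmd_base_pfn = 0x40000;     /* frame the huge PMD points at */
    unsigned long long addr = 0x7f0000123000ULL;   /* address being walked */

    /* Offset of addr within its 2 MiB region, converted to whole pages. */
    unsigned long long pfn = pmd_base_pfn + ((addr & ~PMD_MASK) >> PAGE_SHIFT);

    printf("addr %#llx inside the PMD -> pfn %#llx\n", addr, pfn);
    return 0;
}
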
page_vma_mapped.c
55 static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn) in pfn_in_hpage() argument
60 return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage); in pfn_in_hpage()
83 unsigned long pfn; in check_pte() local
94 pfn = migration_entry_to_pfn(entry); in check_pte()
103 pfn = device_private_entry_to_pfn(entry); in check_pte()
108 pfn = pte_pfn(*pvmw->pte); in check_pte()
111 return pfn_in_hpage(pvmw->page, pfn); in check_pte()
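
pfn_in_hpage() above is a half-open range check against the compound page's first frame. A sketch with the page handling stripped out; first_pfn and nr_pages stand in for hpage_pfn and hpage_nr_pages():

#include <stdbool.h>
#include <stdio.h>

/* Half-open range check: [first_pfn, first_pfn + nr_pages). */
static bool pfn_in_range(unsigned long pfn, unsigned long first_pfn,
                         unsigned long nr_pages)
{
    return pfn >= first_pfn && pfn - first_pfn < nr_pages;
}

int main(void)
{
    printf("%d\n", pfn_in_range(0x1205, 0x1200, 512));    /* 1: inside */
    printf("%d\n", pfn_in_range(0x1400, 0x1200, 512));    /* 0: outside */
    return 0;
}
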
memory.c
596 unsigned long pfn = pte_pfn(pte); in vm_normal_page() local
605 if (is_zero_pfn(pfn)) in vm_normal_page()
618 if (!pfn_valid(pfn)) in vm_normal_page()
624 if (pfn == vma->vm_pgoff + off) in vm_normal_page()
631 if (is_zero_pfn(pfn)) in vm_normal_page()
635 if (unlikely(pfn > highest_memmap_pfn)) { in vm_normal_page()
645 return pfn_to_page(pfn); in vm_normal_page()
652 unsigned long pfn = pmd_pfn(pmd); in vm_normal_page_pmd() local
661 if (!pfn_valid(pfn)) in vm_normal_page_pmd()
667 if (pfn == vma->vm_pgoff + off) in vm_normal_page_pmd()
[all …]
hwpoison-inject.c
16 unsigned long pfn = val; in hwpoison_inject() local
24 if (!pfn_valid(pfn)) in hwpoison_inject()
27 p = pfn_to_page(pfn); in hwpoison_inject()
55 pr_info("Injecting memory failure at pfn %#lx\n", pfn); in hwpoison_inject()
56 return memory_failure(pfn, MF_COUNT_INCREASED); in hwpoison_inject()
huge_memory.c
778 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write, in insert_pfn_pmd() argument
788 if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) { in insert_pfn_pmd()
801 entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); in insert_pfn_pmd()
802 if (pfn_t_devmap(pfn)) in insert_pfn_pmd()
824 vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write) in vmf_insert_pfn_pmd() argument
837 !pfn_t_devmap(pfn)); in vmf_insert_pfn_pmd()
851 track_pfn_insert(vma, &pgprot, pfn); in vmf_insert_pfn_pmd()
853 insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); in vmf_insert_pfn_pmd()
867 pud_t *pud, pfn_t pfn, pgprot_t prot, bool write) in insert_pfn_pud() argument
876 if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) { in insert_pfn_pud()
[all …]
shuffle.c
61 static struct page * __meminit shuffle_valid_page(unsigned long pfn, int order) in shuffle_valid_page() argument
71 if (!pfn_valid_within(pfn)) in shuffle_valid_page()
75 if (!pfn_present(pfn)) in shuffle_valid_page()
79 page = pfn_to_page(pfn); in shuffle_valid_page()
swap_state.c
646 unsigned long faddr, pfn, fpfn; in swap_ra_info() local
671 pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val)); in swap_ra_info()
674 ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits, in swap_ra_info()
685 if (fpfn == pfn + 1) in swap_ra_info()
687 else if (pfn == fpfn + 1) in swap_ra_info()
702 for (pfn = start; pfn != end; pfn++) in swap_ra_info()
internal.h
160 extern void memblock_free_pages(struct page *page, unsigned long pfn,
413 unsigned long pfn = page_to_pfn(base) + offset; in mem_map_next() local
414 if (!pfn_valid(pfn)) in mem_map_next()
416 return pfn_to_page(pfn); in mem_map_next()
mmzone.c
76 bool memmap_valid_within(unsigned long pfn, in memmap_valid_within() argument
79 if (page_to_pfn(page) != pfn) in memmap_valid_within()
madvise.c
874 unsigned long pfn; in madvise_inject_error() local
880 pfn = page_to_pfn(page); in madvise_inject_error()
896 pfn, start); in madvise_inject_error()
905 pfn, start); in madvise_inject_error()
914 ret = memory_failure(pfn, 0); in madvise_inject_error()
nommu.c
125 unsigned long *pfn) in follow_pfn() argument
130 *pfn = address >> PAGE_SHIFT; in follow_pfn()
1648 unsigned long pfn, unsigned long size, pgprot_t prot) in remap_pfn_range() argument
1650 if (addr != (pfn << PAGE_SHIFT)) in remap_pfn_range()
1660 unsigned long pfn = start >> PAGE_SHIFT; in vm_iomap_memory() local
1663 pfn += vma->vm_pgoff; in vm_iomap_memory()
1664 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
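
The nommu variants above are the identity mapping written in both directions: follow_pfn() shifts the address right by PAGE_SHIFT, and remap_pfn_range() only accepts the case where shifting the pfn back left reproduces the address. A sketch; PAGE_SHIFT = 12 (4 KiB pages) is assumed:

#include <stdio.h>

#define PAGE_SHIFT  12    /* assumed 4 KiB pages */

int main(void)
{
    unsigned long address = 0x1234000UL;
    unsigned long pfn = address >> PAGE_SHIFT;    /* follow_pfn() direction */

    /* remap_pfn_range() on nommu only accepts the identity case:
     * shifting the pfn back must reproduce the address. */
    printf("addr %#lx -> pfn %#lx, identity mapping ok: %d\n",
           address, pfn, address == (pfn << PAGE_SHIFT));
    return 0;
}
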
kmemleak.c
1445 unsigned long pfn; in kmemleak_scan() local
1447 for (pfn = start_pfn; pfn < end_pfn; pfn++) { in kmemleak_scan()
1448 struct page *page = pfn_to_online_page(pfn); in kmemleak_scan()
1460 if (!(pfn & 63)) in kmemleak_scan()
vmstat.c
1438 unsigned long pfn; in pagetypeinfo_showblockcount_print() local
1443 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { in pagetypeinfo_showblockcount_print()
1446 page = pfn_to_online_page(pfn); in pagetypeinfo_showblockcount_print()
1451 if (!memmap_valid_within(pfn, page, zone)) in pagetypeinfo_showblockcount_print()
