
Searched refs:pfn (Results 1 – 25 of 26) sorted by relevance


/mm/
page_isolation.c
22 unsigned long flags, pfn; in set_migratetype_isolate() local
31 pfn = page_to_pfn(page); in set_migratetype_isolate()
32 arg.start_pfn = pfn; in set_migratetype_isolate()
89 unsigned long pfn, buddy_pfn; in unset_migratetype_isolate() local
108 pfn = page_to_pfn(page); in unset_migratetype_isolate()
109 buddy_pfn = __find_buddy_pfn(pfn, order); in unset_migratetype_isolate()
110 buddy = page + (buddy_pfn - pfn); in unset_migratetype_isolate()
140 __first_valid_page(unsigned long pfn, unsigned long nr_pages) in __first_valid_page() argument
147 if (!pfn_valid_within(pfn + i)) in __first_valid_page()
149 page = pfn_to_online_page(pfn + i); in __first_valid_page()
[all …]
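
The unset_migratetype_isolate() hits above derive the buddy page from a pfn. A minimal userspace sketch of that arithmetic, assuming the XOR-by-order formula used by the kernel's __find_buddy_pfn() (all values are made up for illustration):

#include <stdio.h>

/* Sketch only: the buddy of an order-N block is assumed to be found by
 * flipping the order-th bit of the pfn, per the buddy allocator's
 * power-of-two layout. */
static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
{
	return pfn ^ (1UL << order);
}

int main(void)
{
	unsigned long pfn = 0x12340;   /* hypothetical isolated page frame */
	unsigned int order = 3;        /* 8-page buddy block */
	unsigned long buddy_pfn = find_buddy_pfn(pfn, order);

	/* mirrors "buddy = page + (buddy_pfn - pfn)" from the hit above */
	printf("pfn %#lx, order %u -> buddy pfn %#lx\n", pfn, order, buddy_pfn);
	return 0;
}
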
memory-failure.c
182 unsigned long pfn, struct page *page, int flags) in kill_proc() argument
188 pfn, t->comm, t->pid); in kill_proc()
327 bool fail, struct page *page, unsigned long pfn, in kill_procs() argument
341 pfn, tk->tsk->comm, tk->tsk->pid); in kill_procs()
353 pfn, page, flags) < 0) in kill_procs()
355 pfn, tk->tsk->comm, tk->tsk->pid); in kill_procs()
559 static int truncate_error_page(struct page *p, unsigned long pfn, in truncate_error_page() argument
569 pfn, err); in truncate_error_page()
573 pfn); in truncate_error_page()
586 pfn); in truncate_error_page()
[all …]
page_ext.c
123 unsigned long pfn = page_to_pfn(page); in lookup_page_ext() local
136 index = pfn - round_down(node_start_pfn(page_to_nid(page)), in lookup_page_ext()
198 unsigned long pfn = page_to_pfn(page); in lookup_page_ext() local
199 struct mem_section *section = __pfn_to_section(pfn); in lookup_page_ext()
208 return get_entry(section->page_ext, pfn); in lookup_page_ext()
227 static int __meminit init_section_page_ext(unsigned long pfn, int nid) in init_section_page_ext() argument
233 section = __pfn_to_section(pfn); in init_section_page_ext()
257 pfn &= PAGE_SECTION_MASK; in init_section_page_ext()
258 section->page_ext = (void *)base - get_entry_size() * pfn; in init_section_page_ext()
279 static void __free_page_ext(unsigned long pfn) in __free_page_ext() argument
[all …]
page_owner.c
261 unsigned long pfn = zone->zone_start_pfn, block_end_pfn; in pagetypeinfo_showmixedcount_print() local
262 unsigned long end_pfn = pfn + zone->spanned_pages; in pagetypeinfo_showmixedcount_print()
268 pfn = zone->zone_start_pfn; in pagetypeinfo_showmixedcount_print()
275 for (; pfn < end_pfn; ) { in pagetypeinfo_showmixedcount_print()
276 page = pfn_to_online_page(pfn); in pagetypeinfo_showmixedcount_print()
278 pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES); in pagetypeinfo_showmixedcount_print()
282 block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); in pagetypeinfo_showmixedcount_print()
287 for (; pfn < block_end_pfn; pfn++) { in pagetypeinfo_showmixedcount_print()
288 if (!pfn_valid_within(pfn)) in pagetypeinfo_showmixedcount_print()
292 page = pfn_to_page(pfn); in pagetypeinfo_showmixedcount_print()
[all …]
memory_hotplug.c
226 unsigned long i, pfn, end_pfn, nr_pages; in register_page_bootmem_info_node() local
236 pfn = pgdat->node_start_pfn; in register_page_bootmem_info_node()
240 for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in register_page_bootmem_info_node()
247 if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node)) in register_page_bootmem_info_node()
248 register_page_bootmem_info_section(pfn); in register_page_bootmem_info_node()
273 unsigned long pfn = phys_start_pfn + i; in __add_section() local
275 if (!pfn_valid(pfn)) in __add_section()
278 page = pfn_to_page(pfn); in __add_section()
367 unsigned long pfn; in find_biggest_section_pfn() local
370 pfn = end_pfn - 1; in find_biggest_section_pfn()
[all …]
page_idle.c
31 static struct page *page_idle_get_page(unsigned long pfn) in page_idle_get_page() argument
36 if (!pfn_valid(pfn)) in page_idle_get_page()
39 page = pfn_to_page(pfn); in page_idle_get_page()
127 unsigned long pfn, end_pfn; in page_idle_bitmap_read() local
133 pfn = pos * BITS_PER_BYTE; in page_idle_bitmap_read()
134 if (pfn >= max_pfn) in page_idle_bitmap_read()
137 end_pfn = pfn + count * BITS_PER_BYTE; in page_idle_bitmap_read()
141 for (; pfn < end_pfn; pfn++) { in page_idle_bitmap_read()
142 bit = pfn % BITMAP_CHUNK_BITS; in page_idle_bitmap_read()
145 page = page_idle_get_page(pfn); in page_idle_bitmap_read()
[all …]
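
page_idle_bitmap_read() above maps a byte offset into the idle-page bitmap onto a pfn range, one bit per page frame. A small sketch of that mapping, assuming 64-bit bitmap chunks (BITMAP_CHUNK_BITS == 64) and hypothetical pos/count values:

#include <stdio.h>

#define BITS_PER_BYTE     8
#define BITMAP_CHUNK_BITS 64   /* assumption: one u64 chunk covers 64 pfns */

int main(void)
{
	/* pos and count stand in for the byte offset and length that a
	 * read() of the idle-page bitmap would hand to the kernel. */
	unsigned long pos = 16, count = 16;
	unsigned long pfn = pos * BITS_PER_BYTE;              /* first pfn covered */
	unsigned long end_pfn = pfn + count * BITS_PER_BYTE;  /* one past the last */

	for (; pfn < end_pfn; pfn++) {
		unsigned long bit = pfn % BITMAP_CHUNK_BITS;  /* bit within the chunk */
		if (bit == 0)
			printf("chunk boundary at pfn %lu\n", pfn);
	}
	return 0;
}
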
cma.c
86 static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, in cma_clear_bitmap() argument
91 bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit; in cma_clear_bitmap()
102 unsigned long base_pfn = cma->base_pfn, pfn = base_pfn; in cma_activate_area() local
113 WARN_ON_ONCE(!pfn_valid(pfn)); in cma_activate_area()
114 zone = page_zone(pfn_to_page(pfn)); in cma_activate_area()
119 base_pfn = pfn; in cma_activate_area()
120 for (j = pageblock_nr_pages; j; --j, pfn++) { in cma_activate_area()
121 WARN_ON_ONCE(!pfn_valid(pfn)); in cma_activate_area()
128 if (page_zone(pfn_to_page(pfn)) != zone) in cma_activate_area()
421 unsigned long pfn = -1; in cma_alloc() local
[all …]
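
cma_clear_bitmap() above converts a pfn back into a bit index in the CMA allocation bitmap, where one bit covers 2^order_per_bit pages. A sketch with assumed base_pfn and order_per_bit values:

#include <stdio.h>

int main(void)
{
	/* Hypothetical CMA area layout, mirroring the hit above. */
	unsigned long base_pfn = 0x80000;   /* start of the CMA area */
	unsigned int order_per_bit = 2;     /* 4 pages per bitmap bit (assumed) */
	unsigned long pfn = 0x80028;        /* first pfn being released */

	unsigned long bitmap_no = (pfn - base_pfn) >> order_per_bit;
	printf("pfn %#lx -> bitmap bit %lu\n", pfn, bitmap_no);
	return 0;
}
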
compaction.c
48 #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) argument
49 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) argument
50 #define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order) argument
51 #define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order) argument
59 unsigned long pfn = page_to_pfn(page); in release_freepages() local
62 if (pfn > high_pfn) in release_freepages()
63 high_pfn = pfn; in release_freepages()
231 unsigned long pfn; in __reset_isolation_suitable() local
236 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { in __reset_isolation_suitable()
241 page = pfn_to_online_page(pfn); in __reset_isolation_suitable()
[all …]
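
The block_start_pfn()/block_end_pfn() helpers in the compaction.c hits round a pfn to its enclosing block boundaries. A standalone sketch with round_down()/ALIGN() expanded for power-of-two sizes and an assumed pageblock_order of 9:

#include <stdio.h>

/* Userspace copies of the helpers shown above; pageblock_order == 9 is
 * an assumption made purely for the example. */
#define round_down(x, y) ((x) & ~((y) - 1))
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))
#define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)   ALIGN((pfn) + 1, 1UL << (order))

int main(void)
{
	unsigned long pfn = 1234567;
	unsigned int pageblock_order = 9;   /* 512-page blocks (assumed) */

	printf("pfn %lu lies in pageblock [%lu, %lu)\n", pfn,
	       block_start_pfn(pfn, pageblock_order),
	       block_end_pfn(pfn, pageblock_order));
	return 0;
}
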
page_alloc.c
340 static inline bool __meminit early_page_uninitialised(unsigned long pfn) in early_page_uninitialised() argument
342 int nid = early_pfn_to_nid(pfn); in early_page_uninitialised()
344 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn) in early_page_uninitialised()
355 unsigned long pfn, unsigned long zone_end, in update_defer_init() argument
363 (pfn & (PAGES_PER_SECTION - 1)) == 0) { in update_defer_init()
364 pgdat->first_deferred_pfn = pfn; in update_defer_init()
375 static inline bool early_page_uninitialised(unsigned long pfn) in early_page_uninitialised() argument
381 unsigned long pfn, unsigned long zone_end, in update_defer_init() argument
390 unsigned long pfn) in get_pageblock_bitmap() argument
393 return __pfn_to_section(pfn)->pageblock_flags; in get_pageblock_bitmap()
[all …]
sparse.c
208 unsigned long pfn; in memory_present() local
222 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) { in memory_present()
223 unsigned long section = pfn_to_section_nr(pfn); in memory_present()
245 unsigned long pfn; in node_memmap_size_bytes() local
249 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in node_memmap_size_bytes()
250 if (nid != early_pfn_to_nid(pfn)) in node_memmap_size_bytes()
253 if (pfn_present(pfn)) in node_memmap_size_bytes()
642 unsigned long pfn; in online_mem_sections() local
644 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in online_mem_sections()
645 unsigned long section_nr = pfn_to_section_nr(pfn); in online_mem_sections()
[all …]
sparse-vmemmap.c
112 unsigned long pfn = vmem_altmap_next_pfn(altmap); in vmem_altmap_alloc() local
116 nr_align = ALIGN(pfn, nr_align) - pfn; in vmem_altmap_alloc()
122 return pfn + nr_align; in vmem_altmap_alloc()
128 unsigned long pfn, nr_pfns; in altmap_alloc_block_buf() local
138 pfn = vmem_altmap_alloc(altmap, nr_pfns); in altmap_alloc_block_buf()
139 if (pfn < ULONG_MAX) in altmap_alloc_block_buf()
140 ptr = __va(__pfn_to_phys(pfn)); in altmap_alloc_block_buf()
144 __func__, pfn, altmap->alloc, altmap->align, nr_pfns); in altmap_alloc_block_buf()
161 unsigned long pfn = pte_pfn(*pte); in vmemmap_verify() local
162 int actual_node = early_pfn_to_nid(pfn); in vmemmap_verify()
memory.c
833 unsigned long pfn = pte_pfn(pte); in _vm_normal_page() local
842 if (is_zero_pfn(pfn)) in _vm_normal_page()
857 if (likely(pfn <= highest_memmap_pfn)) { in _vm_normal_page()
858 struct page *page = pfn_to_page(pfn); in _vm_normal_page()
874 if (!pfn_valid(pfn)) in _vm_normal_page()
880 if (pfn == vma->vm_pgoff + off) in _vm_normal_page()
887 if (is_zero_pfn(pfn)) in _vm_normal_page()
890 if (unlikely(pfn > highest_memmap_pfn)) { in _vm_normal_page()
900 return pfn_to_page(pfn); in _vm_normal_page()
907 unsigned long pfn = pmd_pfn(pmd); in vm_normal_page_pmd() local
[all …]
hwpoison-inject.c
15 unsigned long pfn = val; in hwpoison_inject() local
23 if (!pfn_valid(pfn)) in hwpoison_inject()
26 p = pfn_to_page(pfn); in hwpoison_inject()
54 pr_info("Injecting memory failure at pfn %#lx\n", pfn); in hwpoison_inject()
55 return memory_failure(pfn, 18, MF_COUNT_INCREASED); in hwpoison_inject()
page_vma_mapped.c
75 unsigned long pfn; in check_pte() local
86 pfn = migration_entry_to_pfn(entry); in check_pte()
95 pfn = device_private_entry_to_pfn(entry); in check_pte()
100 pfn = pte_pfn(*pvmw->pte); in check_pte()
103 if (pfn < page_to_pfn(pvmw->page)) in check_pte()
107 if (pfn - page_to_pfn(pvmw->page) >= hpage_nr_pages(pvmw->page)) in check_pte()
huge_memory.c
736 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write, in insert_pfn_pmd() argument
744 entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); in insert_pfn_pmd()
745 if (pfn_t_devmap(pfn)) in insert_pfn_pmd()
763 pmd_t *pmd, pfn_t pfn, bool write) in vmf_insert_pfn_pmd() argument
776 BUG_ON(!pfn_t_devmap(pfn)); in vmf_insert_pfn_pmd()
787 track_pfn_insert(vma, &pgprot, pfn); in vmf_insert_pfn_pmd()
789 insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable); in vmf_insert_pfn_pmd()
803 pud_t *pud, pfn_t pfn, pgprot_t prot, bool write) in insert_pfn_pud() argument
810 entry = pud_mkhuge(pfn_t_pud(pfn, prot)); in insert_pfn_pud()
811 if (pfn_t_devmap(pfn)) in insert_pfn_pud()
[all …]
swap_state.c
657 unsigned long faddr, pfn, fpfn; in swap_readahead_detect() local
682 pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info)); in swap_readahead_detect()
685 swap_ra->win = win = __swapin_nr_pages(pfn, fpfn, hits, in swap_readahead_detect()
694 if (fpfn == pfn + 1) in swap_readahead_detect()
696 else if (pfn == fpfn + 1) in swap_readahead_detect()
711 for (pfn = start; pfn != end; pfn++) in swap_readahead_detect()
hmm.c
248 hmm_pfn_t *pfn) in hmm_vma_do_fault() argument
261 *pfn = HMM_PFN_ERROR; in hmm_vma_do_fault()
376 unsigned long pfn; in hmm_vma_walk_pmd() local
398 pfn = pmd_pfn(pmd) + pte_index(addr); in hmm_vma_walk_pmd()
400 for (; addr < end; addr += PAGE_SIZE, i++, pfn++) in hmm_vma_walk_pmd()
401 pfns[i] = hmm_pfn_t_from_pfn(pfn) | flag; in hmm_vma_walk_pmd()
857 unsigned long pfn; in hmm_devmem_pages_create() local
942 for (pfn = devmem->pfn_first; pfn < devmem->pfn_last; pfn++) { in hmm_devmem_pages_create()
943 struct page *page = pfn_to_page(pfn); in hmm_devmem_pages_create()
internal.h
164 extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
383 unsigned long pfn = page_to_pfn(base) + offset; in mem_map_next() local
384 if (!pfn_valid(pfn)) in mem_map_next()
386 return pfn_to_page(pfn); in mem_map_next()
mmzone.c
76 bool memmap_valid_within(unsigned long pfn, in memmap_valid_within() argument
79 if (page_to_pfn(page) != pfn) in memmap_valid_within()
kmemleak.c
1511 unsigned long pfn; in kmemleak_scan() local
1513 for (pfn = start_pfn; pfn < end_pfn; pfn++) { in kmemleak_scan()
1516 if (!pfn_valid(pfn)) in kmemleak_scan()
1518 page = pfn_to_page(pfn); in kmemleak_scan()
1523 if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page)))) in kmemleak_scan()
migrate.c
2257 unsigned long mpfn, pfn; in migrate_vma_collect_pmd() local
2263 pfn = pte_pfn(pte); in migrate_vma_collect_pmd()
2268 pfn = 0; in migrate_vma_collect_pmd()
2273 mpfn = pfn = 0; in migrate_vma_collect_pmd()
2290 if (is_zero_pfn(pfn)) { in migrate_vma_collect_pmd()
2293 pfn = 0; in migrate_vma_collect_pmd()
2297 mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE; in migrate_vma_collect_pmd()
2303 mpfn = pfn = 0; in migrate_vma_collect_pmd()
2306 pfn = page_to_pfn(page); in migrate_vma_collect_pmd()
2710 unsigned long pfn = pte_pfn(*ptep); in migrate_vma_insert_page() local
[all …]
nommu.c
212 unsigned long *pfn) in follow_pfn() argument
217 *pfn = address >> PAGE_SHIFT; in follow_pfn()
1754 unsigned long pfn, unsigned long size, pgprot_t prot) in remap_pfn_range() argument
1756 if (addr != (pfn << PAGE_SHIFT)) in remap_pfn_range()
1766 unsigned long pfn = start >> PAGE_SHIFT; in vm_iomap_memory() local
1769 pfn += vma->vm_pgoff; in vm_iomap_memory()
1770 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
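
On nommu, follow_pfn() above simply shifts the virtual address, and remap_pfn_range() only accepts the identity mapping addr == pfn << PAGE_SHIFT. A sketch of that conversion, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

int main(void)
{
	unsigned long address = 0x7f3a1c2e5000UL;   /* hypothetical address */
	unsigned long pfn = address >> PAGE_SHIFT;  /* what follow_pfn() reports */

	/* remap_pfn_range() on nommu rejects anything but the identity
	 * mapping, i.e. addr must equal pfn << PAGE_SHIFT (see hit above). */
	printf("address %#lx -> pfn %#lx -> back to %#lx\n",
	       address, pfn, pfn << PAGE_SHIFT);
	return 0;
}
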
vmstat.c
1343 unsigned long pfn; in pagetypeinfo_showblockcount_print() local
1348 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { in pagetypeinfo_showblockcount_print()
1351 page = pfn_to_online_page(pfn); in pagetypeinfo_showblockcount_print()
1356 if (!memmap_valid_within(pfn, page, zone)) in pagetypeinfo_showblockcount_print()
hugetlb.c
1115 unsigned long ret, pfn, flags; in alloc_gigantic_page() local
1126 pfn = ALIGN(zone->zone_start_pfn, nr_pages); in alloc_gigantic_page()
1127 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { in alloc_gigantic_page()
1128 if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) { in alloc_gigantic_page()
1137 ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask); in alloc_gigantic_page()
1139 return pfn_to_page(pfn); in alloc_gigantic_page()
1142 pfn += nr_pages; in alloc_gigantic_page()
1520 unsigned long pfn; in dissolve_free_huge_pages() local
1527 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) { in dissolve_free_huge_pages()
1528 page = pfn_to_page(pfn); in dissolve_free_huge_pages()
gup.c
1473 static int __gup_device_huge(unsigned long pfn, unsigned long addr, in __gup_device_huge() argument
1480 struct page *page = pfn_to_page(pfn); in __gup_device_huge()
1482 pgmap = get_dev_pagemap(pfn, pgmap); in __gup_device_huge()
1492 pfn++; in __gup_device_huge()
