/kernel/linux/linux-5.10/arch/s390/kernel/
D | vdso.c
    117  arch_set_page_dat(virt_to_page(segment_table), SEGMENT_ORDER);  in vdso_alloc_per_cpu()
    118  arch_set_page_dat(virt_to_page(page_table), 0);  in vdso_alloc_per_cpu()
    237  struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);  in vdso_init()
    241  vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);  in vdso_init()
    246  get_page(virt_to_page(vdso_data));  in vdso_init()

/kernel/linux/linux-5.10/arch/powerpc/mm/
D | pgtable_64.c
    109  return virt_to_page(p4d_page_vaddr(p4d));  in p4d_page()
    120  return virt_to_page(pud_page_vaddr(pud));  in pud_page()
    139  return virt_to_page(pmd_page_vaddr(pmd));  in pmd_page()

D | pgtable_32.c
    183  struct page *page = virt_to_page(_sinittext);  in mark_initmem_nx()
    205  page = virt_to_page(_stext);  in mark_rodata_ro()
    214  page = virt_to_page(__start_rodata);  in mark_rodata_ro()

D | pgtable-frag.c
    23   page = virt_to_page(pte_frag);  in pte_frag_destroy()
    111  struct page *page = virt_to_page(table);  in pte_fragment_free()

/kernel/linux/linux-5.10/mm/
D | ptdump.c
    35   if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d)))  in ptdump_pgd_entry()
    55   if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud)))  in ptdump_p4d_entry()
    75   if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd)))  in ptdump_pud_entry()
    95   if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte)))  in ptdump_pmd_entry()

/kernel/linux/linux-5.10/arch/x86/mm/
D | pgtable.c
    63   struct page *page = virt_to_page(pmd);  in ___pmd_free_tlb()
    80   paravirt_tlb_remove_table(tlb, virt_to_page(pud));  in ___pud_free_tlb()
    87   paravirt_tlb_remove_table(tlb, virt_to_page(p4d));  in ___p4d_free_tlb()
    95   struct page *page = virt_to_page(pgd);  in pgd_list_add()
    102  struct page *page = virt_to_page(pgd);  in pgd_list_del()
    115  virt_to_page(pgd)->pt_mm = mm;  in pgd_set_mm()
    219  pgtable_pmd_page_dtor(virt_to_page(pmds[i]));  in free_pmds()
    238  if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {  in preallocate_pmds()
    833  pgtable_pmd_page_dtor(virt_to_page(pmd));  in pud_free_pmd_page()

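The x86 hits above (and the s390 pmd_alloc_one()/pmd_free() pair later in this list) follow one pattern: the table is passed around as a pmd_t *, but the split page-table-lock state lives in its struct page, so virt_to_page() bridges the two before pgtable_pmd_page_ctor()/pgtable_pmd_page_dtor() run. A minimal sketch of that pairing, with invented helper names rather than the kernel's own functions:

#include <linux/mm.h>
#include <linux/gfp.h>

/* Hypothetical helpers illustrating the ctor/dtor pairing seen above;
 * this is a hedged sketch, not a copy of any in-tree allocator. */
static pmd_t *example_pmd_alloc(void)
{
        struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);

        if (!page)
                return NULL;
        if (!pgtable_pmd_page_ctor(page)) {     /* set up split page-table lock state */
                __free_pages(page, 0);
                return NULL;
        }
        return (pmd_t *)page_address(page);
}

static void example_pmd_free(pmd_t *pmd)
{
        struct page *page = virt_to_page(pmd);  /* recover the struct page */

        pgtable_pmd_page_dtor(page);            /* tear down the lock state */
        __free_pages(page, 0);
}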
/kernel/linux/linux-5.10/arch/um/include/asm/
D | pgalloc.h
    39   pgtable_pmd_page_dtor(virt_to_page(pmd)); \
    40   tlb_remove_page((tlb),virt_to_page(pmd)); \

/kernel/linux/linux-5.10/arch/s390/mm/
D | page-states.c
    115  page = virt_to_page(pmd_val(*pmd));  in mark_kernel_pmd()
    133  page = virt_to_page(pud_val(*pud));  in mark_kernel_pud()
    154  page = virt_to_page(p4d_val(*p4d));  in mark_kernel_p4d()
    176  page = virt_to_page(pgd_val(*pgd));  in mark_kernel_pgd()

/kernel/linux/linux-5.10/arch/m68k/include/asm/
D | page_no.h
    26   #define virt_to_page(addr) (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))  macro
    29   #define pfn_to_page(pfn) virt_to_page(pfn_to_virt(pfn))

D | mcf_pgalloc.h
    40   struct page *page = virt_to_page(pgtable);  in __pte_free_tlb()
    66   struct page *page = virt_to_page(pgtable);  in pte_free()

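The page_no.h definition above is the simplest form of the conversion: on NOMMU m68k all of RAM sits in one contiguous direct map, so virt_to_page() is plain pointer arithmetic into mem_map. A small userspace mock of that arithmetic, with made-up constants and a made-up page array:

#include <stdio.h>

#define PAGE_SHIFT   12UL                 /* 4 KiB pages (illustrative) */
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define PAGE_OFFSET  0x40000000UL         /* start of the direct-mapped RAM (made up) */

struct page { int dummy; };               /* stand-in for the kernel's struct page */
static struct page mem_map[1024];         /* one entry per page frame */

static struct page *mock_virt_to_page(unsigned long addr)
{
        /* same arithmetic as the page_no.h macro above */
        return mem_map + ((addr - PAGE_OFFSET) >> PAGE_SHIFT);
}

int main(void)
{
        unsigned long kaddr = PAGE_OFFSET + 5 * PAGE_SIZE + 123;

        /* any address inside the sixth page resolves to mem_map[5] */
        printf("index = %ld\n", (long)(mock_virt_to_page(kaddr) - mem_map));
        return 0;
}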
/kernel/linux/linux-5.10/arch/um/kernel/skas/
D | mmu.c
    44   *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));  in init_stub_pte()
    114  mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start);  in uml_setup_stubs()
    115  mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);  in uml_setup_stubs()

/kernel/linux/linux-5.10/arch/arm64/include/asm/
D | tlb.h
    86   struct page *page = virt_to_page(pmdp);  in __pmd_free_tlb()
    97   tlb_remove_table(tlb, virt_to_page(pudp));  in __pud_free_tlb()

/kernel/linux/linux-5.10/arch/parisc/mm/
D | ioremap.c
    58   for (page = virt_to_page(t_addr);  in ioremap()
    59   page <= virt_to_page(t_end); page++) {  in ioremap()

/kernel/linux/linux-5.10/arch/nds32/mm/
D | mm-nds32.c
    37   inc_zone_page_state(virt_to_page((unsigned long *)new_pgd),  in pgd_alloc()
    62   dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);  in pgd_free()

/kernel/linux/linux-5.10/fs/ubifs/
D | crypto.c
    42   err = fscrypt_encrypt_block_inplace(inode, virt_to_page(p), pad_len,  in ubifs_encrypt()
    67   err = fscrypt_decrypt_block_inplace(inode, virt_to_page(&dn->data),  in ubifs_decrypt()

/kernel/linux/linux-5.10/arch/alpha/include/asm/
D | mmzone.h
    71   #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)  macro
    93   __xx = virt_to_page(kvirt); \

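The alpha definition above takes the longer route: under DISCONTIGMEM there is no single flat mem_map, so the kernel virtual address is first converted to a physical address, then to a page frame number, and pfn_to_page() selects the owning node's map. A userspace mock of that lookup; the node layout, array sizes, and the PAGE_OFFSET value are invented for illustration:

#include <stdio.h>

#define PAGE_SHIFT   13UL                       /* alpha uses 8 KiB pages */
#define PAGE_OFFSET  0x40000000UL               /* made up; the real alpha direct-map base is much higher */

struct page { int dummy; };

static struct page node0_map[512];              /* per-node page arrays (sizes made up) */
static struct page node1_map[512];
static const unsigned long node1_start_pfn = 512;

static unsigned long mock_pa(unsigned long kaddr)
{
        return kaddr - PAGE_OFFSET;             /* simplified __pa(): direct-map virt -> phys */
}

static struct page *mock_pfn_to_page(unsigned long pfn)
{
        /* pfn_to_page() under DISCONTIGMEM: pick the owning node's map */
        if (pfn < node1_start_pfn)
                return &node0_map[pfn];
        return &node1_map[pfn - node1_start_pfn];
}

static struct page *mock_virt_to_page(unsigned long kaddr)
{
        /* same shape as the mmzone.h macro above */
        return mock_pfn_to_page(mock_pa(kaddr) >> PAGE_SHIFT);
}

int main(void)
{
        unsigned long kaddr = PAGE_OFFSET + (600UL << PAGE_SHIFT);
        struct page *pg = mock_virt_to_page(kaddr);

        /* pfn 600 lies past the node boundary (512), so it lands in node1_map[88] */
        printf("node1 index = %ld\n", (long)(pg - node1_map));
        return 0;
}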
/kernel/linux/linux-5.10/arch/xtensa/mm/
D | kasan_init.c
    27   mk_pte(virt_to_page(kasan_early_shadow_page),  in kasan_early_init()
    95   mk_pte(virt_to_page(kasan_early_shadow_page),  in kasan_init()

/kernel/linux/linux-5.10/arch/riscv/kernel/
D | vdso.c
    50   pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));  in vdso_init()
    53   vdso_pagelist[i] = virt_to_page(vdso_data);  in vdso_init()

/kernel/linux/linux-5.10/arch/arc/include/asm/
D | pgalloc.h
    110  page = virt_to_page(pte_pg);  in pte_alloc_one()
    126  pgtable_pte_page_dtor(virt_to_page(ptep));  in pte_free()

/kernel/linux/linux-5.10/mm/kasan/
D | init.c
    40   return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));  in kasan_p4d_table()
    52   return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));  in kasan_pud_table()
    64   return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));  in kasan_pmd_table()
    76   return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte));  in kasan_pte_table()
    81   return pte_page(pte) == virt_to_page(lm_alias(kasan_early_shadow_page));  in kasan_early_shadow_page_entry()

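Both the mm/ptdump.c and mm/kasan/init.c hits rely on the same trick: the early shadow tables are kernel-image symbols, so lm_alias() first maps the symbol address to its linear-map alias, where virt_to_page() is valid, and the resulting page is compared with the page a table entry points at. A hedged sketch of that check for one level (the function name is hypothetical):

#include <linux/kasan.h>
#include <linux/mm.h>

/* Does this pmd entry still point at the shared, zero-filled early shadow
 * pte table?  Sketch of the pattern above, not an in-tree helper. */
static bool pmd_points_at_early_shadow(pmd_t pmd)
{
        return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte));
}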
/kernel/linux/linux-5.10/arch/csky/mm/
D | init.c
    116  ClearPageReserved(virt_to_page(addr));  in free_initmem()
    117  init_page_count(virt_to_page(addr));  in free_initmem()

/kernel/linux/linux-5.10/arch/sparc/kernel/
D | leon_smp.c
    248  free_reserved_page(virt_to_page(&trapbase_cpu1));  in leon_smp_done()
    251  free_reserved_page(virt_to_page(&trapbase_cpu2));  in leon_smp_done()
    254  free_reserved_page(virt_to_page(&trapbase_cpu3));  in leon_smp_done()

/kernel/linux/linux-5.10/arch/s390/include/asm/
D | pgalloc.h
    89   if (!pgtable_pmd_page_ctor(virt_to_page(table))) {  in pmd_alloc_one()
    100  pgtable_pmd_page_dtor(virt_to_page(pmd));  in pmd_free()

/kernel/linux/linux-5.10/arch/nios2/mm/
D | ioremap.c
    139  for (page = virt_to_page(t_addr);  in ioremap()
    140  page <= virt_to_page(t_end); page++)  in ioremap()

/kernel/linux/linux-5.10/kernel/events/
D | ring_buffer.c
    636  struct page *page = virt_to_page(rb->aux_pages[idx]);  in rb_free_aux_page()
    725  struct page *page = virt_to_page(rb->aux_pages[0]);  in rb_alloc_aux()
    780  return virt_to_page(rb->user_page);  in __perf_mmap_to_page()
    782  return virt_to_page(rb->data_pages[pgoff - 1]);  in __perf_mmap_to_page()
    800  struct page *page = virt_to_page(addr);  in perf_mmap_free_page()
    952  return virt_to_page(rb->aux_pages[aux_pgoff]);  in perf_mmap_to_page()

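The ring_buffer.c hits show the mmap side of the perf buffer: offset 0 is the control page (rb->user_page), offsets 1..nr_pages are the data pages, and the AUX area keeps its own page array; all of them are stored as kernel virtual addresses of linearly mapped pages, so the fault path recovers the struct page with virt_to_page(). A simplified sketch of that offset-to-page dispatch; the structure and function names here are assumptions, not the kernel's exact code:

#include <linux/mm.h>

/* Reduced model of the perf buffer layout used by the hits above. */
struct mock_perf_buffer {
        void          *user_page;       /* control page, mmap offset 0   */
        void          **data_pages;     /* data pages, mmap offsets 1..N */
        unsigned long nr_pages;         /* number of data pages          */
};

static struct page *mock_mmap_to_page(struct mock_perf_buffer *rb,
                                      unsigned long pgoff)
{
        if (pgoff == 0)
                return virt_to_page(rb->user_page);
        if (pgoff <= rb->nr_pages)
                return virt_to_page(rb->data_pages[pgoff - 1]);
        return NULL;                    /* beyond the data area (AUX not modelled) */
}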