/arch/x86/include/asm/xen/

D | page.h |
     57  extern int xen_alloc_p2m_entry(unsigned long pfn);
     59  extern unsigned long get_phys_to_machine(unsigned long pfn);
     60  extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
     61  extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
    142  static inline unsigned long __pfn_to_mfn(unsigned long pfn)
    146          if (pfn < xen_p2m_size)
    147                  mfn = xen_p2m_addr[pfn];
    148          else if (unlikely(pfn < xen_max_p2m_pfn))
    149                  return get_phys_to_machine(pfn);
    151          return IDENTITY_FRAME(pfn);
    [all …]

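The excerpt above is the hot path of the Xen phys-to-machine lookup: pfns below xen_p2m_size come straight out of the flat xen_p2m_addr[] array, pfns up to xen_max_p2m_pfn fall back to the slower get_phys_to_machine() walk, and anything beyond the p2m is mapped 1:1. A minimal userspace sketch of that three-way split, with toy sizes, a stubbed slow path, and an assumed marker bit standing in for IDENTITY_FRAME():

    #include <stdio.h>

    #define IDENTITY_FRAME_BIT  (1UL << 63)          /* assumed marker bit */
    #define IDENTITY_FRAME(pfn) ((pfn) | IDENTITY_FRAME_BIT)

    static unsigned long xen_p2m_addr[8] = {         /* toy flat p2m */
            100, 101, 102, 103, 104, 105, 106, 107
    };
    static unsigned long xen_p2m_size    = 8;
    static unsigned long xen_max_p2m_pfn = 16;

    /* stand-in for the real tree-walking slow path */
    static unsigned long get_phys_to_machine(unsigned long pfn)
    {
            return IDENTITY_FRAME(pfn);
    }

    static unsigned long __pfn_to_mfn(unsigned long pfn)
    {
            if (pfn < xen_p2m_size)            /* fast path: one indexed load */
                    return xen_p2m_addr[pfn];
            else if (pfn < xen_max_p2m_pfn)    /* sparse tail: slow path */
                    return get_phys_to_machine(pfn);
            return IDENTITY_FRAME(pfn);        /* beyond the p2m: mapped 1:1 */
    }

    int main(void)
    {
            printf("%lu %lx %lx\n", __pfn_to_mfn(3),
                   __pfn_to_mfn(12), __pfn_to_mfn(1000));
            return 0;
    }

The ordering matters: the common case, a RAM pfn inside the flat array, resolves with a single indexed load.
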
/arch/x86/xen/

D | p2m.c |
    126  static inline unsigned p2m_top_index(unsigned long pfn)
    128          BUG_ON(pfn >= MAX_P2M_PFN);
    129          return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
    132  static inline unsigned p2m_mid_index(unsigned long pfn)
    134          return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
    137  static inline unsigned p2m_index(unsigned long pfn)
    139          return pfn % P2M_PER_PAGE;
    174  static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)
    179          p2m[i] = IDENTITY_FRAME(pfn + i);
    219          unsigned long pfn, mfn;        in xen_build_mfn_list_list()
    [all …]

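p2m_top_index(), p2m_mid_index() and p2m_index() split one pfn into the three levels of the p2m radix tree. Assuming 4 KiB pages and 8-byte entries, P2M_PER_PAGE and P2M_MID_PER_PAGE both come out to 512 (a 32-bit build would give 1024), so the split is a base-512 digit decomposition, as this self-checking sketch shows:

    #include <assert.h>
    #include <stdio.h>

    #define P2M_PER_PAGE     512UL   /* assumes 4 KiB pages, 8-byte entries */
    #define P2M_MID_PER_PAGE 512UL

    int main(void)
    {
            unsigned long pfn = 0x12345678;
            unsigned long top = pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
            unsigned long mid = (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
            unsigned long idx = pfn % P2M_PER_PAGE;

            /* the three indices reassemble into the original pfn */
            assert((top * P2M_MID_PER_PAGE + mid) * P2M_PER_PAGE + idx == pfn);
            printf("top=%lu mid=%lu idx=%lu\n", top, mid, idx);
            return 0;
    }
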
D | setup.c |
    164  unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
    169          if (pfn >= xen_extra_mem[i].start_pfn &&
    170              pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
    174          return IDENTITY_FRAME(pfn);
    182          unsigned long pfn, pfn_s, pfn_e;        in xen_inv_extra_mem()
    190          for (pfn = pfn_s; pfn < pfn_e; pfn++)
    191                  set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
    257          unsigned long pfn, end;        in xen_set_identity_and_release_chunk()
    264          for (pfn = start_pfn; pfn < end; pfn++) {
    265                  unsigned long mfn = pfn_to_mfn(pfn);
    [all …]

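xen_chk_extra_mem() is a linear scan of the xen_extra_mem[] ranges, each a {start_pfn, n_pfns} pair. Reduced here to a plain membership test (the region count and array contents below are made-up values for illustration; the real function returns the pfn or IDENTITY_FRAME(pfn) rather than a bool):

    #include <stdbool.h>
    #include <stdio.h>

    #define XEN_EXTRA_MEM_MAX_REGIONS 4   /* assumed bound */

    struct xen_memory_region {
            unsigned long start_pfn;
            unsigned long n_pfns;
    };

    static struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] = {
            { .start_pfn = 0x1000, .n_pfns = 0x100 },
            { .start_pfn = 0x8000, .n_pfns = 0x40  },
    };

    static bool pfn_in_extra_mem(unsigned long pfn)
    {
            for (int i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
                    if (pfn >= xen_extra_mem[i].start_pfn &&
                        pfn <  xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
                            return true;
            return false;
    }

    int main(void)
    {
            printf("%d %d\n", pfn_in_extra_mem(0x1080), pfn_in_extra_mem(0x2000));
            return 0;
    }
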
/arch/arm/xen/

D | p2m.c |
     22          unsigned long pfn;        (struct member)
     43          if (new->pfn == entry->pfn)        in xen_add_phys_to_mach_entry()
     46          if (new->pfn < entry->pfn)
     58                  __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
     63  unsigned long __pfn_to_mfn(unsigned long pfn)
     73          if (entry->pfn <= pfn &&
     74              entry->pfn + entry->nr_pages > pfn) {
     75                  unsigned long mfn = entry->mfn + (pfn - entry->pfn);
     79          if (pfn < entry->pfn)
    151  bool __set_phys_to_machine_multi(unsigned long pfn,
    [all …]

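Unlike the x86 flat array, the ARM p2m keeps {pfn, mfn, nr_pages} ranges in a red-black tree, and __pfn_to_mfn() descends it testing whether the target pfn falls inside each entry's span. The same descent can be modelled with a binary search over a pfn-sorted array, which is what this sketch does (the rbtree plumbing is deliberately left out; the comparisons are the ones from the excerpt):

    #include <stdio.h>

    #define INVALID_P2M_ENTRY (~0UL)

    struct xen_p2m_entry {
            unsigned long pfn;        /* start of the guest frame range */
            unsigned long mfn;        /* start of the machine frame range */
            unsigned long nr_pages;
    };

    /* sorted by pfn, as the rbtree keeps them */
    static struct xen_p2m_entry entries[] = {
            { .pfn = 0x100, .mfn = 0x9100, .nr_pages = 16 },
            { .pfn = 0x400, .mfn = 0x7000, .nr_pages = 64 },
    };

    static unsigned long __pfn_to_mfn(unsigned long pfn)
    {
            int lo = 0, hi = (int)(sizeof(entries) / sizeof(entries[0])) - 1;

            while (lo <= hi) {                      /* same tests as the tree walk */
                    int mid = lo + (hi - lo) / 2;
                    const struct xen_p2m_entry *e = &entries[mid];

                    if (e->pfn <= pfn && e->pfn + e->nr_pages > pfn)
                            return e->mfn + (pfn - e->pfn);  /* offset in range */
                    if (pfn < e->pfn)
                            hi = mid - 1;
                    else
                            lo = mid + 1;
            }
            return INVALID_P2M_ENTRY;
    }

    int main(void)
    {
            printf("%lx %lx\n", __pfn_to_mfn(0x105), __pfn_to_mfn(0x200));
            return 0;
    }
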
/arch/x86/include/asm/

D | mmzone_32.h |
     34  static inline int pfn_to_nid(unsigned long pfn)
     37          return((int) physnode_map[(pfn) / PAGES_PER_SECTION]);
     43  static inline int pfn_valid(int pfn)
     45          int nid = pfn_to_nid(pfn);
     48          return (pfn < node_end_pfn(nid));
     52  #define early_pfn_valid(pfn)    pfn_valid((pfn))

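pfn_to_nid() here is a one-load lookup: physnode_map[] records a node id per PAGES_PER_SECTION-sized slice of the pfn space, and memory_present() in arch/x86/mm/numa_32.c (excerpted further down) fills it in. A sketch of both sides with an assumed toy section size:

    #include <stdio.h>
    #include <string.h>

    #define PAGES_PER_SECTION 1024UL   /* assumed toy value */
    #define MAX_SECTIONS      64

    static signed char physnode_map[MAX_SECTIONS];

    /* memory_present()-style fill: mark each section of [start, end) as nid */
    static void mark_present(unsigned long start, unsigned long end, int nid)
    {
            for (unsigned long pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                    physnode_map[pfn / PAGES_PER_SECTION] = (signed char)nid;
    }

    static int pfn_to_nid(unsigned long pfn)
    {
            return physnode_map[pfn / PAGES_PER_SECTION];
    }

    int main(void)
    {
            memset(physnode_map, -1, sizeof(physnode_map));   /* -1 = no node */
            mark_present(0, 16 * PAGES_PER_SECTION, 0);
            mark_present(16 * PAGES_PER_SECTION, 32 * PAGES_PER_SECTION, 1);
            printf("%d %d\n", pfn_to_nid(5000), pfn_to_nid(20 * PAGES_PER_SECTION));
            return 0;
    }
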
/arch/arc/include/asm/

D | mmzone.h |
     14  static inline int pfn_to_nid(unsigned long pfn)
     19          is_end_low = pfn <= virt_to_pfn(0xFFFFFFFFUL);
     26          if (pfn >= ARCH_PFN_OFFSET && is_end_low)
     32  static inline int pfn_valid(unsigned long pfn)
     34          int nid = pfn_to_nid(pfn);
     36          return (pfn <= node_end_pfn(nid));

/arch/alpha/include/asm/

D | mmzone.h |
     35  #define node_localnr(pfn, nid)  ((pfn) - NODE_DATA(nid)->node_start_pfn)
     82          unsigned long pfn;                              \
     84          pfn = page_to_pfn(page) << 32;                  \
     85          pte_val(pte) = pfn | pgprot_val(pgprot);        \
    104  #define pfn_to_nid(pfn)  pa_to_nid(((u64)(pfn) << PAGE_SHIFT))
    105  #define pfn_valid(pfn)                                  \
    106          (((pfn) - node_start_pfn(pfn_to_nid(pfn))) <    \
    107           node_spanned_pages(pfn_to_nid(pfn)))           \

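The alpha pfn_valid() collapses a two-sided range check into a single compare: (pfn - node_start_pfn) < node_spanned_pages, evaluated in unsigned arithmetic, so a pfn below the node start wraps around to a huge value and fails the same '<' test. A standalone demonstration with invented node bounds:

    #include <stdio.h>

    /* toy node span: pfns [0x400, 0x400 + 0x200) */
    static const unsigned long node_start_pfn     = 0x400;
    static const unsigned long node_spanned_pages = 0x200;

    static int pfn_valid(unsigned long pfn)
    {
            /* underflow wraps, so pfn < start fails the '<' test too */
            return (pfn - node_start_pfn) < node_spanned_pages;
    }

    int main(void)
    {
            /* prints "0 1 0": below, inside, past the node */
            printf("%d %d %d\n", pfn_valid(0x3ff), pfn_valid(0x450),
                   pfn_valid(0x600));
            return 0;
    }
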
/arch/unicore32/kernel/

D | hibernate.c |
     72          unsigned long pfn;        in resume_physical_mapping_init()
     80          pfn = 0;
     87          if (pfn >= max_low_pfn)
     93          if (pfn >= max_low_pfn)
    104          for (; pte < max_pte; pte++, pfn++) {
    105                  if (pfn >= max_low_pfn)
    108                  set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
    142  int pfn_is_nosave(unsigned long pfn)
    147          return (pfn >= begin_pfn) && (pfn < end_pfn);

/arch/arm/mach-omap2/

D | io.c |
     70          .pfn = __phys_to_pfn(L3_24XX_PHYS),
     76          .pfn = __phys_to_pfn(L4_24XX_PHYS),
     86          .pfn = __phys_to_pfn(DSP_MEM_2420_PHYS),
     92          .pfn = __phys_to_pfn(DSP_IPI_2420_PHYS),
     98          .pfn = __phys_to_pfn(DSP_MMU_2420_PHYS),
    110          .pfn = __phys_to_pfn(L4_WK_243X_PHYS),
    116          .pfn = __phys_to_pfn(OMAP243X_GPMC_PHYS),
    122          .pfn = __phys_to_pfn(OMAP243X_SDRC_PHYS),
    128          .pfn = __phys_to_pfn(OMAP243X_SMS_PHYS),
    140          .pfn = __phys_to_pfn(L3_34XX_PHYS),
    [all …]

/arch/s390/kernel/

D | suspend.c |
    100  void page_key_read(unsigned long *pfn)
    106          page = pfn_to_page(*pfn);
    111          *(unsigned char *) pfn = key;
    118  void page_key_memorize(unsigned long *pfn)
    120          page_key_wp->data[page_key_wx] = *(unsigned char *) pfn;
    121          *(unsigned char *) pfn = 0;
    152  int pfn_is_nosave(unsigned long pfn)
    160          if (pfn <= LC_PAGES)
    162          if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
    165          if (pfn >= stext_pfn && pfn <= end_rodata_pfn)
    [all …]

/arch/unicore32/mm/

D | ioremap.c |
     98  remap_area_sections(unsigned long virt, unsigned long pfn,
    114          set_pmd(pmd, __pmd(__pfn_to_phys(pfn) | type->prot_sect));
    115          pfn += SZ_4M >> PAGE_SHIFT;
    125  void __iomem *__uc32_ioremap_pfn_caller(unsigned long pfn,
    136          if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SECTION_MASK))
    142          if (pfn_valid(pfn)) {
    164          if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
    166                  err = remap_area_sections(addr, pfn, size, type);
    168          err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
    185          unsigned long pfn = __phys_to_pfn(phys_addr);        in __uc32_ioremap_caller()
    [all …]

/arch/arm/include/asm/

D | dma-mapping.h |
     36  static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
     39          pfn -= dev->dma_pfn_offset;
     40          return (dma_addr_t)__pfn_to_bus(pfn);
     45          unsigned long pfn = __bus_to_pfn(addr);        in dma_to_pfn()
     48          pfn += dev->dma_pfn_offset;
     50          return pfn;
     56          unsigned long pfn = dma_to_pfn(dev, addr);     in dma_to_virt()
     58          return phys_to_virt(__pfn_to_phys(pfn));
     73  static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
     75          return __arch_pfn_to_dma(dev, pfn);

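pfn_to_dma() and dma_to_pfn() convert between the CPU's and the device's view of memory: dev->dma_pfn_offset is subtracted on the way to the bus and added on the way back. In this sketch __pfn_to_bus()/__bus_to_pfn() are reduced to plain shifts, which is an assumption; real platforms may fold in a bus offset as well:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    typedef unsigned long long dma_addr_t;

    struct device {
            unsigned long dma_pfn_offset;   /* pfn delta, CPU vs. bus view */
    };

    static dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
    {
            pfn -= dev->dma_pfn_offset;
            return (dma_addr_t)pfn << PAGE_SHIFT;
    }

    static unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
    {
            return (unsigned long)(addr >> PAGE_SHIFT) + dev->dma_pfn_offset;
    }

    int main(void)
    {
            /* a device that sees RAM 2 GiB lower than the CPU does */
            struct device dev = { .dma_pfn_offset = 0x80000 };
            unsigned long pfn = 0x90000;
            dma_addr_t handle = pfn_to_dma(&dev, pfn);

            assert(dma_to_pfn(&dev, handle) == pfn);   /* round-trip is exact */
            printf("pfn %lx <-> dma %llx\n", pfn, handle);
            return 0;
    }
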
/arch/ia64/include/asm/

D | page.h |
     99  extern int ia64_pfn_valid (unsigned long pfn);
    101  # define ia64_pfn_valid(pfn) 1
    108  # define pfn_to_page(pfn)    (vmem_map + (pfn))
    109  # define __pfn_to_phys(pfn)  PFN_PHYS(pfn)
    118  # define pfn_valid(pfn)      (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
    122  # define pfn_valid(pfn)      (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
    127  #define pfn_to_kaddr(pfn)    __va((pfn) << PAGE_SHIFT)

/arch/x86/kernel/

D | crash_dump_64.c |
     14  static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
     24          vaddr = (__force void *)ioremap_encrypted(pfn << PAGE_SHIFT, PAGE_SIZE);
     26          vaddr = (__force void *)ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
     57  ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
     60          return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, false);
     68  ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
     71          return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true);

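__copy_oldmem_page() turns the pfn into a physical offset (pfn << PAGE_SHIFT), maps that one frame with ioremap_encrypted() or ioremap_cache(), copies csize bytes starting at offset, and unmaps. A userspace analogue where a file stands in for the old kernel's memory image (the file-based model and the error convention are assumptions, not the kernel's API):

    #include <stdio.h>
    #include <sys/types.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* 'oldmem' is a file standing in for the crashed kernel's memory image */
    static ssize_t copy_oldmem_page(FILE *oldmem, unsigned long pfn, char *buf,
                                    size_t csize, unsigned long offset)
    {
            if (!csize || offset + csize > PAGE_SIZE)
                    return -1;                   /* stay within the one frame */

            /* pfn << PAGE_SHIFT is the frame's byte position, as above */
            if (fseek(oldmem, (long)((pfn << PAGE_SHIFT) + offset), SEEK_SET))
                    return -1;
            return (ssize_t)fread(buf, 1, csize, oldmem);
    }

    int main(void)
    {
            FILE *oldmem = tmpfile();
            char page[PAGE_SIZE], buf[16];

            if (!oldmem)
                    return 1;
            for (unsigned i = 0; i < PAGE_SIZE; i++)
                    page[i] = (char)i;
            fseek(oldmem, 3L << PAGE_SHIFT, SEEK_SET);   /* frame of pfn 3 */
            fwrite(page, 1, PAGE_SIZE, oldmem);

            printf("%zd\n", copy_oldmem_page(oldmem, 3, buf, sizeof(buf), 0x100));
            fclose(oldmem);
            return 0;
    }
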
D | crash_dump_32.c |
     18  static inline bool is_crashed_pfn_valid(unsigned long pfn)
     28          return pte_pfn(pfn_pte(pfn, __pgprot(0))) == pfn;
     51  ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
     59          if (!is_crashed_pfn_valid(pfn))
     62          vaddr = kmap_atomic_pfn(pfn);

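is_crashed_pfn_valid() is a neat round-trip test: build a PTE from the pfn, read the pfn back, and compare; the value only survives if it fits in the PTE's frame field. A model with a toy 32-bit PTE, assuming the 20-bit frame field of non-PAE 32-bit x86:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    typedef unsigned int pte_t;   /* toy 32-bit PTE: 20 bits left for the pfn */

    static pte_t pfn_pte(unsigned long pfn)
    {
            return (pte_t)(pfn << PAGE_SHIFT);   /* high pfn bits fall off here */
    }

    static unsigned long pte_pfn(pte_t pte)
    {
            return pte >> PAGE_SHIFT;
    }

    static bool is_crashed_pfn_valid(unsigned long pfn)
    {
            /* survives the round trip only if pfn fits in the frame field */
            return pte_pfn(pfn_pte(pfn)) == pfn;
    }

    int main(void)
    {
            /* prints "1 0": 0xfffff fits in 20 bits, 0x100000 does not */
            printf("%d %d\n", is_crashed_pfn_valid(0xfffff),
                   is_crashed_pfn_valid(0x100000));
            return 0;
    }
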
/arch/arm64/mm/

D | mmap.c |
     46  int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
     48          return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
     61  int devmem_is_allowed(unsigned long pfn)
     63          if (iomem_is_exclusive(pfn << PAGE_SHIFT))
     65          if (!page_is_ram(pfn))

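valid_mmap_phys_addr_range() is pure pfn arithmetic: the end of the proposed mapping, (pfn << PAGE_SHIFT) + size, must not set any bit above PHYS_MASK. A worked version assuming a 48-bit physical address space:

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PHYS_MASK  ((1UL << 48) - 1)   /* assumed 48-bit PA space */

    static int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
    {
            /* any bit above PHYS_MASK in the end address rejects the range */
            return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
    }

    int main(void)
    {
            unsigned long last_pfn = PHYS_MASK >> PAGE_SHIFT;   /* top frame */

            printf("%d %d\n",
                   valid_mmap_phys_addr_range(0x1000, 4096),    /* fits */
                   valid_mmap_phys_addr_range(last_pfn, 8192)); /* runs past */
            return 0;
    }
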
/arch/arm/mm/

D | fault-armv.c |
     38          unsigned long pfn, pte_t *ptep)        in do_adjust_pte()
     53          flush_cache_page(vma, address, pfn);
     54          outer_flush_range((pfn << PAGE_SHIFT),
     55                            (pfn << PAGE_SHIFT) + PAGE_SIZE);
     90          unsigned long pfn)                     in adjust_pte()
    120          ret = do_adjust_pte(vma, address, pfn, pte);
    130          unsigned long addr, pte_t *ptep, unsigned long pfn)    in make_coherent()
    157          aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
    161          do_adjust_pte(vma, addr, pfn, ptep);
    180          unsigned long pfn = pte_pfn(*ptep);    in update_mmu_cache()
    [all …]

D | flush.c |
     38  static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
     43          set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));
     52  static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
     58          set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
     98  void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
    101          vivt_flush_cache_page(vma, user_addr, pfn);
    106          flush_pfn_alias(pfn, user_addr);
    115  #define flush_pfn_alias(pfn,vaddr)         do { } while (0)
    116  #define flush_icache_alias(pfn,vaddr,len)  do { } while (0)
    271          unsigned long pfn;        in __sync_icache_dcache()
    [all …]

D | ioremap.c |
    189  remap_area_sections(unsigned long virt, unsigned long pfn,
    207          pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
    208          pfn += SZ_1M >> PAGE_SHIFT;
    209          pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
    210          pfn += SZ_1M >> PAGE_SHIFT;
    221  remap_area_supersections(unsigned long virt, unsigned long pfn,
    241          super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
    243          super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
    254          pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
    261  static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
    [all …]

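remap_area_supersections() shows how a 16 MiB ARM supersection encodes a 36-bit physical address in a 32-bit descriptor: the low 32 address bits land in their usual place, while PA bits [35:32], extracted as (pfn >> (32 - PAGE_SHIFT)) & 0xf, are folded into descriptor bits [23:20]. An encode/decode sketch of just that bit shuffle (protection bits omitted; bases must be 16 MiB aligned):

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* fold PA bits [35:32] into descriptor bits [23:20], as in the excerpt */
    static unsigned int supersection_pmd(unsigned long long pfn)
    {
            unsigned int pmd = (unsigned int)(pfn << PAGE_SHIFT); /* PA[31:24] */
            pmd |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;      /* PA[35:32] */
            return pmd;
    }

    static unsigned long long supersection_pa(unsigned int pmd)
    {
            unsigned long long pa = pmd & 0xff000000u;            /* PA[31:24] */
            pa |= (unsigned long long)((pmd >> 20) & 0xf) << 32;  /* PA[35:32] */
            return pa;
    }

    int main(void)
    {
            unsigned long long pfn = 0x345000;  /* 16 MiB aligned, PA > 4 GiB */
            unsigned int pmd = supersection_pmd(pfn);

            assert(supersection_pa(pmd) == pfn << PAGE_SHIFT);
            printf("pfn %llx -> descriptor %08x\n", pfn, pmd);
            return 0;
    }
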
/arch/sh/include/asm/

D | mmzone.h |
     13  static inline int pfn_to_nid(unsigned long pfn)
     18          if (pfn >= node_start_pfn(nid) && pfn <= node_end_pfn(nid))
     24  static inline struct pglist_data *pfn_to_pgdat(unsigned long pfn)
     26          return NODE_DATA(pfn_to_nid(pfn));

/arch/m68k/include/asm/

D | page_no.h |
     24  #define pfn_to_virt(pfn)  __va((pfn) << PAGE_SHIFT)
     29  #define pfn_to_page(pfn)  virt_to_page(pfn_to_virt(pfn))
     31  #define pfn_valid(pfn)    ((pfn) < max_mapnr)

/arch/x86/mm/

D | numa_32.c |
     48          unsigned long pfn;        in memory_present()
     56          for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
     57                  physnode_map[pfn / PAGES_PER_SECTION] = nid;
     58                  printk(KERN_CONT "%lx ", pfn);

D | init_32.c |
    262          unsigned long pfn;        in kernel_physical_mapping_init()
    293          pfn = start_pfn;
    294          pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
    299          if (pfn >= end_pfn)
    302          pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
    307          for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
    309                  unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
    326                          pfn &= PMD_MASK >> PAGE_SHIFT;
    327                          addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
    336                          set_pmd(pmd, pfn_pmd(pfn, init_prot));
    [all …]

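Before installing a large-page mapping, the code aligns the pfn down with pfn &= PMD_MASK >> PAGE_SHIFT, keeping only the bits that select whole PMD-sized units. Assuming PAE-style 2 MiB large pages (non-PAE x86-32 uses 4 MiB), that clears the low 9 bits of the pfn:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PMD_SHIFT  21                     /* assumed PAE: 2 MiB large pages */
    #define PMD_MASK   (~((1UL << PMD_SHIFT) - 1))

    int main(void)
    {
            unsigned long pfn = 0x123456;

            /* keep only the bits selecting whole large pages: clears pfn[8:0] */
            unsigned long aligned = pfn & (PMD_MASK >> PAGE_SHIFT);

            printf("pfn %lx -> %lx (phys %lx -> %lx)\n", pfn, aligned,
                   pfn << PAGE_SHIFT, aligned << PAGE_SHIFT);
            return 0;
    }
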
/arch/x86/power/

D | hibernate_32.c |
     83          unsigned long pfn;        in resume_physical_mapping_init()
     91          pfn = 0;
     98          if (pfn >= max_low_pfn)
    102          if (pfn >= max_low_pfn)
    110          set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
    111          pfn += PTRS_PER_PTE;
    120          for (; pte < max_pte; pte++, pfn++) {
    121                  if (pfn >= max_low_pfn)
    124                  set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));

/arch/riscv/include/asm/

D | pgalloc.h |
     18          unsigned long pfn = virt_to_pfn(pte);        in pmd_populate_kernel()
     20          set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
     26          unsigned long pfn = virt_to_pfn(page_address(pte));        in pmd_populate()
     28          set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
     34          unsigned long pfn = virt_to_pfn(pmd);        in pud_populate()
     36          set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));

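On RISC-V a table entry at any level is built the same way: the next table's pfn shifted into the PPN field, OR'd with _PAGE_TABLE (valid bit set, R/W/X clear, which marks a pointer to the next level rather than a leaf). A sketch with Sv39's shift of 10; the exact _PAGE_TABLE value below is an assumption:

    #include <stdio.h>

    #define _PAGE_PFN_SHIFT 10     /* Sv39/Sv48: PPN field starts at bit 10 */
    #define _PAGE_TABLE     0x1UL  /* assumed: V=1, R/W/X=0, next-level pointer */

    typedef struct { unsigned long pmd; } pmd_t;

    static pmd_t __pmd(unsigned long val) { return (pmd_t){ val }; }

    /* point a PMD entry at the page-table page living in frame 'pfn' */
    static pmd_t make_table_entry(unsigned long pfn)
    {
            return __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE);
    }

    int main(void)
    {
            pmd_t e = make_table_entry(0x80212);

            /* shifting right strips the flag bits and recovers the pfn */
            printf("entry %lx -> table pfn %lx\n",
                   e.pmd, e.pmd >> _PAGE_PFN_SHIFT);
            return 0;
    }
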