/arch/microblaze/mm/pgtable.c
     61  p = addr & PAGE_MASK;  in __ioremap()
    118  return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));  in __ioremap()
    131  vfree((void *) (PAGE_MASK & (unsigned long) addr));  in iounmap()
    203  pgd = pgd_offset(mm, addr & PAGE_MASK);  in get_pteptr()
    205  p4d = p4d_offset(pgd, addr & PAGE_MASK);  in get_pteptr()
    206  pud = pud_offset(p4d, addr & PAGE_MASK);  in get_pteptr()
    207  pmd = pmd_offset(pud, addr & PAGE_MASK);  in get_pteptr()
    209  pte = pte_offset_kernel(pmd, addr & PAGE_MASK);  in get_pteptr()
    239  pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);  in iopa()

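Most of the ioremap()/iounmap() hits in this listing share one idiom: split the address into a page-aligned base (addr & PAGE_MASK) and an in-page offset (addr & ~PAGE_MASK), map only the aligned base, then add the offset back onto the returned virtual address; iounmap() strips the offset again before freeing the mapping. A minimal user-space sketch of just that arithmetic, assuming the generic PAGE_MASK definition and 4 KiB pages (the mapping call itself is omitted):

    #include <stdio.h>

    #define PAGE_SHIFT 12                       /* assumed 4 KiB pages for the example */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long phys   = 0x40001234UL;
        unsigned long base   = phys & PAGE_MASK;   /* 0x40001000: what actually gets mapped    */
        unsigned long offset = phys & ~PAGE_MASK;  /* 0x234: added back to the mapped address  */

        printf("base=%#lx offset=%#lx\n", base, offset);

        /* iounmap() side: mask the offset off again before freeing the mapping */
        unsigned long v = base + offset;           /* stand-in for the ioremap() return value */
        printf("unmap %#lx\n", v & PAGE_MASK);
        return 0;
    }
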
/arch/sh/mm/tlbflush_32.c
     25  page &= PAGE_MASK;  in local_flush_tlb_page()
     60  start &= PAGE_MASK;  in local_flush_tlb_range()
     62  end &= PAGE_MASK;  in local_flush_tlb_range()
     93  start &= PAGE_MASK;  in local_flush_tlb_kernel_range()
     95  end &= PAGE_MASK;  in local_flush_tlb_kernel_range()

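The TLB range-flush hits here (and in tlb-r3k.c further down) are the usual preamble of truncating both ends of the range to page boundaries before walking it one page at a time. A sketch of that loop shape, assuming the generic PAGE_MASK definition and a hypothetical flush_one_page() in place of the MMU register writes; the round-up of end before masking is the usual companion step and is not visible in the grep output because it does not mention PAGE_MASK:

    #define PAGE_SHIFT 12                    /* assumed 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* Hypothetical stand-in for the per-page TLB probe + invalidate. */
    static void flush_one_page(unsigned long va) { (void)va; }

    static void flush_range(unsigned long start, unsigned long end)
    {
        start &= PAGE_MASK;                           /* round start down to its page      */
        end = (end + PAGE_SIZE - 1) & PAGE_MASK;      /* round end up to the next boundary */

        for (unsigned long va = start; va < end; va += PAGE_SIZE)
            flush_one_page(va);                       /* one invalidation per page         */
    }
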
/arch/m68k/kernel/sys_m68k.c
     63  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
    104  paddr += addr & ~(PAGE_MASK | 15);  in cache_flush_040()
    107  unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);  in cache_flush_040()
    125  i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;  in cache_flush_040()
    180  len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);  in cache_flush_040()
    268  unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);  in cache_flush_060()
    286  i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;  in cache_flush_060()
    319  addr &= PAGE_MASK;  in cache_flush_060()
    341  len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);  in cache_flush_060()
    342  addr &= PAGE_MASK;  /* Workaround for bug in some  in cache_flush_060()

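The cache_flush_040()/cache_flush_060() lines are all per-page bookkeeping with ~PAGE_MASK: bytes remaining in the current page, 16-byte cache lines remaining in it (the >> 4), and padding the length so that a whole-page count still covers a partially used first and last page. A worked sketch of that arithmetic, assuming the generic PAGE_MASK definition and 4 KiB pages; the helper names are made up for the illustration:

    #define PAGE_SHIFT 12                    /* assumed 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    static unsigned long bytes_left_in_page(unsigned long addr)
    {
        return PAGE_SIZE - (addr & ~PAGE_MASK);         /* e.g. offset 0xf80 -> 0x80 bytes left */
    }

    static unsigned long lines_left_in_page(unsigned long paddr)
    {
        return (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4; /* 16-byte cache lines to the boundary  */
    }

    static unsigned long pages_to_flush(unsigned long addr, unsigned long len)
    {
        /* Pad len with the leading in-page offset and PAGE_SIZE - 1 so the shift
         * counts every page the range [addr, addr + len) touches. */
        len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
        return len >> PAGE_SHIFT;
    }
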
/arch/mips/lib/r3k_dump_tlb.c
     48  if ((entryhi & PAGE_MASK) != KSEG0 &&  in dump_tlb()
     58  entryhi & PAGE_MASK,  in dump_tlb()
     60  entrylo0 & PAGE_MASK,  in dump_tlb()

/arch/arc/include/asm/pgtable-levels.h
    111  #define p4d_bad(x) ((p4d_val(x) & ~PAGE_MASK))
    114  #define p4d_pgtable(p4d) ((pud_t *)(p4d_val(p4d) & PAGE_MASK))
    133  #define pud_bad(x) ((pud_val(x) & ~PAGE_MASK))
    136  #define pud_pgtable(pud) ((pmd_t *)(pud_val(pud) & PAGE_MASK))
    160  #define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK))
    163  #define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)

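In the ARC page-table macros above (and the UML pud macros at the end of the listing), PAGE_MASK splits an upper-level entry that points at a lower-level table: the high bits are the page-aligned address of the next table, and on this port any bit left below PAGE_MASK makes the entry "bad". A toy sketch of the same split; the toy_pmd_t type and helper names are made up for the illustration:

    #define PAGE_SHIFT 12                    /* assumed 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* Toy stand-ins for pmd_val()/pmd_bad()/pmd_page_vaddr() from the listing. */
    typedef struct { unsigned long val; } toy_pmd_t;

    static int toy_pmd_bad(toy_pmd_t pmd)
    {
        return (pmd.val & ~PAGE_MASK) != 0;  /* low bits set -> not a clean table pointer */
    }

    static unsigned long toy_pmd_table(toy_pmd_t pmd)
    {
        return pmd.val & PAGE_MASK;          /* page-aligned address of the next-level table */
    }
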
/arch/powerpc/mm/ioremap_64.c
     24  paligned = addr & PAGE_MASK;  in __ioremap_caller()
     25  offset = addr & ~PAGE_MASK;  in __ioremap_caller()
     57  addr = (void *)((unsigned long __force)PCI_FIX_ADDR(token) & PAGE_MASK);  in iounmap()

/arch/powerpc/mm/ioremap_32.c
     30  p = addr & PAGE_MASK;  in __ioremap_caller()
     31  offset = addr & ~PAGE_MASK;  in __ioremap_caller()
     91  vunmap((void *)(PAGE_MASK & (unsigned long)addr));  in iounmap()

/arch/ia64/mm/ioremap.c
     69  page_base = phys_addr & PAGE_MASK;  in ioremap()
     77  offset = phys_addr & ~PAGE_MASK;  in ioremap()
     78  phys_addr &= PAGE_MASK;  in ioremap()
    121  vunmap((void *) ((unsigned long) addr & PAGE_MASK));  in iounmap()

/arch/xtensa/mm/ioremap.c
     17  unsigned long offset = paddr & ~PAGE_MASK;  in xtensa_ioremap()
     23  paddr &= PAGE_MASK;  in xtensa_ioremap()
     61  void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);  in xtensa_iounmap()

/arch/m68k/mm/memory.c
    110  pushcl040(paddr & PAGE_MASK);  in cache_clear()
    116  paddr &= PAGE_MASK;  in cache_clear()
    164  paddr &= PAGE_MASK;  in cache_push()

/arch/m68k/mm/cache.c
     50  return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);  in virt_to_phys_slow()
     71  address &= PAGE_MASK;  in flush_icache_user_range()

/arch/microblaze/pci/indirect_pci.c
    148  resource_size_t base = cfg_addr & PAGE_MASK;  in setup_indirect_pci()
    152  hose->cfg_addr = mbase + (cfg_addr & ~PAGE_MASK);  in setup_indirect_pci()
    153  if ((cfg_data & PAGE_MASK) != base)  in setup_indirect_pci()
    154  mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE);  in setup_indirect_pci()
    155  hose->cfg_data = mbase + (cfg_data & ~PAGE_MASK);  in setup_indirect_pci()

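setup_indirect_pci() (both the microblaze copy here and the powerpc original further down) uses PAGE_MASK to decide whether the config-address and config-data registers fall inside the same physical page: if they do, one mapped page is shared and only the in-page offsets differ; if not, the data register gets its own page-sized mapping. A sketch of that decision, with a hypothetical map_page() standing in for ioremap():

    #define PAGE_SHIFT 12                     /* assumed 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* Hypothetical stand-in for ioremap(base, PAGE_SIZE). */
    static void *map_page(unsigned long base) { return (void *)base; }

    static void setup_cfg_windows(unsigned long cfg_addr, unsigned long cfg_data,
                                  void **addr_reg, void **data_reg)
    {
        unsigned long base = cfg_addr & PAGE_MASK;
        char *mbase = map_page(base);

        *addr_reg = mbase + (cfg_addr & ~PAGE_MASK);    /* offset of the address register      */

        if ((cfg_data & PAGE_MASK) != base)             /* data register in a different page?  */
            mbase = map_page(cfg_data & PAGE_MASK);     /* then map that page separately       */
        *data_reg = mbase + (cfg_data & ~PAGE_MASK);
    }
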
/arch/parisc/mm/ioremap.c
     71  offset = phys_addr & ~PAGE_MASK;  in ioremap()
     72  phys_addr &= PAGE_MASK;  in ioremap()
     95  unsigned long addr = (unsigned long)io_addr & PAGE_MASK;  in iounmap()

/arch/powerpc/sysdev/indirect_pci.c
    163  resource_size_t base = cfg_addr & PAGE_MASK;  in setup_indirect_pci()
    167  hose->cfg_addr = mbase + (cfg_addr & ~PAGE_MASK);  in setup_indirect_pci()
    168  if ((cfg_data & PAGE_MASK) != base)  in setup_indirect_pci()
    169  mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE);  in setup_indirect_pci()
    170  hose->cfg_data = mbase + (cfg_data & ~PAGE_MASK);  in setup_indirect_pci()

/arch/mips/mm/tlb-r3k.c
     89  start &= PAGE_MASK;  in local_flush_tlb_range()
     91  end &= PAGE_MASK;  in local_flush_tlb_range()
    125  start &= PAGE_MASK;  in local_flush_tlb_kernel_range()
    127  end &= PAGE_MASK;  in local_flush_tlb_kernel_range()
    162  page &= PAGE_MASK;  in local_flush_tlb_page()
    203  address &= PAGE_MASK;  in __update_tlb()

/arch/mips/mm/ioremap.c
     91  offset = phys_addr & ~PAGE_MASK;  in ioremap_prot()
     92  phys_addr &= PAGE_MASK;  in ioremap_prot()
    117  vunmap((void *)((unsigned long)addr & PAGE_MASK));  in iounmap()

/arch/openrisc/mm/ioremap.c
     51  offset = addr & ~PAGE_MASK;  in ioremap()
     52  p = addr & PAGE_MASK;  in ioremap()
    103  return vfree((void *)(PAGE_MASK & (unsigned long)addr));  in iounmap()

/arch/csky/abiv1/inc/abi/page.h
     17  if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK))  in clear_user_page()
     25  if (pages_do_alias((unsigned long) to, vaddr & PAGE_MASK))  in copy_user_page()

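The csky abiv1 clear_user_page()/copy_user_page() lines compare the kernel mapping of the page with the user virtual address rounded down to a page boundary (vaddr & PAGE_MASK) to decide whether the two mappings land in different colours of the aliasing data cache, and only then do the extra flush. A rough sketch of that test, assuming a hypothetical CACHE_COLOUR_MASK for the index bits above the page offset (the real pages_do_alias() mask is arch-specific):

    #define PAGE_SHIFT 12                       /* assumed 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    #define CACHE_COLOUR_MASK 0x3000UL          /* hypothetical: index bits above the page offset */

    /* Two mappings of the same page alias if they differ in the colour bits. */
    static int pages_do_alias_sketch(unsigned long kaddr, unsigned long uaddr)
    {
        return ((kaddr ^ uaddr) & CACHE_COLOUR_MASK) != 0;
    }

    static int needs_flush(void *kernel_mapping, unsigned long user_vaddr)
    {
        return pages_do_alias_sketch((unsigned long)kernel_mapping,
                                     user_vaddr & PAGE_MASK);
    }
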
/arch/microblaze/kernel/sys_microblaze.c
     40  if (pgoff & ~PAGE_MASK)  in SYSCALL_DEFINE6()
     50  if (pgoff & (~PAGE_MASK >> 12))  in SYSCALL_DEFINE6()

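These two checks look like the usual split between the mmap entry points: the plain mmap path is handed a byte offset and rejects it if any bits below the page boundary are set (offset & ~PAGE_MASK), while the mmap2 path gets its offset in 4096-byte units and only has to reject the low (~PAGE_MASK >> 12) bits, which is non-trivial only when PAGE_SIZE is larger than 4 KiB. A worked sketch of the second check with an assumed 16 KiB page size, where the offset must be a multiple of four 4096-byte units:

    #define PAGE_SHIFT 14                    /* assumed 16 KiB pages for the example */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* mmap2 passes its offset in 4096-byte units; with 16 KiB pages it must be
     * a multiple of 4, i.e. none of the low (~PAGE_MASK >> 12) == 0x3 bits set. */
    static int mmap2_offset_ok(unsigned long pgoff_4k)
    {
        return (pgoff_4k & (~PAGE_MASK >> 12)) == 0;
    }
    /* mmap2_offset_ok(4) -> 1 (64 KiB offset, page aligned)
     * mmap2_offset_ok(3) -> 0 (12 KiB offset, not a 16 KiB multiple) */
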
/arch/sparc/mm/iommu.c
    164  start &= PAGE_MASK;  in iommu_flush_iotlb()
    188  unsigned long off = paddr & ~PAGE_MASK;  in __sbus_iommu_map_page()
    208  for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)  in __sbus_iommu_map_page()
    283  unsigned int busa = dma_addr & PAGE_MASK;  in sbus_iommu_unmap_page()
    284  unsigned long off = dma_addr & ~PAGE_MASK;  in sbus_iommu_unmap_page()
    334  BUG_ON((va & ~PAGE_MASK) != 0);  in sbus_iommu_alloc()
    335  BUG_ON((addr & ~PAGE_MASK) != 0);  in sbus_iommu_alloc()
    336  BUG_ON((len & ~PAGE_MASK) != 0);  in sbus_iommu_alloc()
    406  BUG_ON((busa & ~PAGE_MASK) != 0);  in sbus_iommu_free()
    407  BUG_ON((len & ~PAGE_MASK) != 0);  in sbus_iommu_free()

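The sparc32 IOMMU lines use PAGE_MASK in two ways: extracting the in-page offset of the buffer being mapped (paddr & ~PAGE_MASK) so it can be re-applied to the page-granular bus address, and assertions that addresses and lengths handed to the page-based allocator really are page aligned. A small sketch of both, assuming a hypothetical map_pages() that returns a page-aligned bus address:

    #include <assert.h>

    #define PAGE_SHIFT 12                    /* assumed 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* Hypothetical: install IOMMU entries for npages pages, return the bus address. */
    static unsigned long map_pages(unsigned long paddr_page, unsigned long npages)
    {
        assert((paddr_page & ~PAGE_MASK) == 0);  /* page-based interface: caller must align    */
        (void)npages;                            /* a real allocator would install npages PTEs */
        return 0x10000000UL;                     /* made-up bus address for the sketch         */
    }

    static unsigned long map_single(unsigned long paddr, unsigned long len)
    {
        unsigned long off    = paddr & ~PAGE_MASK;                  /* byte offset in its page */
        unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

        return map_pages(paddr & PAGE_MASK, npages) + off;          /* re-apply the offset     */
    }
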
/arch/h8300/mm/init.c
     60  unsigned long end_mem = memory_end & PAGE_MASK;  in paging_init()
     96  high_memory = (void *) (memory_end & PAGE_MASK);  in mem_init()

/arch/mips/include/asm/ginvt.h
     41  addr &= PAGE_MASK << 1;  in ginvt_va()
     52  addr &= PAGE_MASK << 1;  in ginvt_va_mmid()

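ginvt.h masks with PAGE_MASK << 1 rather than PAGE_MASK: MIPS TLB entries map an even/odd pair of pages, so a global invalidate by virtual address is aligned to the pair, clearing one extra low bit. A one-line illustration of the difference, assuming the generic PAGE_MASK definition and 4 KiB pages:

    #define PAGE_SHIFT 12                    /* assumed 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* 0x1234f000 is an odd page; the pair-aligned address drops one more bit. */
    unsigned long page_aligned = 0x1234f000UL & PAGE_MASK;         /* 0x1234f000 */
    unsigned long pair_aligned = 0x1234f000UL & (PAGE_MASK << 1);  /* 0x1234e000 */
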
/arch/um/kernel/skas/uaccess.c
     79  (addr & ~PAGE_MASK);  in do_op_one_page()
     82  (addr & ~PAGE_MASK);  in do_op_one_page()
    114  while (addr < ((addr + remain) & PAGE_MASK)) {  in buffer_op()
    293  (((unsigned long) addr) & ~PAGE_MASK);  in arch_futex_atomic_op_inuser()
    296  ((unsigned long) addr & ~PAGE_MASK);  in arch_futex_atomic_op_inuser()
    372  uaddr = page_address(page) + (((unsigned long) uaddr) & ~PAGE_MASK);  in futex_atomic_cmpxchg_inatomic()
    374  uaddr = kmap_atomic(page) + ((unsigned long) uaddr & ~PAGE_MASK);  in futex_atomic_cmpxchg_inatomic()

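The UML uaccess and futex paths rebuild a kernel pointer to a user location from a struct page plus the low bits of the user address: the page supplies the page-aligned kernel mapping, and (uaddr & ~PAGE_MASK) re-applies the byte offset inside that page. A sketch of the address arithmetic only, with a hypothetical lookup_page_base() standing in for page_address()/kmap_atomic():

    #define PAGE_SHIFT 12                    /* assumed 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* Hypothetical: returns the kernel mapping of the page backing uaddr.
     * The real code gets this from page_address() or kmap_atomic(). */
    static void *lookup_page_base(unsigned long uaddr)
    {
        return (void *)(uaddr & PAGE_MASK);  /* dummy body, keeps the sketch compilable */
    }

    static void *user_ptr_to_kernel(unsigned long uaddr)
    {
        char *base = lookup_page_base(uaddr);   /* page-aligned kernel address */
        return base + (uaddr & ~PAGE_MASK);     /* same byte within that page  */
    }
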
/arch/hexagon/mm/ioremap.c
     15  unsigned long offset = phys_addr & ~PAGE_MASK;  in ioremap()
     43  vunmap((void *) ((unsigned long) addr & PAGE_MASK));  in iounmap()

/arch/um/include/asm/pgtable-3level.h
     56  #define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
     85  #define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
     86  #define pud_pgtable(pud) ((pmd_t *) __va(pud_val(pud) & PAGE_MASK))