/arch/m68k/include/asm/
bitops.h:
    31: static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
    33:         char *p = (char *)vaddr + (nr ^ 31) / 8;
    41: static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
    43:         char *p = (char *)vaddr + (nr ^ 31) / 8;
    50: static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
    54:                 : "d" (nr ^ 31), "o" (*vaddr)
    59: #define set_bit(nr, vaddr)      bset_reg_set_bit(nr, vaddr)
    61: #define set_bit(nr, vaddr)      bset_mem_set_bit(nr, vaddr)
    63: #define set_bit(nr, vaddr)      (__builtin_constant_p(nr) ? \
    64:                                  bset_mem_set_bit(nr, vaddr) : \
    [all …]
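
Two ideas are packed into the m68k hits above: set_bit() is selected at compile time via __builtin_constant_p, and (nr ^ 31) / 8 turns a bit number into a byte offset when bits are addressed from the most significant end of a 32-bit word. A minimal userspace sketch of both, assuming GCC/Clang; set_bit_reg/set_bit_mem are hypothetical stand-ins for the bset variants, not the kernel's code:

    #include <assert.h>

    static void set_bit_reg(int nr, volatile unsigned int *vaddr)
    {
            /* generic form: index the word, then the bit within it */
            vaddr[nr / 32] |= 1u << (nr % 32);
    }

    static void set_bit_mem(int nr, volatile unsigned int *vaddr)
    {
            /* same effect; a real port would emit a memory-operand insn */
            vaddr[nr / 32] |= 1u << (nr % 32);
    }

    /* constant nr dispatches to the form the compiler folds best,
     * mirroring the three #define variants in the listing */
    #define set_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
            set_bit_mem(nr, vaddr) : set_bit_reg(nr, vaddr))

    int main(void)
    {
            unsigned int word = 0;

            set_bit(5, &word);              /* constant path */
            assert(word == 1u << 5);

            /* byte selection on a big-endian word: bit 0 lives in the
             * last byte of the word, hence (nr ^ 31) / 8 above */
            assert((0 ^ 31) / 8 == 3 && (31 ^ 31) / 8 == 0);
            return 0;
    }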

/arch/riscv/mm/
kasan_init.c:
    29: static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
    39:         ptep = pte_offset_kernel(pmd, vaddr);
    47:         } while (ptep++, vaddr += PAGE_SIZE, vaddr != end);
    50: static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
    61:         pmdp = pmd_offset(pud, vaddr);
    64:                 next = pmd_addr_end(vaddr, end);
    66:                 if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
    75:                 kasan_populate_pte(pmdp, vaddr, next);
    76:         } while (pmdp++, vaddr = next, vaddr != end);
    80:                 unsigned long vaddr, unsigned long end)    (in kasan_populate_pud)
    [all …]
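
kasan_populate_pmd() above uses the kernel's standard range-walk idiom: pmd_addr_end() clamps each step at the next PMD boundary or at `end`, and the alignment check decides whether a whole block entry can be used. A toy, self-contained model of that arithmetic; it is simplified (the real pmd_addr_end() also guards against address wraparound) and assumes 2 MiB PMD entries:

    #include <assert.h>

    #define PMD_SIZE (1UL << 21)            /* assumption: 2 MiB entries */
    #define PMD_MASK (~(PMD_SIZE - 1))

    static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
    {
            unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;
            return boundary < end ? boundary : end;
    }

    int main(void)
    {
            unsigned long vaddr = 0x1000, end = PMD_SIZE + 0x3000, next;
            int whole_pmds = 0, steps = 0;

            do {
                    next = pmd_addr_end(vaddr, end);
                    /* a block mapping fits only if aligned and full-size */
                    if ((vaddr & ~PMD_MASK) == 0 && next - vaddr >= PMD_SIZE)
                            whole_pmds++;
                    steps++;
            } while (vaddr = next, vaddr != end);

            /* one partial step up to the boundary, one tail step */
            assert(steps == 2 && whole_pmds == 0);
            return 0;
    }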

pageattr.c:
    97:                 unsigned long vaddr, unsigned long end)    (in __split_linear_mapping_pmd)
   102:         pmdp = pmd_offset(pudp, vaddr);
   105:                 next = pmd_addr_end(vaddr, end);
   107:                 if (next - vaddr >= PMD_SIZE &&
   108:                     vaddr <= (vaddr & PMD_MASK) && end >= next)
   130:         } while (pmdp++, vaddr = next, vaddr != end);
   136:                 unsigned long vaddr, unsigned long end)    (in __split_linear_mapping_pud)
   142:         pudp = pud_offset(p4dp, vaddr);
   145:                 next = pud_addr_end(vaddr, end);
   147:                 if (next - vaddr >= PUD_SIZE &&
    [all …]

/arch/arm/mm/
cache-xsc3l2.c:
    88:         unsigned long vaddr;    (in xsc3_l2_inv_range)
    95:         vaddr = -1;  /* to force the first mapping */
   101:                 vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
   102:                 xsc3_l2_clean_mva(vaddr);
   103:                 xsc3_l2_inv_mva(vaddr);
   111:                 vaddr = l2_map_va(start, vaddr);
   112:                 xsc3_l2_inv_mva(vaddr);
   120:                 vaddr = l2_map_va(start, vaddr);
   121:                 xsc3_l2_clean_mva(vaddr);
   122:                 xsc3_l2_inv_mva(vaddr);
    [all …]

/arch/parisc/kernel/
pci-dma.c:
    77:                 unsigned long vaddr,    (in map_pte_uncached)
    81:         unsigned long orig_vaddr = vaddr;
    83:         vaddr &= ~PMD_MASK;
    84:         end = vaddr + size;
    96:                 vaddr += PAGE_SIZE;
   100:         } while (vaddr < end);
   104: static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
   108:         unsigned long orig_vaddr = vaddr;
   110:         vaddr &= ~PGDIR_MASK;
   111:         end = vaddr + size;
    [all …]

/arch/parisc/mm/
fixmap.c:
    15:         unsigned long vaddr = __fix_to_virt(idx);    (in set_fixmap)
    16:         pgd_t *pgd = pgd_offset_k(vaddr);
    17:         p4d_t *p4d = p4d_offset(pgd, vaddr);
    18:         pud_t *pud = pud_offset(p4d, vaddr);
    19:         pmd_t *pmd = pmd_offset(pud, vaddr);
    22:         pte = pte_offset_kernel(pmd, vaddr);
    23:         set_pte_at(&init_mm, vaddr, pte, __mk_pte(phys, PAGE_KERNEL_RWX));
    24:         flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
    29:         unsigned long vaddr = __fix_to_virt(idx);    (in clear_fixmap)
    30:         pte_t *pte = virt_to_kpte(vaddr);
    [all …]
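
set_fixmap() above shows the canonical descent through the page-table levels, a shape that recurs in pgtable_32.c, kmap.c, and pgtable-32.c further down. As a rough userspace analogue, here is a toy two-level table: levels are collapsed, sizes are illustrative, and lazy calloc stands in for the kernel's table allocators. This is a sketch of the pattern, not any architecture's layout:

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12
    #define ENTRIES    1024

    typedef uintptr_t pte_t;

    static pte_t *root[ENTRIES];            /* top level ("pgd") */

    static pte_t *walk(uintptr_t vaddr)     /* descend to the pte slot */
    {
            unsigned top = (vaddr >> (PAGE_SHIFT + 10)) & (ENTRIES - 1);
            unsigned bot = (vaddr >> PAGE_SHIFT) & (ENTRIES - 1);

            if (!root[top]) {               /* allocate leaf table lazily */
                    root[top] = calloc(ENTRIES, sizeof(pte_t));
                    assert(root[top]);
            }
            return &root[top][bot];
    }

    int main(void)
    {
            uintptr_t vaddr = 0x1234000;

            *walk(vaddr) = 0x9000 | 1;      /* "set_pte_at": phys | present */
            assert(*walk(vaddr) == 0x9001);
            return 0;
    }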

/arch/arm/include/asm/
page.h:
   111:         void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
   113:                 unsigned long vaddr, struct vm_area_struct *vma);
   117:                 unsigned long vaddr, struct vm_area_struct *vma);
   118: void fa_clear_user_highpage(struct page *page, unsigned long vaddr);
   120:                 unsigned long vaddr, struct vm_area_struct *vma);
   121: void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr);
   123:                 unsigned long vaddr, struct vm_area_struct *vma);
   124: void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr);
   126:                 unsigned long vaddr, struct vm_area_struct *vma);
   127: void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr);
    [all …]

/arch/m68k/sun3/
dvma.c:
    23: static unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr)
    35:         if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) {
    36:                 sun3_put_pte(vaddr, pte);
    37:                 ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] = pte;
    40:         return (vaddr + (kaddr & ~PAGE_MASK));
    49:         unsigned long vaddr;    (in dvma_map_iommu)
    51:         vaddr = dvma_btov(baddr);
    53:         end = vaddr + len;
    55:         while(vaddr < end) {
    56:                 dvma_page(kaddr, vaddr);
    [all …]

mmu_emu.c:
   117: void print_pte_vaddr (unsigned long vaddr)
   119:         pr_cont(" vaddr=%lx [%02lx]", vaddr, sun3_get_segmap (vaddr));
   120:         print_pte (__pte (sun3_get_pte (vaddr)));
   281: inline void mmu_emu_map_pmeg (int context, int vaddr)
   287:         vaddr &= ~SUN3_PMEG_MASK;
   296:                 curr_pmeg, context, vaddr);
   308:         if(vaddr >= PAGE_OFFSET) {
   314:                 sun3_put_segmap (vaddr, curr_pmeg);
   324:                 sun3_put_segmap (vaddr, curr_pmeg);
   327:         pmeg_vaddr[curr_pmeg] = vaddr;
    [all …]

/arch/x86/mm/
mem_encrypt_amd.c:
   158: static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
   161:         unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
   169:                 __early_make_pgtable((unsigned long)vaddr, pmd);
   171:                 vaddr += PMD_SIZE;
   292: static void enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
   295:         unsigned long vaddr_end = vaddr + size;
   297:         while (vaddr < vaddr_end) {
   302:                 kpte = lookup_address(vaddr, &level);
   317:                 vaddr = (vaddr & pmask) + psize;
   322: static bool amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
    [all …]
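
One detail of enc_dec_hypercall() above is worth unpacking: lookup_address() can return a mapping at any level, so the loop advances with vaddr = (vaddr & pmask) + psize, i.e. to the first byte past whatever mapping covered vaddr, regardless of its size. A small demo of that stepping rule (constants illustrative, assuming a 2 MiB mapping):

    #include <assert.h>

    int main(void)
    {
            unsigned long psize = 1UL << 21;        /* mapping is a 2 MiB page */
            unsigned long pmask = ~(psize - 1);
            unsigned long vaddr = (1UL << 21) + 0x1234;

            vaddr = (vaddr & pmask) + psize;        /* skip the whole mapping */
            assert(vaddr == 2UL << 21);
            return 0;
    }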

pgtable_32.c:
    27: void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
    35:         pgd = swapper_pg_dir + pgd_index(vaddr);
    40:         p4d = p4d_offset(pgd, vaddr);
    45:         pud = pud_offset(p4d, vaddr);
    50:         pmd = pmd_offset(pud, vaddr);
    55:         pte = pte_offset_kernel(pmd, vaddr);
    57:                 set_pte_at(&init_mm, vaddr, pte, pteval);
    59:                 pte_clear(&init_mm, vaddr, pte);
    65:         flush_tlb_one_kernel(vaddr);

kaslr.c:
    67:         unsigned long vaddr_start, vaddr;    (in kernel_randomize_memory)
    74:         vaddr = vaddr_start;
   128:                 vaddr += entropy;
   129:                 *kaslr_regions[i].base = vaddr;
   135:                 vaddr += get_padding(&kaslr_regions[i]);
   136:                 vaddr = round_up(vaddr + 1, PUD_SIZE);
   145:         unsigned long paddr, vaddr;    (in init_trampoline_kaslr)
   158:         vaddr = (unsigned long)__va(paddr);
   159:         pgd = pgd_offset_k(vaddr);
   161:         p4d = p4d_offset(pgd, vaddr);
    [all …]
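
In kernel_randomize_memory() above, each region receives its entropy and padding, and the next base is then realigned with round_up(vaddr + 1, PUD_SIZE); the +1 guarantees at least one unmapped byte between regions even when vaddr is already boundary-aligned. A sketch of the arithmetic, using the kernel's power-of-two round_up definition:

    #include <assert.h>

    #define PUD_SIZE (1UL << 30)                    /* 1 GiB */
    #define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

    int main(void)
    {
            unsigned long vaddr = (40UL << 30) + 12345;     /* randomized base */
            assert(round_up(vaddr + 1, PUD_SIZE) == 41UL << 30);

            /* already aligned: the +1 still pushes to the next boundary */
            assert(round_up((41UL << 30) + 1, PUD_SIZE) == 42UL << 30);
            return 0;
    }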

init_32.c:
   107: pmd_t * __init populate_extra_pmd(unsigned long vaddr)
   109:         int pgd_idx = pgd_index(vaddr);
   110:         int pmd_idx = pmd_index(vaddr);
   115: pte_t * __init populate_extra_pte(unsigned long vaddr)
   117:         int pte_idx = pte_index(vaddr);
   120:         pmd = populate_extra_pmd(vaddr);
   132:         unsigned long vaddr;    (in page_table_range_init_count)
   137:         vaddr = start;
   138:         pgd_idx = pgd_index(vaddr);
   139:         pmd_idx = pmd_index(vaddr);
    [all …]

/arch/sh/mm/
kmap.c:
    21:         unsigned long vaddr;    (in kmap_coherent_init)
    24:         vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
    25:         kmap_coherent_pte = virt_to_kpte(vaddr);
    32:         unsigned long vaddr;    (in kmap_coherent)
    43:         vaddr = __fix_to_virt(idx);
    48:         return (void *)vaddr;
    54:         unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;    (in kunmap_coherent)
    55:         enum fixed_addresses idx = __virt_to_fix(vaddr);
    58:                 __flush_purge_region((void *)vaddr, PAGE_SIZE);
    60:                 pte_clear(&init_mm, vaddr, kmap_coherent_pte - idx);
    [all …]

/arch/m68k/sun3x/
dvma.c:
    79:                 unsigned long vaddr, int len)    (in dvma_map_cpu)
    88:         vaddr &= PAGE_MASK;
    90:         end = PAGE_ALIGN(vaddr + len);
    92:         pr_debug("dvma: mapping kern %08lx to virt %08lx\n", kaddr, vaddr);
    93:         pgd = pgd_offset_k(vaddr);
    94:         p4d = p4d_offset(pgd, vaddr);
    95:         pud = pud_offset(p4d, vaddr);
   101:                 if((pmd = pmd_alloc(&init_mm, pud, vaddr)) == NULL) {
   106:                 if((end & PGDIR_MASK) > (vaddr & PGDIR_MASK))
   107:                         end2 = (vaddr + (PGDIR_SIZE-1)) & PGDIR_MASK;
    [all …]

/arch/mips/mm/
pgtable-32.c:
    53:         unsigned long vaddr;    (in pagetable_init)
    72:         vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
    73:         fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
    79:         vaddr = PKMAP_BASE;
    80:         fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
    82:         pgd = swapper_pg_dir + pgd_index(vaddr);
    83:         p4d = p4d_offset(pgd, vaddr);
    84:         pud = pud_offset(p4d, vaddr);
    85:         pmd = pmd_offset(pud, vaddr);
    86:         pte = pte_offset_kernel(pmd, vaddr);

/arch/sparc/include/asm/
viking.h:
   214: static inline unsigned long viking_hwprobe(unsigned long vaddr)
   218:         vaddr &= PAGE_MASK;
   222:                      : "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
   229:                      : "r" (vaddr | 0x200), "i" (ASI_M_FLUSH_PROBE));
   231:                 vaddr &= ~PGDIR_MASK;
   232:                 vaddr >>= PAGE_SHIFT;
   233:                 return val | (vaddr << 8);
   239:                      : "r" (vaddr | 0x100), "i" (ASI_M_FLUSH_PROBE));
   241:                 vaddr &= ~PMD_MASK;
   242:                 vaddr >>= PAGE_SHIFT;
    [all …]

/arch/sparc/mm/
io-unit.c:
    97: static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
   103:         npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
   112:         IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));
   127:         panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
   135:         iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
   136:         vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
   141:         IOD(("%08lx\n", vaddr));
   142:         return vaddr;
   149:         void *vaddr = page_address(page) + offset;    (in iounit_map_page)
   158:         ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
    [all …]
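
The npages line in iounit_get_area() above is the standard page-count round-up: add the sub-page offset of the start address to the length, then round up to whole pages. Demonstrated with 4 KiB pages (an assumption here; the point is the arithmetic, not sun4d specifics):

    #include <assert.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    static unsigned long npages_for(unsigned long vaddr, unsigned long size)
    {
            /* offset within the first page + length, rounded up to pages */
            return ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
    }

    int main(void)
    {
            assert(npages_for(0x1000, 0x1000) == 1);  /* aligned, one page */
            assert(npages_for(0x1ffc, 8) == 2);       /* 8 bytes straddle two */
            return 0;
    }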

/arch/nios2/mm/
dma-mapping.c:
    24:         void *vaddr = phys_to_virt(paddr);    (in arch_sync_dma_for_device)
    28:                 invalidate_dcache_range((unsigned long)vaddr,
    29:                         (unsigned long)(vaddr + size));
    37:                 flush_dcache_range((unsigned long)vaddr,
    38:                         (unsigned long)(vaddr + size));
    48:         void *vaddr = phys_to_virt(paddr);    (in arch_sync_dma_for_cpu)
    53:                 invalidate_dcache_range((unsigned long)vaddr,
    54:                         (unsigned long)(vaddr + size));
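
The two nios2 functions above encode the usual rules for non-coherent DMA: before the device reads memory (DMA_TO_DEVICE), dirty cache lines must be written back; before the CPU reads device-written data (DMA_FROM_DEVICE), stale lines must be discarded. A compilable sketch of that dispatch with stubbed cache primitives (the real ops are nios2 cache-line loops, not shown in the hits):

    enum dma_data_direction { DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_BIDIRECTIONAL };

    /* stubs standing in for the architecture's cache maintenance */
    static void flush_dcache_range(unsigned long s, unsigned long e)
    { (void)s; (void)e; /* writeback dirty lines */ }
    static void invalidate_dcache_range(unsigned long s, unsigned long e)
    { (void)s; (void)e; /* discard (possibly stale) lines */ }

    static void sync_for_device(void *vaddr, unsigned long size,
                                enum dma_data_direction dir)
    {
            unsigned long start = (unsigned long)vaddr, end = start + size;

            if (dir == DMA_FROM_DEVICE)
                    invalidate_dcache_range(start, end);    /* drop stale lines */
            else
                    flush_dcache_range(start, end);         /* push dirty lines out */
    }

    int main(void)
    {
            char buf[64];
            sync_for_device(buf, sizeof(buf), DMA_TO_DEVICE);
            return 0;
    }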

/arch/csky/mm/
highmem.c:
    22:         unsigned long vaddr;    (in kmap_init)
    28:         vaddr = PKMAP_BASE;
    29:         fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);
    31:         pgd = swapper_pg_dir + pgd_index(vaddr);
    33:         pmd = pmd_offset(pud, vaddr);
    34:         pte = pte_offset_kernel(pmd, vaddr);

tcm.c:
    29:         unsigned long vaddr, paddr;
    42:                 vaddr = __fix_to_virt(FIX_TCM - i);
    45:                         pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
    49:                 flush_tlb_one(vaddr);
    61:                 vaddr = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES - i);
    64:                         pte_offset_kernel((pmd_t *) pgd_offset_k(vaddr), vaddr);
    68:                 flush_tlb_one(vaddr);
   111:         unsigned long vaddr;
   116:         vaddr = gen_pool_alloc(tcm_pool, len);
   117:         if (!vaddr)
    [all …]

/arch/x86/include/asm/
page.h:
    25: static inline void clear_user_page(void *page, unsigned long vaddr,
    31: static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
    37: #define vma_alloc_zeroed_movable_folio(vma, vaddr) \
    38:         vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO | __GFP_CMA, 0, vma, vaddr, false)
    77: static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
    79:         return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
    82: static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits)
    84:         return __canonical_address(vaddr, vaddr_bits) == vaddr;
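
__canonical_address() above is a two-shift sign extension: shifting left by (64 - vaddr_bits) and arithmetic-shifting back replicates bit vaddr_bits-1 into all upper bits, which is exactly the x86 canonical-address rule. A standalone check, with uint64_t/int64_t standing in for the kernel's u64/s64 (the unsigned left shift avoids signed-overflow pitfalls in userspace):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t canonical(uint64_t vaddr, unsigned vaddr_bits)
    {
            /* arithmetic right shift copies the old top bit downward */
            return (uint64_t)((int64_t)(vaddr << (64 - vaddr_bits))
                              >> (64 - vaddr_bits));
    }

    int main(void)
    {
            /* 48-bit example: bit 47 decides the upper 16 bits */
            assert(canonical(0x00007fffffffffffULL, 48) == 0x00007fffffffffffULL);
            assert(canonical(0x0000800000000000ULL, 48) == 0xffff800000000000ULL);
            /* an already-canonical address maps to itself */
            assert(canonical(0xffff800000000000ULL, 48) == 0xffff800000000000ULL);
            return 0;
    }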

sev.h:
   169: static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs)
   176:                      : "a"(vaddr), "c"(rmp_psize), "d"(attrs)
   181: static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate)
   190:                      : "a"(vaddr), "c"(rmp_psize), "d"(validate)
   202: void early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
   204: void early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
   206: void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
   207: void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
   223: static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate) { return 0; }
   224: static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs) { return 0; }
    [all …]

/arch/um/kernel/
mem.c:
   119:         unsigned long vaddr;    (in fixrange_init)
   121:         vaddr = start;
   122:         i = pgd_index(vaddr);
   123:         j = pmd_index(vaddr);
   126:         for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
   127:                 p4d = p4d_offset(pgd, vaddr);
   128:                 pud = pud_offset(p4d, vaddr);
   131:                 pmd = pmd_offset(pud, vaddr);
   132:                 for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
   134:                         vaddr += PMD_SIZE;
    [all …]

/arch/xtensa/mm/
cache.c:
    60:                 unsigned long vaddr)    (in kmap_invalidate_coherent)
    62:         if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
    82:                 unsigned long vaddr, unsigned long *paddr)    (in coherent_kvaddr)
    85:         return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
    88: void clear_user_highpage(struct page *page, unsigned long vaddr)
    91:         void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
    94:         kmap_invalidate_coherent(page, vaddr);
   102:                 unsigned long vaddr, struct vm_area_struct *vma)    (in copy_user_highpage)
   105:         void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
   107:         void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
    [all …]
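
coherent_kvaddr() above deals with virtually indexed cache aliasing: when the dcache way size exceeds the page size, the kernel's temporary mapping must share the user address's "color" (the alias bits between the page and way sizes), hence base + (vaddr & DCACHE_ALIAS_MASK). Toy arithmetic with an assumed 16 KiB way and 4 KiB pages; the mask definition is simplified relative to xtensa's:

    #include <assert.h>

    #define PAGE_SIZE         (1UL << 12)
    #define DCACHE_WAY_SIZE   (1UL << 14)   /* assumption: 16 KiB way */
    #define DCACHE_ALIAS_MASK (DCACHE_WAY_SIZE - PAGE_SIZE)   /* bits 13:12 */

    static unsigned long coherent_kvaddr(unsigned long base, unsigned long vaddr)
    {
            return base + (vaddr & DCACHE_ALIAS_MASK);  /* match vaddr's color */
    }

    int main(void)
    {
            /* a user address with color 1 maps to the color-1 kernel slot */
            assert(coherent_kvaddr(0xc0000000UL, 0x12345000UL) == 0xc0001000UL);
            /* two addresses can alias in the cache iff their colors match */
            assert((0x12345000UL & DCACHE_ALIAS_MASK) ==
                   (0x00001000UL & DCACHE_ALIAS_MASK));
            return 0;
    }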