Lines matching refs:addr — every reference to the identifier addr; the functions shown are from the Linux kernel's mm/vmalloc.c.
48 unsigned long addr = (unsigned long)x; in is_vmalloc_addr() local
50 return addr >= VMALLOC_START && addr < VMALLOC_END; in is_vmalloc_addr()
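
The test at lines 48-50 is a plain half-open range check against the vmalloc window. A minimal userspace sketch of the same idiom, with the per-architecture VMALLOC_START/VMALLOC_END constants replaced by made-up placeholder values:

#include <stdbool.h>
#include <stdio.h>

/* Placeholder bounds; in the kernel these are per-architecture constants. */
#define VMALLOC_START 0xffffc90000000000UL
#define VMALLOC_END   0xffffe90000000000UL

/* Same half-open test as is_vmalloc_addr(): START is inside, END is not. */
static bool is_vmalloc_addr(const void *x)
{
        unsigned long addr = (unsigned long)x;

        return addr >= VMALLOC_START && addr < VMALLOC_END;
}

int main(void)
{
        printf("%d\n", is_vmalloc_addr((void *)0xffffc90000001000UL)); /* 1 */
        printf("%d\n", is_vmalloc_addr((void *)0x1000UL));             /* 0 */
        return 0;
}
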
73 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, in vunmap_pte_range() argument
78 pte = pte_offset_kernel(pmd, addr); in vunmap_pte_range()
80 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); in vunmap_pte_range()
82 } while (pte++, addr += PAGE_SIZE, addr != end); in vunmap_pte_range()
86 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, in vunmap_pmd_range() argument
93 pmd = pmd_offset(pud, addr); in vunmap_pmd_range()
95 next = pmd_addr_end(addr, end); in vunmap_pmd_range()
105 vunmap_pte_range(pmd, addr, next, mask); in vunmap_pmd_range()
108 } while (pmd++, addr = next, addr != end); in vunmap_pmd_range()
111 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, in vunmap_pud_range() argument
118 pud = pud_offset(p4d, addr); in vunmap_pud_range()
120 next = pud_addr_end(addr, end); in vunmap_pud_range()
130 vunmap_pmd_range(pud, addr, next, mask); in vunmap_pud_range()
131 } while (pud++, addr = next, addr != end); in vunmap_pud_range()
134 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, in vunmap_p4d_range() argument
141 p4d = p4d_offset(pgd, addr); in vunmap_p4d_range()
143 next = p4d_addr_end(addr, end); in vunmap_p4d_range()
153 vunmap_pud_range(p4d, addr, next, mask); in vunmap_p4d_range()
154 } while (p4d++, addr = next, addr != end); in vunmap_p4d_range()
175 unsigned long addr = start; in unmap_kernel_range_noflush() local
178 BUG_ON(addr >= end); in unmap_kernel_range_noflush()
179 pgd = pgd_offset_k(addr); in unmap_kernel_range_noflush()
181 next = pgd_addr_end(addr, end); in unmap_kernel_range_noflush()
186 vunmap_p4d_range(pgd, addr, next, &mask); in unmap_kernel_range_noflush()
187 } while (pgd++, addr = next, addr != end); in unmap_kernel_range_noflush()
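
Lines 73-187 are the classic four-level page-table teardown: each level loops over its entries with a do/while, and pte/pmd/pud/p4d_addr_end() clamps next to whichever comes first, the end of the current entry's span or the overall end. A self-contained sketch of that clamping idiom for one hypothetical 2 MiB level (the kernel's macros compare boundary - 1 < end - 1 so the walk also survives wraparound at the very top of the address space):

#include <stdio.h>

#define LVL_SHIFT 21                    /* hypothetical: 2 MiB span per entry */
#define LVL_SIZE  (1UL << LVL_SHIFT)
#define LVL_MASK  (~(LVL_SIZE - 1))

/* Analog of pmd_addr_end(): end of the current entry's span, clamped. */
static unsigned long lvl_addr_end(unsigned long addr, unsigned long end)
{
        unsigned long boundary = (addr + LVL_SIZE) & LVL_MASK;

        return boundary < end ? boundary : end;
}

int main(void)
{
        unsigned long addr = 0x1ff000, end = 0x600000, next;

        /* Same loop shape as vunmap_pmd_range(): one pass per entry. */
        do {
                next = lvl_addr_end(addr, end);
                printf("entry covers [%#lx, %#lx)\n", addr, next);
        } while (addr = next, addr != end);
        return 0;
}
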
193 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, in vmap_pte_range() argument
204 pte = pte_alloc_kernel_track(pmd, addr, mask); in vmap_pte_range()
214 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); in vmap_pte_range()
216 } while (pte++, addr += PAGE_SIZE, addr != end); in vmap_pte_range()
221 static int vmap_pmd_range(pud_t *pud, unsigned long addr, in vmap_pmd_range() argument
228 pmd = pmd_alloc_track(&init_mm, pud, addr, mask); in vmap_pmd_range()
232 next = pmd_addr_end(addr, end); in vmap_pmd_range()
233 if (vmap_pte_range(pmd, addr, next, prot, pages, nr, mask)) in vmap_pmd_range()
235 } while (pmd++, addr = next, addr != end); in vmap_pmd_range()
239 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, in vmap_pud_range() argument
246 pud = pud_alloc_track(&init_mm, p4d, addr, mask); in vmap_pud_range()
250 next = pud_addr_end(addr, end); in vmap_pud_range()
251 if (vmap_pmd_range(pud, addr, next, prot, pages, nr, mask)) in vmap_pud_range()
253 } while (pud++, addr = next, addr != end); in vmap_pud_range()
257 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, in vmap_p4d_range() argument
264 p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); in vmap_p4d_range()
268 next = p4d_addr_end(addr, end); in vmap_p4d_range()
269 if (vmap_pud_range(p4d, addr, next, prot, pages, nr, mask)) in vmap_p4d_range()
271 } while (p4d++, addr = next, addr != end); in vmap_p4d_range()
293 int map_kernel_range_noflush(unsigned long addr, unsigned long size, in map_kernel_range_noflush() argument
296 unsigned long start = addr; in map_kernel_range_noflush()
297 unsigned long end = addr + size; in map_kernel_range_noflush()
304 BUG_ON(addr >= end); in map_kernel_range_noflush()
305 pgd = pgd_offset_k(addr); in map_kernel_range_noflush()
307 next = pgd_addr_end(addr, end); in map_kernel_range_noflush()
310 err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr, &mask); in map_kernel_range_noflush()
313 } while (pgd++, addr = next, addr != end); in map_kernel_range_noflush()
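
The mapping side (lines 193-313) mirrors the teardown, except each level first allocates any missing lower-level table (the *_alloc_track() calls) and the PTE level consumes one entry of the pages[] array per PAGE_SIZE step, so pages[i] ends up mapped at addr + i * PAGE_SIZE. A toy, flat-table sketch of that fill loop (the real vmap_pte_range() WARNs and returns -EBUSY on an already-populated PTE, -ENOMEM on a missing page):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define NPTES      16                   /* toy table size */

/* Toy analog of vmap_pte_range(): consume one pages[] entry per PAGE_SIZE
 * step, failing if a slot is already populated. */
static int fill_ptes(unsigned long *ptes, unsigned long addr,
                     unsigned long end, const unsigned long *pages, int *nr)
{
        unsigned long *pte = &ptes[(addr >> PAGE_SHIFT) % NPTES];

        do {
                if (*pte)
                        return -1;
                *pte = pages[(*nr)++];  /* pages[i] maps at addr + i*PAGE_SIZE */
        } while (pte++, addr += PAGE_SIZE, addr != end);
        return 0;
}

int main(void)
{
        unsigned long ptes[NPTES] = { 0 };
        unsigned long pages[4] = { 0xaa, 0xbb, 0xcc, 0xdd };
        int nr = 0;

        fill_ptes(ptes, 0x4000, 0x8000, pages, &nr);
        printf("mapped %d pages, slot 4 holds %#lx\n", nr, ptes[4]);
        return 0;
}
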
340 unsigned long addr = (unsigned long)x; in is_vmalloc_or_module_addr() local
341 if (addr >= MODULES_VADDR && addr < MODULES_END) in is_vmalloc_or_module_addr()
352 unsigned long addr = (unsigned long) vmalloc_addr; in vmalloc_to_page() local
354 pgd_t *pgd = pgd_offset_k(addr); in vmalloc_to_page()
368 p4d = p4d_offset(pgd, addr); in vmalloc_to_page()
371 pud = pud_offset(p4d, addr); in vmalloc_to_page()
384 pmd = pmd_offset(pud, addr); in vmalloc_to_page()
389 ptep = pte_offset_map(pmd, addr); in vmalloc_to_page()
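
vmalloc memory is virtually contiguous but physically scattered, so vmalloc_to_page() (lines 352-389) translates by software-walking the kernel page tables, bailing out at any level that is *_none(). A toy two-level radix lookup that follows the same shape:

#include <stdio.h>

#define SHIFT_HI 21
#define SHIFT_LO 12
#define SLOTS    512

/* Toy two-level radix tree standing in for the pgd->p4d->pud->pmd->pte
 * descent in vmalloc_to_page(); a NULL directory slot is the *_none() case. */
struct toy_tbl { unsigned long *dir[SLOTS]; };

static unsigned long toy_lookup(struct toy_tbl *t, unsigned long addr)
{
        unsigned long *pt = t->dir[(addr >> SHIFT_HI) & (SLOTS - 1)];

        if (!pt)
                return 0;       /* not mapped; the kernel returns a NULL page */
        return pt[(addr >> SHIFT_LO) & (SLOTS - 1)];
}

int main(void)
{
        unsigned long pt[SLOTS] = { 0 };
        struct toy_tbl t = { { NULL } };

        t.dir[1] = pt;
        pt[3] = 0x1234;         /* pretend pfn for address 0x203000 */
        printf("%#lx\n", toy_lookup(&t, (1UL << SHIFT_HI) + (3UL << SHIFT_LO)));
        return 0;
}
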
496 static struct vmap_area *__find_vmap_area(unsigned long addr) in __find_vmap_area() argument
504 if (addr < va->va_start) in __find_vmap_area()
506 else if (addr >= va->va_end) in __find_vmap_area()
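
__find_vmap_area() (lines 496-506) descends an rbtree keyed on the half-open interval [va_start, va_end): go left when addr precedes the area, right when it is at or past va_end, otherwise the node encloses addr. A sketch of the same comparison on a sorted array:

#include <stdio.h>

struct vmap_area { unsigned long va_start, va_end; };

/* __find_vmap_area()'s comparison, on a sorted array instead of an rbtree:
 * left if addr precedes the area, right if at or past va_end, else a hit. */
static struct vmap_area *find_area(struct vmap_area *va, int n, unsigned long addr)
{
        int lo = 0, hi = n - 1;

        while (lo <= hi) {
                int mid = lo + (hi - lo) / 2;

                if (addr < va[mid].va_start)
                        hi = mid - 1;
                else if (addr >= va[mid].va_end)
                        lo = mid + 1;
                else
                        return &va[mid];
        }
        return NULL;
}

int main(void)
{
        struct vmap_area areas[] = { { 0x1000, 0x3000 }, { 0x5000, 0x6000 } };

        printf("%p\n", (void *)find_area(areas, 2, 0x2000));    /* &areas[0] */
        printf("%p\n", (void *)find_area(areas, 2, 0x4000));    /* NULL */
        return 0;
}
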
1158 unsigned long addr; in alloc_vmap_area() local
1217 addr = __alloc_vmap_area(size, align, vstart, vend); in alloc_vmap_area()
1220 if (unlikely(addr == vend)) in alloc_vmap_area()
1223 va->va_start = addr; in alloc_vmap_area()
1224 va->va_end = addr + size; in alloc_vmap_area()
1236 ret = kasan_populate_vmalloc(addr, size); in alloc_vmap_area()
1449 static struct vmap_area *find_vmap_area(unsigned long addr) in find_vmap_area() argument
1454 va = __find_vmap_area(addr); in find_vmap_area()
1522 static unsigned long addr_to_vb_idx(unsigned long addr) in addr_to_vb_idx() argument
1524 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); in addr_to_vb_idx()
1525 addr /= VMAP_BLOCK_SIZE; in addr_to_vb_idx()
1526 return addr; in addr_to_vb_idx()
1531 unsigned long addr; in vmap_block_vaddr() local
1533 addr = va_start + (pages_off << PAGE_SHIFT); in vmap_block_vaddr()
1534 BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); in vmap_block_vaddr()
1535 return (void *)addr; in vmap_block_vaddr()
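
addr_to_vb_idx() (lines 1524-1526) converts an address into an xarray index by rebasing to the first block boundary at or below VMALLOC_START and dividing by VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE in the source); the BUG_ON in vmap_block_vaddr() then asserts an allocation never crosses a block boundary. A sketch with placeholder constants:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define VMALLOC_START   0xffffc90000000000UL    /* placeholder */
#define VMAP_BLOCK_SIZE (64UL << PAGE_SHIFT)    /* hypothetical block size */

/* Mirror of addr_to_vb_idx(): rebase, then divide by the block size. */
static unsigned long addr_to_vb_idx(unsigned long addr)
{
        addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE - 1);
        return addr / VMAP_BLOCK_SIZE;
}

int main(void)
{
        unsigned long va_start = VMALLOC_START + 2 * VMAP_BLOCK_SIZE;
        unsigned long addr = va_start + (5UL << PAGE_SHIFT);

        /* vmap_block_vaddr()'s invariant: same block before and after. */
        assert(addr_to_vb_idx(addr) == addr_to_vb_idx(va_start));
        printf("idx %lu\n", addr_to_vb_idx(addr));
        return 0;
}
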
1705 static void vb_free(unsigned long addr, unsigned long size) in vb_free() argument
1714 flush_cache_vunmap(addr, addr + size); in vb_free()
1717 offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; in vb_free()
1718 vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr)); in vb_free()
1720 unmap_kernel_range_noflush(addr, size); in vb_free()
1723 flush_tlb_kernel_range(addr, addr + size); in vb_free()
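
vb_free() (lines 1705-1723) recovers the page offset within a block by masking with VMAP_BLOCK_SIZE - 1, and brackets the PTE teardown with the required flushes: cache flush before the mappings go away, TLB flush after. The offset math in isolation, with the same placeholder block size as above:

#include <stdio.h>

#define PAGE_SHIFT      12
#define VMAP_BLOCK_SIZE (64UL << PAGE_SHIFT)    /* hypothetical, as above */

/* vb_free()'s offset math: position of addr within its block, in pages. */
static unsigned long offset_in_block(unsigned long addr)
{
        return (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
}

int main(void)
{
        printf("%lu\n", offset_in_block(0x40000 * 7 + 0x9000));  /* 9 */
        return 0;
}
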
1810 unsigned long addr = (unsigned long)mem; in vm_unmap_ram() local
1814 BUG_ON(!addr); in vm_unmap_ram()
1815 BUG_ON(addr < VMALLOC_START); in vm_unmap_ram()
1816 BUG_ON(addr > VMALLOC_END); in vm_unmap_ram()
1817 BUG_ON(!PAGE_ALIGNED(addr)); in vm_unmap_ram()
1823 vb_free(addr, size); in vm_unmap_ram()
1827 va = find_vmap_area(addr); in vm_unmap_ram()
1852 unsigned long addr; in vm_map_ram() local
1859 addr = (unsigned long)mem; in vm_map_ram()
1867 addr = va->va_start; in vm_map_ram()
1868 mem = (void *)addr; in vm_map_ram()
1873 if (map_kernel_range(addr, size, PAGE_KERNEL, pages) < 0) { in vm_map_ram()
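
vm_map_ram() (lines 1852-1873) routes by size: requests of at most VMAP_MAX_ALLOC pages (BITS_PER_LONG in the source) are served from the per-cpu vmap block cache via vb_alloc(), anything larger gets a dedicated vmap_area and goes through map_kernel_range(). A trivial sketch of that routing decision:

#include <stdio.h>

#define VMAP_MAX_ALLOC 64       /* BITS_PER_LONG in the source; 32 on 32-bit */

/* Routing decision mirrored from vm_map_ram(): small counts use the per-cpu
 * block allocator (vb_alloc), larger ones get their own vmap_area. */
static const char *route(unsigned int count)
{
        return count <= VMAP_MAX_ALLOC ? "vb_alloc" : "alloc_vmap_area";
}

int main(void)
{
        printf("%u pages -> %s\n", 8u, route(8));
        printf("%u pages -> %s\n", 512u, route(512));
        return 0;
}
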
1899 if (tmp->addr >= vm->addr) { in vm_area_add_early()
1900 BUG_ON(tmp->addr < vm->addr + vm->size); in vm_area_add_early()
1903 BUG_ON(tmp->addr + tmp->size > vm->addr); in vm_area_add_early()
1924 unsigned long addr; in vm_area_register_early() local
1926 addr = ALIGN(VMALLOC_START + vm_init_off, align); in vm_area_register_early()
1927 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START; in vm_area_register_early()
1929 vm->addr = (void *)addr; in vm_area_register_early()
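
vm_area_register_early() (lines 1924-1929) is a bump allocator over the vmalloc window for boot-time areas: place each area at the next suitably aligned offset, then advance the running offset past it. A sketch, assuming the usual ALIGN()/PFN_ALIGN() semantics and a placeholder VMALLOC_START:

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define VMALLOC_START   0xffffc90000000000UL    /* placeholder */
#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))
#define PFN_ALIGN(x)    ALIGN(x, PAGE_SIZE)

/* Bump allocation as in vm_area_register_early(): place each early area at
 * the next aligned offset, then advance the running offset past it. */
static unsigned long vm_init_off;

static unsigned long reserve_early(unsigned long size, unsigned long align)
{
        unsigned long addr = ALIGN(VMALLOC_START + vm_init_off, align);

        vm_init_off = PFN_ALIGN(addr + size) - VMALLOC_START;
        return addr;
}

int main(void)
{
        printf("%#lx\n", reserve_early(0x3000, 0x10000));
        printf("%#lx\n", reserve_early(0x1000, 0x10000));
        return 0;
}
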
2004 va->va_start = (unsigned long)tmp->addr; in vmalloc_init()
2025 void unmap_kernel_range(unsigned long addr, unsigned long size) in unmap_kernel_range() argument
2027 unsigned long end = addr + size; in unmap_kernel_range()
2029 flush_cache_vunmap(addr, end); in unmap_kernel_range()
2030 unmap_kernel_range_noflush(addr, size); in unmap_kernel_range()
2031 flush_tlb_kernel_range(addr, end); in unmap_kernel_range()
2038 vm->addr = (void *)va->va_start; in setup_vmalloc_vm_locked()
2145 struct vm_struct *find_vm_area(const void *addr) in find_vm_area() argument
2149 va = find_vmap_area((unsigned long)addr); in find_vm_area()
2166 struct vm_struct *remove_vm_area(const void *addr) in remove_vm_area() argument
2173 va = __find_vmap_area((unsigned long)addr); in remove_vm_area()
2209 remove_vm_area(area->addr); in vm_remove_mappings()
2230 unsigned long addr = (unsigned long)page_address(area->pages[i]); in vm_remove_mappings() local
2231 if (addr) { in vm_remove_mappings()
2232 start = min(addr, start); in vm_remove_mappings()
2233 end = max(addr + PAGE_SIZE, end); in vm_remove_mappings()
2248 static void __vunmap(const void *addr, int deallocate_pages) in __vunmap() argument
2252 if (!addr) in __vunmap()
2255 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", in __vunmap()
2256 addr)) in __vunmap()
2259 area = find_vm_area(addr); in __vunmap()
2262 addr); in __vunmap()
2266 debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); in __vunmap()
2267 debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); in __vunmap()
2269 kasan_poison_vmalloc(area->addr, get_vm_area_size(area)); in __vunmap()
2291 static inline void __vfree_deferred(const void *addr) in __vfree_deferred() argument
2301 if (llist_add((struct llist_node *)addr, &p->list)) in __vfree_deferred()
2312 void vfree_atomic(const void *addr) in vfree_atomic() argument
2316 kmemleak_free(addr); in vfree_atomic()
2318 if (!addr) in vfree_atomic()
2320 __vfree_deferred(addr); in vfree_atomic()
2323 static void __vfree(const void *addr) in __vfree() argument
2326 __vfree_deferred(addr); in __vfree()
2328 __vunmap(addr, 1); in __vfree()
2348 void vfree(const void *addr) in vfree() argument
2352 kmemleak_free(addr); in vfree()
2356 if (!addr) in vfree()
2359 __vfree(addr); in vfree()
2372 void vunmap(const void *addr) in vunmap() argument
2376 if (addr) in vunmap()
2377 __vunmap(addr, 0); in vunmap()
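
vfree() may be called from contexts that cannot sleep, so __vfree_deferred() (lines 2291-2301) queues the region for a worker instead; since the memory being freed is already dead to its owner, it reuses the region's own first bytes as the llist node (the cast on line 2301). A single-threaded userspace sketch of that intrusive trick; the kernel version uses a lock-free llist plus schedule_work():

#include <stdio.h>
#include <stdlib.h>

/* Trick borrowed from __vfree_deferred(): the block being freed is dead
 * storage as far as its owner is concerned, so its first bytes can serve
 * as the list node that queues it for deferred freeing. */
struct node { struct node *next; };

static struct node *deferred;

static void defer_free(void *addr)
{
        struct node *n = addr;          /* node lives inside the freed block */

        n->next = deferred;
        deferred = n;
}

static int drain(void)
{
        int freed = 0;

        while (deferred) {
                struct node *n = deferred;

                deferred = n->next;
                free(n);
                freed++;
        }
        return freed;
}

int main(void)
{
        defer_free(malloc(64));
        defer_free(malloc(64));
        printf("drained %d blocks\n", drain());
        return 0;
}
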
2412 if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot), in vmap()
2414 vunmap(area->addr); in vmap()
2422 return area->addr; in vmap()
2433 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private) in vmap_pfn_apply() argument
2461 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, in vmap_pfn()
2467 flush_cache_vmap((unsigned long)area->addr, in vmap_pfn()
2468 (unsigned long)area->addr + count * PAGE_SIZE); in vmap_pfn()
2470 return area->addr; in vmap_pfn()
2496 remove_vm_area(area->addr); in __vmalloc_area_node()
2524 if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area), in __vmalloc_area_node()
2528 return area->addr; in __vmalloc_area_node()
2534 __vfree(area->addr); in __vmalloc_area_node()
2562 void *addr; in __vmalloc_node_range() local
2574 addr = __vmalloc_area_node(area, gfp_mask, prot, node); in __vmalloc_node_range()
2575 if (!addr) in __vmalloc_node_range()
2587 return addr; in __vmalloc_node_range()
2782 static int aligned_vread(char *buf, char *addr, unsigned long count) in aligned_vread() argument
2790 offset = offset_in_page(addr); in aligned_vread()
2794 p = vmalloc_to_page(addr); in aligned_vread()
2813 addr += length; in aligned_vread()
2821 static int aligned_vwrite(char *buf, char *addr, unsigned long count) in aligned_vwrite() argument
2829 offset = offset_in_page(addr); in aligned_vwrite()
2833 p = vmalloc_to_page(addr); in aligned_vwrite()
2850 addr += length; in aligned_vwrite()
2882 long vread(char *buf, char *addr, unsigned long count) in vread() argument
2891 if ((unsigned long) addr + count < count) in vread()
2892 count = -(unsigned long) addr; in vread()
2903 vaddr = (char *) vm->addr; in vread()
2904 if (addr >= vaddr + get_vm_area_size(vm)) in vread()
2906 while (addr < vaddr) { in vread()
2911 addr++; in vread()
2914 n = vaddr + get_vm_area_size(vm) - addr; in vread()
2918 aligned_vread(buf, addr, n); in vread()
2922 addr += n; in vread()
2961 long vwrite(char *buf, char *addr, unsigned long count) in vwrite() argument
2970 if ((unsigned long) addr + count < count) in vwrite()
2971 count = -(unsigned long) addr; in vwrite()
2983 vaddr = (char *) vm->addr; in vwrite()
2984 if (addr >= vaddr + get_vm_area_size(vm)) in vwrite()
2986 while (addr < vaddr) { in vwrite()
2990 addr++; in vwrite()
2993 n = vaddr + get_vm_area_size(vm) - addr; in vwrite()
2997 aligned_vwrite(buf, addr, n); in vwrite()
3001 addr += n; in vwrite()
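
vread() and vwrite() (lines 2882-3001) open with the same guard: if addr + count would wrap past the top of the address space, count is clamped to exactly the bytes remaining before the wrap. A sketch of the idiom in isolation:

#include <stdio.h>

/* Clamp from vread()/vwrite(): if addr + count wraps past the top of the
 * address space, shrink count so the copy stops exactly at the wrap point. */
static unsigned long clamp_count(char *addr, unsigned long count)
{
        if ((unsigned long)addr + count < count)        /* unsigned wraparound */
                count = -(unsigned long)addr;           /* bytes until wrap */
        return count;
}

int main(void)
{
        printf("%lu\n", clamp_count((char *)~0xffUL, 4096));    /* 256 */
        printf("%lu\n", clamp_count((char *)0x1000UL, 4096));   /* 4096 */
        return 0;
}
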
3089 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, in remap_vmalloc_range() argument
3093 addr, pgoff, in remap_vmalloc_range()
3101 ret = remove_vm_area(area->addr); in free_vm_area()
3123 pvm_find_va_enclose_addr(unsigned long addr) in pvm_find_va_enclose_addr() argument
3133 if (tmp->va_start <= addr) { in pvm_find_va_enclose_addr()
3135 if (tmp->va_end >= addr) in pvm_find_va_enclose_addr()
3160 unsigned long addr; in pvm_determine_end_from_reverse() local
3165 addr = min((*va)->va_end & ~(align - 1), vmalloc_end); in pvm_determine_end_from_reverse()
3166 if ((*va)->va_start < addr) in pvm_determine_end_from_reverse()
3167 return addr; in pvm_determine_end_from_reverse()
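
pvm_determine_end_from_reverse() (line 3165) combines two idioms: align an address down to a power-of-two boundary with & ~(align - 1), then use min() to keep it at or below vmalloc_end. A sketch:

#include <stdio.h>

/* Align-down plus clamp, as on line 3165 (align must be a power of two). */
static unsigned long align_down_clamped(unsigned long addr, unsigned long align,
                                        unsigned long limit)
{
        unsigned long v = addr & ~(align - 1);

        return v < limit ? v : limit;
}

int main(void)
{
        printf("%#lx\n", align_down_clamped(0x12345, 0x1000, 0x20000)); /* 0x12000 */
        printf("%#lx\n", align_down_clamped(0x32345, 0x1000, 0x20000)); /* 0x20000 */
        return 0;
}
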
3536 v->addr, v->addr + v->size, v->size); in s_show()