/arch/x86/kernel/sys_x86_64.c
     96  (!vma || addr + len <= vma->vm_start))  in arch_get_unmapped_area()
    124  if (!vma || addr + len <= vma->vm_start) {  in arch_get_unmapped_area()
    131  if (addr + mm->cached_hole_size < vma->vm_start)  in arch_get_unmapped_area()
    132  mm->cached_hole_size = vma->vm_start - addr;  in arch_get_unmapped_area()
    164  (!vma || addr + len <= vma->vm_start))  in arch_get_unmapped_area_topdown()
    180  if (!vma || addr <= vma->vm_start)  in arch_get_unmapped_area_topdown()
    197  if (!vma || addr+len <= vma->vm_start)  in arch_get_unmapped_area_topdown()
    202  if (addr + mm->cached_hole_size < vma->vm_start)  in arch_get_unmapped_area_topdown()
    203  mm->cached_hole_size = vma->vm_start - addr;  in arch_get_unmapped_area_topdown()
    206  addr = vma->vm_start-len;  in arch_get_unmapped_area_topdown()
    [all …]

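These arch_get_unmapped_area() hits (and the matching ones in /arch/arm/mm/mmap.c, /arch/sh/mm/mmap.c, /arch/frv/mm/elf-fdpic.c, /arch/sparc/kernel/sys_sparc_64.c, and /arch/powerpc/mm/slice.c further down) all apply the same invariant: a candidate range [addr, addr + len) is free exactly when no VMA follows it, or when it ends at or before the next VMA's vm_start. A minimal self-contained sketch of the bottom-up walk, using simplified stand-in types rather than the kernel's real find_vma()/rbtree machinery:

    /* Simplified stand-in for the kernel's VMA (assumption: the real code
     * walks an rbtree via find_vma(); an address-sorted list suffices here). */
    struct vm_area_struct {
            unsigned long vm_start, vm_end;         /* covers [vm_start, vm_end) */
            struct vm_area_struct *vm_next;
    };

    /* Bottom-up search: find the lowest address >= addr where a gap of
     * 'len' bytes opens up.  The free test mirrors the hits above. */
    static unsigned long get_unmapped_area_bottomup(struct vm_area_struct *mmap,
                                                    unsigned long addr,
                                                    unsigned long len)
    {
            struct vm_area_struct *vma;

            for (vma = mmap; ; vma = vma->vm_next) {
                    if (!vma || addr + len <= vma->vm_start)
                            return addr;            /* hole is big enough */
                    if (addr < vma->vm_end)
                            addr = vma->vm_end;     /* retry past this VMA */
            }
    }

The _topdown() variants run the same test downward, stepping each retry to vma->vm_start - len, and the cached_hole_size hits (131-132, 202-203 here) record the largest gap skipped so later searches can start beyond it.
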
/arch/powerpc/lib/dma-noncoherent.c
     82  unsigned long vm_start;  (member)
     88  .vm_start = CONSISTENT_BASE,
     95  unsigned long addr = head->vm_start, end = head->vm_end - size;  in ppc_vm_region_alloc()
    108  if ((addr + size) <= c->vm_start)  in ppc_vm_region_alloc()
    120  new->vm_start = addr;  in ppc_vm_region_alloc()
    138  if (c->vm_start == addr)  in ppc_vm_region_find()
    197  unsigned long vaddr = c->vm_start;  in __dma_alloc_coherent()
    227  return (void *)c->vm_start;  in __dma_alloc_coherent()
    254  if ((c->vm_end - c->vm_start) != size) {  in __dma_free_coherent()
    256  __func__, c->vm_end - c->vm_start, size);  in __dma_free_coherent()
    [all …]

/arch/arm/mm/dma-mapping.c
     76  unsigned long vm_start;  (member)
     84  .vm_start = CONSISTENT_BASE,
     91  unsigned long addr = head->vm_start, end = head->vm_end - size;  in arm_vm_region_alloc()
    104  if ((addr + size) <= c->vm_start)  in arm_vm_region_alloc()
    116  new->vm_start = addr;  in arm_vm_region_alloc()
    135  if (c->vm_active && c->vm_start == addr)  in arm_vm_region_find()
    222  int idx = CONSISTENT_PTE_INDEX(c->vm_start);  in __dma_alloc()
    223  u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);  in __dma_alloc()
    260  return (void *)c->vm_start;  in __dma_alloc()
    317  user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;  in dma_mmap()
    [all …]

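The powerpc and ARM DMA layers above do not use real VMAs at all: ppc_vm_region and arm_vm_region are private structs that borrow the vm_start/vm_end naming to carve allocations out of a fixed uncached window (CONSISTENT_BASE). A self-contained first-fit sketch of that allocator, modeled on the hits at lines 91..120 (user-space malloc stands in for the kernel's allocator, and the head node is a sentinel spanning the whole window):

    #include <stdlib.h>

    struct vm_region {
            struct vm_region *next;         /* busy list, sorted by address */
            unsigned long vm_start;
            unsigned long vm_end;
    };

    /* First fit inside [head->vm_start, head->vm_end): walk the busy
     * list and claim the first gap that can hold 'size' bytes. */
    static struct vm_region *vm_region_alloc(struct vm_region *head, size_t size)
    {
            unsigned long addr = head->vm_start, end = head->vm_end - size;
            struct vm_region *c, *prev = head;
            struct vm_region *new = malloc(sizeof(*new));

            if (!new)
                    return NULL;

            for (c = head->next; c; prev = c, c = c->next) {
                    if (addr + size <= c->vm_start)
                            break;                  /* gap before c fits */
                    addr = c->vm_end;               /* otherwise try after c */
            }
            if (addr > end) {                       /* window exhausted */
                    free(new);
                    return NULL;
            }

            /* claim [addr, addr + size) and keep the list address-sorted */
            new->vm_start = addr;
            new->vm_end   = addr + size;
            new->next     = c;
            prev->next    = new;
            return new;
    }

The free paths then look a region up by exact vm_start match (vm_region_find(), lines 135 and 138) and sanity-check that the size being freed equals vm_end - vm_start (line 254).
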
/arch/arm/mm/mmap.c
     73  (!vma || addr + len <= vma->vm_start))  in arch_get_unmapped_area()
    103  if (!vma || addr + len <= vma->vm_start) {  in arch_get_unmapped_area()
    110  if (addr + mm->cached_hole_size < vma->vm_start)  in arch_get_unmapped_area()
    111  mm->cached_hole_size = vma->vm_start - addr;  in arch_get_unmapped_area()

/arch/x86/mm/hugetlbpage.c
     25  svma->vm_start;  in page_table_shareable()
     35  sbase < svma->vm_start || svma->vm_end < s_end)  in page_table_shareable()
     50  vma->vm_start <= base && end <= vma->vm_end)  in vma_shareable()
     62  pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +  in huge_pmd_share()
    292  if (!vma || addr + len <= vma->vm_start) {  in hugetlb_get_unmapped_area_bottomup()
    296  if (addr + mm->cached_hole_size < vma->vm_start)  in hugetlb_get_unmapped_area_bottomup()
    297  mm->cached_hole_size = vma->vm_start - addr;  in hugetlb_get_unmapped_area_bottomup()
    340  if (addr + len <= vma->vm_start &&  in hugetlb_get_unmapped_area_topdown()
    348  mm->free_area_cache = vma->vm_start;  in hugetlb_get_unmapped_area_topdown()
    354  if (addr + largest_hole < vma->vm_start)  in hugetlb_get_unmapped_area_topdown()
    [all …]

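The huge_pmd_share() hit at line 62 is the canonical address-to-file-page conversion: the address's offset within the VMA, measured in pages, plus the mapping's starting page offset vm_pgoff. A runnable micro-example (PAGE_SHIFT and the struct are pared down to the two fields involved; the values are hypothetical):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* 4 KiB pages, as on x86 */

    struct vma {
            unsigned long vm_start;
            unsigned long vm_pgoff; /* file offset of vm_start, in pages */
    };

    /* Mirror of the line-62 expression. */
    static unsigned long addr_to_pgoff(const struct vma *vma, unsigned long addr)
    {
            return ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
    }

    int main(void)
    {
            /* hypothetical mapping: file page 0x10 mapped at 0x40000000 */
            struct vma v = { .vm_start = 0x40000000UL, .vm_pgoff = 0x10 };

            /* 0x40003000 is 3 pages into the VMA, so file page 0x13 */
            printf("idx = 0x%lx\n", addr_to_pgoff(&v, 0x40003000UL));
            return 0;
    }
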
/arch/sparc/mm/hugetlbpage.c
     72  if (likely(!vma || addr + len <= vma->vm_start)) {  in hugetlb_get_unmapped_area_bottomup()
     79  if (addr + mm->cached_hole_size < vma->vm_start)  in hugetlb_get_unmapped_area_bottomup()
     80  mm->cached_hole_size = vma->vm_start - addr;  in hugetlb_get_unmapped_area_bottomup()
    111  if (!vma || addr <= vma->vm_start) {  in hugetlb_get_unmapped_area_topdown()
    129  if (likely(!vma || addr+len <= vma->vm_start)) {  in hugetlb_get_unmapped_area_topdown()
    135  if (addr + mm->cached_hole_size < vma->vm_start)  in hugetlb_get_unmapped_area_topdown()
    136  mm->cached_hole_size = vma->vm_start - addr;  in hugetlb_get_unmapped_area_topdown()
    139  addr = (vma->vm_start-len) & HPAGE_MASK;  in hugetlb_get_unmapped_area_topdown()
    140  } while (likely(len < vma->vm_start));  in hugetlb_get_unmapped_area_topdown()
    187  (!vma || addr + len <= vma->vm_start))  in hugetlb_get_unmapped_area()

/arch/sh/mm/mmap.c
     60  (!vma || addr + len <= vma->vm_start))  in arch_get_unmapped_area()
     91  if (likely(!vma || addr + len <= vma->vm_start)) {  in arch_get_unmapped_area()
     98  if (addr + mm->cached_hole_size < vma->vm_start)  in arch_get_unmapped_area()
     99  mm->cached_hole_size = vma->vm_start - addr;  in arch_get_unmapped_area()

/arch/ia64/ia32/binfmt_elf32.c
     93  vma->vm_start = IA32_GDT_OFFSET;  in ia64_elf32_init()
     94  vma->vm_end = vma->vm_start + PAGE_SIZE;  in ia64_elf32_init()
    118  vma->vm_start = IA32_GATE_OFFSET;  in ia64_elf32_init()
    119  vma->vm_end = vma->vm_start + PAGE_SIZE;  in ia64_elf32_init()
    142  vma->vm_start = IA32_LDT_OFFSET;  in ia64_elf32_init()
    143  vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);  in ia64_elf32_init()

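Unlike the search and fault paths elsewhere in this listing, ia64_elf32_init() assigns vm_start and vm_end directly: it fabricates one VMA each for the IA-32 GDT, gate, and LDT pages at fixed offsets. A compressed kernel-style sketch of that shape, assuming the 2.6-era API implied by the hits (error handling and the nearly identical gate and LDT blocks elided):

    /* Pin one page for the IA-32 GDT at a fixed user address (sketch). */
    vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
    if (vma) {
            vma->vm_mm = current->mm;
            vma->vm_start = IA32_GDT_OFFSET;                /* hit at line 93 */
            vma->vm_end = vma->vm_start + PAGE_SIZE;        /* hit at line 94 */
            vma->vm_page_prot = PAGE_SHARED;
            down_write(&current->mm->mmap_sem);
            insert_vm_struct(current->mm, vma);
            up_write(&current->mm->mmap_sem);
    }

The LDT block differs only in size: its vm_end spans PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE) bytes (line 143) instead of a single page.
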
/arch/ia64/include/asm/fb.h
     12  if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))  in fb_pgprotect()

/arch/sparc/include/asm/tlb_32.h
      6  flush_cache_range(vma, vma->vm_start, vma->vm_end); \
     11  flush_tlb_range(vma, vma->vm_start, vma->vm_end); \

/arch/sh/include/asm/tlb.h
     11  flush_cache_range(vma, vma->vm_start, vma->vm_end)
     14  flush_tlb_range(vma, vma->vm_start, vma->vm_end)

/arch/avr32/include/asm/tlb.h
     12  flush_cache_range(vma, vma->vm_start, vma->vm_end)
     15  flush_tlb_range(vma, vma->vm_start, vma->vm_end)

/arch/xtensa/include/asm/tlb.h
     29  flush_cache_range(vma, vma->vm_start, vma->vm_end); \
     35  flush_tlb_range(vma, vma->vm_start, vma->vm_end); \

/arch/parisc/include/asm/tlb.h
     11  flush_cache_range(vma, vma->vm_start, vma->vm_end); \
     16  flush_tlb_range(vma, vma->vm_start, vma->vm_end); \

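The five tlb.h headers above (sparc32, sh, avr32, xtensa, parisc), plus the ARM inline-function version further down, all wire the same pair of mmu_gather hooks to whole-VMA flushes. The shared shape, written out as an illustrative header fragment rather than any one port's verbatim copy:

    /* Called around the unmapping of a VMA's pages (munmap, exit):
     * write back the caches over the full range first, then drop the
     * stale TLB entries over the same range once the PTEs are gone. */
    #define tlb_start_vma(tlb, vma)                                         \
            do {                                                            \
                    flush_cache_range(vma, vma->vm_start, vma->vm_end);     \
            } while (0)

    #define tlb_end_vma(tlb, vma)                                           \
            do {                                                            \
                    flush_tlb_range(vma, vma->vm_start, vma->vm_end);       \
            } while (0)

Ports with coherent caches can leave these hooks empty; the architectures listed here flush eagerly over the whole [vm_start, vm_end) range instead.
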
/arch/frv/mm/elf-fdpic.c
     77  (!vma || addr + len <= vma->vm_start))  in arch_get_unmapped_area()
     92  if (addr + len <= vma->vm_start)  in arch_get_unmapped_area()
    107  if (addr + len <= vma->vm_start)  in arch_get_unmapped_area()

/arch/avr32/boards/mimc200/fram.c
     30  vma->vm_start,  in fram_mmap()
     32  vma->vm_end-vma->vm_start,  in fram_mmap()

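fram_mmap() here, like mmapper_mmap(), page_map_mmap(), and pci_mmap_page_range() elsewhere in this listing, follows the standard character-device mmap pattern: the user picks the virtual base (vma->vm_start), the driver supplies the physical pages, and the length is always vm_end - vm_start. A composite sketch of such a handler; example_buf_phys and example_buf_size are hypothetical driver fields, not names from any of the files above:

    #include <linux/errno.h>
    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Hypothetical driver state: physical base and length of the
     * memory being exported to user space. */
    static unsigned long example_buf_phys;
    static unsigned long example_buf_size;

    static int example_mmap(struct file *file, struct vm_area_struct *vma)
    {
            unsigned long size = vma->vm_end - vma->vm_start;

            if (size > example_buf_size)            /* refuse oversized maps */
                    return -EINVAL;

            if (remap_pfn_range(vma, vma->vm_start,
                                example_buf_phys >> PAGE_SHIFT,
                                size, vma->vm_page_prot))
                    return -EAGAIN;

            return 0;
    }
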
/arch/parisc/mm/fault.c
    129  if (tree->vm_start > addr) {
    135  if (prev->vm_next->vm_start > addr)
    185  if (!vma || address < vma->vm_start)  in do_page_fault()
    245  vma->vm_start, vma->vm_end);  in do_page_fault()

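Both fault handlers in this listing (do_page_fault() here, line 185, and ia64_do_page_fault() below, line 125) depend on a find_vma() subtlety: it returns the first VMA whose vm_end lies above the faulting address, so the address may still sit in the unmapped gap below that VMA's vm_start. A minimal model of the resulting check (stack expansion, locking, and signal delivery omitted):

    struct vm_area_struct {
            unsigned long vm_start, vm_end; /* covers [vm_start, vm_end) */
    };

    /* 'vma' is what find_vma() returned: the first VMA with
     * vm_end > address, or NULL.  Membership still needs the
     * explicit vm_start comparison. */
    static int fault_address_mapped(const struct vm_area_struct *vma,
                                    unsigned long address)
    {
            if (!vma || address < vma->vm_start)
                    return 0;       /* unmapped: SIGSEGV, or try stack growth */
            return 1;               /* genuinely inside the VMA */
    }
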
/arch/x86/pci/i386.c
    264  free_memtype(addr, addr + vma->vm_end - vma->vm_start);  in pci_unmap_page_range()
    273  reserve_memtype(addr, addr + vma->vm_end - vma->vm_start, flags, NULL);  in pci_track_mmap_page_range()
    287  unsigned long len = vma->vm_end - vma->vm_start;  in pci_mmap_page_range()
    332  if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,  in pci_mmap_page_range()
    333  vma->vm_end - vma->vm_start,  in pci_mmap_page_range()

/arch/arm/include/asm/tlb.h
     77  flush_cache_range(vma, vma->vm_start, vma->vm_end);  in tlb_start_vma()
     84  flush_tlb_range(vma, vma->vm_start, vma->vm_end);  in tlb_end_vma()

/arch/sparc/kernel/sys_sparc_64.c
    151  (!vma || addr + len <= vma->vm_start))  in arch_get_unmapped_area()
    185  if (likely(!vma || addr + len <= vma->vm_start)) {  in arch_get_unmapped_area()
    192  if (addr + mm->cached_hole_size < vma->vm_start)  in arch_get_unmapped_area()
    193  mm->cached_hole_size = vma->vm_start - addr;  in arch_get_unmapped_area()
    241  (!vma || addr + len <= vma->vm_start))  in arch_get_unmapped_area_topdown()
    262  if (!vma || addr <= vma->vm_start) {  in arch_get_unmapped_area_topdown()
    282  if (likely(!vma || addr+len <= vma->vm_start)) {  in arch_get_unmapped_area_topdown()
    288  if (addr + mm->cached_hole_size < vma->vm_start)  in arch_get_unmapped_area_topdown()
    289  mm->cached_hole_size = vma->vm_start - addr;  in arch_get_unmapped_area_topdown()
    292  addr = vma->vm_start-len;  in arch_get_unmapped_area_topdown()
    [all …]

/arch/um/drivers/mmapper_kern.c
     63  size = vma->vm_end - vma->vm_start;  in mmapper_mmap()
     71  if (remap_pfn_range(vma, vma->vm_start, p_buf >> PAGE_SHIFT, size,  in mmapper_mmap()

/arch/powerpc/kernel/proc_ppc64.c
    115  if ((vma->vm_end - vma->vm_start) > dp->size)  in page_map_mmap()
    118  remap_pfn_range(vma, vma->vm_start, __pa(dp->data) >> PAGE_SHIFT,  in page_map_mmap()

/arch/powerpc/mm/slice.c
    101  return (!vma || (addr + len) <= vma->vm_start);  in slice_area_is_free()
    259  if (!vma || addr + len <= vma->vm_start) {  in slice_find_area_bottomup()
    267  if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)  in slice_find_area_bottomup()
    268  mm->cached_hole_size = vma->vm_start - addr;  in slice_find_area_bottomup()
    339  if (!vma || (addr + len) <= vma->vm_start) {  in slice_find_area_topdown()
    347  if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)  in slice_find_area_topdown()
    348  mm->cached_hole_size = vma->vm_start - addr;  in slice_find_area_topdown()
    351  addr = vma->vm_start;  in slice_find_area_topdown()

/arch/ia64/mm/fault.c
    125  if (( !vma && prev_vma ) || (address < vma->vm_start) )  in ia64_do_page_fault()
    185  if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)  in ia64_do_page_fault()
    192  if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)  in ia64_do_page_fault()

/arch/powerpc/include/asm/fb.h
     12  vma->vm_end - vma->vm_start,  in fb_pgprotect()