/arch/metag/kernel/ |
D | dma.c |
    102  unsigned long vm_end;  member
    110  .vm_end = CONSISTENT_END,
    117  unsigned long addr = head->vm_start, end = head->vm_end - size;  in metag_vm_region_alloc()
    132  addr = c->vm_end;  in metag_vm_region_alloc()
    143  new->vm_end = addr + size;  in metag_vm_region_alloc()
    287  if ((c->vm_end - c->vm_start) != size) {  in dma_free_coherent()
    289  __func__, c->vm_end - c->vm_start, size);  in dma_free_coherent()
    291  size = c->vm_end - c->vm_start;  in dma_free_coherent()
    317  flush_tlb_kernel_range(c->vm_start, c->vm_end);  in dma_free_coherent()
    343  user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;  in dma_mmap()
    [all …]
|
/arch/powerpc/mm/ |
D | dma-noncoherent.c |
    87   unsigned long vm_end;  member
    93   .vm_end = CONSISTENT_END,
    99   unsigned long addr = head->vm_start, end = head->vm_end - size;  in ppc_vm_region_alloc()
    114  addr = c->vm_end;  in ppc_vm_region_alloc()
    125  new->vm_end = addr + size;  in ppc_vm_region_alloc()
    270  if ((c->vm_end - c->vm_start) != size) {  in __dma_free_coherent()
    272  __func__, c->vm_end - c->vm_start, size);  in __dma_free_coherent()
    274  size = c->vm_end - c->vm_start;  in __dma_free_coherent()
    297  flush_tlb_kernel_range(c->vm_start, c->vm_end);  in __dma_free_coherent()
|
/arch/sparc/include/asm/ |
D | tlb_32.h |
    6    flush_cache_range(vma, vma->vm_start, vma->vm_end); \
    11   flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
|
/arch/avr32/include/asm/ |
D | tlb.h |
    12   flush_cache_range(vma, vma->vm_start, vma->vm_end)
    15   flush_tlb_range(vma, vma->vm_start, vma->vm_end)
|
/arch/arc/include/asm/ |
D | tlb.h |
    32   flush_cache_range(vma, vma->vm_start, vma->vm_end); \
    39   flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
|
/arch/metag/include/asm/ |
D | tlb.h |
    14   flush_cache_range(vma, vma->vm_start, vma->vm_end); \
    20   flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
|
/arch/xtensa/include/asm/ |
D | tlb.h |
    29   flush_cache_range(vma, vma->vm_start, vma->vm_end); \
    35   flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
|
/arch/parisc/include/asm/ |
D | tlb.h |
    11   flush_cache_range(vma, vma->vm_start, vma->vm_end); \
    16   flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
|
/arch/x86/um/ |
D | mem_32.c |
    22   gate_vma.vm_end = FIXADDR_USER_END;  in gate_vma_init()
    53   return (addr >= vma->vm_start) && (addr < vma->vm_end);  in in_gate_area()
|
/arch/arm64/kernel/ |
D | sys_compat.c |
    45   if (end > vma->vm_end)  in do_compat_cache_op()
    46   end = vma->vm_end;  in do_compat_cache_op()
|
/arch/metag/mm/ |
D | hugetlbpage.c |
    158  if (vma && vma->vm_end <= addr)  in hugetlb_get_unmapped_area_existing()
    172  if (vma->vm_end & HUGEPT_MASK) {  in hugetlb_get_unmapped_area_existing()
    175  addr = vma->vm_end;  in hugetlb_get_unmapped_area_existing()
    180  addr = ALIGN_HUGEPT(vma->vm_end);  in hugetlb_get_unmapped_area_existing()
|
/arch/powerpc/include/asm/ |
D | fb.h |
    12   vma->vm_end - vma->vm_start,  in fb_pgprotect()
|
/arch/ia64/include/asm/ |
D | fb.h |
    12   if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))  in fb_pgprotect()
|
/arch/mips/include/asm/ |
D | tlb.h |
    11   flush_cache_range(vma, vma->vm_start, vma->vm_end); \
|
/arch/arc/kernel/ |
D | arc_hostlink.c |
    26   vma->vm_end - vma->vm_start,  in arc_hl_mmap()
|
/arch/alpha/kernel/ |
D | binfmt_loader.c |
    25   loader = bprm->vma->vm_end - sizeof(void *);  in load_binary()
|
D | pci-sysfs.c |
    31   vma->vm_end - vma->vm_start,  in hose_mmap_page_range()
    41   nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;  in __pci_mmap_fits()
    258  nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;  in __legacy_mmap_fits()
|
/arch/sh/kernel/ |
D | sys_sh.c |
    73   if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {  in sys_cacheflush()
|
/arch/powerpc/kernel/ |
D | proc_powerpc.c |
    46   if ((vma->vm_end - vma->vm_start) > PAGE_SIZE)  in page_map_mmap()
|
/arch/parisc/mm/ |
D | fault.c |
    196  vma->vm_start, vma->vm_end);  in show_signal_msg()
    306  address < vma->vm_start || address >= vma->vm_end) {  in do_page_fault()
|
/arch/sh/mm/ |
D | cache-sh5.c |
    180  if (!vma || (aligned_start <= vma->vm_end)) {  in sh64_icache_inv_user_page_range()
    185  vma_end = vma->vm_end;  in sh64_icache_inv_user_page_range()
    194  aligned_start = vma->vm_end; /* Skip to start of next region */  in sh64_icache_inv_user_page_range()
|
/arch/xtensa/kernel/ |
D | syscall.c |
    91   addr = vmm->vm_end;  in arch_get_unmapped_area()
|
/arch/arm/kvm/ |
D | mmu.c |
    730   hva_t vm_start, vm_end;  in stage2_unmap_memslot()  local
    739   vm_end = min(reg_end, vma->vm_end);  in stage2_unmap_memslot()
    743   unmap_stage2_range(kvm, gpa, vm_end - vm_start);  in stage2_unmap_memslot()
    745   hva = vm_end;  in stage2_unmap_memslot()
    1437  hva_t vm_start, vm_end;  in kvm_arch_prepare_memory_region()  local
    1455  vm_end = min(reg_end, vma->vm_end);  in kvm_arch_prepare_memory_region()
    1466  vm_end - vm_start,  in kvm_arch_prepare_memory_region()
    1471  hva = vm_end;  in kvm_arch_prepare_memory_region()
|
/arch/parisc/kernel/ |
D | cache.c |
    466  usize += vma->vm_end - vma->vm_start;  in mm_total_size()
    499  flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);  in flush_cache_mm()
    502  flush_user_icache_range_asm(vma->vm_start, vma->vm_end);  in flush_cache_mm()
    511  for (addr = vma->vm_start; addr < vma->vm_end;  in flush_cache_mm()
|
/arch/ia64/mm/ |
D | fault.c |
    206  if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {  in ia64_do_page_fault()
    225  if (address > vma->vm_end + PAGE_SIZE - sizeof(long))  in ia64_do_page_fault()
|
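A pattern that recurs throughout these hits is that [vm_start, vm_end) is a half-open byte range: the size of a mapping is vm_end - vm_start (often shifted by PAGE_SHIFT to get a page count), and an address belongs to the mapping only if it is strictly below vm_end, as in the in_gate_area() and do_page_fault() checks above. The snippet below is a minimal, self-contained sketch of that convention; struct region and its helper functions are simplified stand-ins invented for illustration, not the kernel's vm_area_struct or its API.

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified stand-in for a VMA: only the two fields the hits above use.
 * [vm_start, vm_end) is half-open, so vm_end is the first byte past the
 * mapping, not the last byte inside it.
 */
struct region {
	unsigned long vm_start;
	unsigned long vm_end;
};

/* Size in bytes, mirroring the "vma->vm_end - vma->vm_start" idiom. */
static unsigned long region_size(const struct region *r)
{
	return r->vm_end - r->vm_start;
}

/* Containment test: start is inclusive, end is exclusive. */
static bool region_contains(const struct region *r, unsigned long addr)
{
	return addr >= r->vm_start && addr < r->vm_end;
}

int main(void)
{
	struct region r = { .vm_start = 0x1000, .vm_end = 0x3000 };

	printf("size = %lu bytes\n", region_size(&r));                 /* 8192 */
	printf("0x2fff in range: %d\n", region_contains(&r, 0x2fff));  /* 1 */
	printf("0x3000 in range: %d\n", region_contains(&r, 0x3000));  /* 0 */
	return 0;
}
```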