/arch/ia64/ia32/binfmt_elf32.c
  56  ia32_install_shared_page (struct vm_area_struct *vma, struct vm_fault *vmf)  in ia32_install_shared_page() argument
  64  ia32_install_gate_page (struct vm_area_struct *vma, struct vm_fault *vmf)  in ia32_install_gate_page() argument
  83  struct vm_area_struct *vma;  in ia64_elf32_init() local
  90  vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);  in ia64_elf32_init()
  91  if (vma) {  in ia64_elf32_init()
  92  vma->vm_mm = current->mm;  in ia64_elf32_init()
  93  vma->vm_start = IA32_GDT_OFFSET;  in ia64_elf32_init()
  94  vma->vm_end = vma->vm_start + PAGE_SIZE;  in ia64_elf32_init()
  95  vma->vm_page_prot = PAGE_SHARED;  in ia64_elf32_init()
  96  vma->vm_flags = VM_READ|VM_MAYREAD|VM_RESERVED;  in ia64_elf32_init()
  [all …]

/arch/frv/mm/elf-fdpic.c
  62  struct vm_area_struct *vma;  in arch_get_unmapped_area() local
  75  vma = find_vma(current->mm, addr);  in arch_get_unmapped_area()
  77  (!vma || addr + len <= vma->vm_start))  in arch_get_unmapped_area()
  88  vma = find_vma(current->mm, PAGE_SIZE);  in arch_get_unmapped_area()
  89  for (; vma; vma = vma->vm_next) {  in arch_get_unmapped_area()
  92  if (addr + len <= vma->vm_start)  in arch_get_unmapped_area()
  94  addr = vma->vm_end;  in arch_get_unmapped_area()
  103  vma = find_vma(current->mm, addr);  in arch_get_unmapped_area()
  104  for (; vma; vma = vma->vm_next) {  in arch_get_unmapped_area()
  107  if (addr + len <= vma->vm_start)  in arch_get_unmapped_area()
  [all …]

/arch/x86/kernel/sys_x86_64.c
  80  struct vm_area_struct *vma;  in arch_get_unmapped_area() local
  94  vma = find_vma(mm, addr);  in arch_get_unmapped_area()
  96  (!vma || addr + len <= vma->vm_start))  in arch_get_unmapped_area()
  110  for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {  in arch_get_unmapped_area()
  124  if (!vma || addr + len <= vma->vm_start) {  in arch_get_unmapped_area()
  131  if (addr + mm->cached_hole_size < vma->vm_start)  in arch_get_unmapped_area()
  132  mm->cached_hole_size = vma->vm_start - addr;  in arch_get_unmapped_area()
  134  addr = vma->vm_end;  in arch_get_unmapped_area()
  144  struct vm_area_struct *vma;  in arch_get_unmapped_area_topdown() local
  162  vma = find_vma(mm, addr);  in arch_get_unmapped_area_topdown()
  [all …]

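The frv and x86 arch_get_unmapped_area() hits above (and the sh, arm and sparc ones further down) share the same bottom-up pattern: start at a hint address, walk the address-ordered VMA list, and return the first gap large enough for the request. Below is a minimal, self-contained restatement of that loop in plain C; struct demo_vma, demo_find_vma() and the addresses in main() are made-up stand-ins for the kernel's vm_area_struct, find_vma() and a real address space, not kernel code.

    #include <stdio.h>

    struct demo_vma {
            unsigned long vm_start, vm_end;   /* [vm_start, vm_end) is mapped    */
            struct demo_vma *vm_next;         /* next mapping, sorted by address */
    };

    /* First VMA whose end lies above addr, or NULL if none (like find_vma()). */
    static struct demo_vma *demo_find_vma(struct demo_vma *list, unsigned long addr)
    {
            for (; list; list = list->vm_next)
                    if (addr < list->vm_end)
                            return list;
            return NULL;
    }

    static unsigned long demo_get_unmapped_area(struct demo_vma *vmas,
                                                unsigned long addr,
                                                unsigned long len)
    {
            struct demo_vma *vma;

            for (vma = demo_find_vma(vmas, addr); ; vma = vma->vm_next) {
                    /* No further mappings, or the hole before this one is
                     * big enough: [addr, addr + len) is free. */
                    if (!vma || addr + len <= vma->vm_start)
                            return addr;
                    /* Otherwise continue the search just past this mapping. */
                    addr = vma->vm_end;
            }
    }

    int main(void)
    {
            struct demo_vma b = { 0x60000, 0x70000, NULL };
            struct demo_vma a = { 0x40000, 0x50000, &b };

            /* Request 0x20000 bytes starting at 0x40000; the first gap that
             * large begins at 0x70000, past both mappings. */
            printf("got %#lx\n", demo_get_unmapped_area(&a, 0x40000, 0x20000));
            return 0;
    }
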
/arch/xtensa/include/asm/tlb.h
  21  # define tlb_start_vma(tlb,vma) do { } while (0)  argument
  22  # define tlb_end_vma(tlb,vma) do { } while (0)  argument
  26  # define tlb_start_vma(tlb, vma) \  argument
  29  flush_cache_range(vma, vma->vm_start, vma->vm_end); \
  32  # define tlb_end_vma(tlb, vma) \  argument
  35  flush_tlb_range(vma, vma->vm_start, vma->vm_end); \

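For orientation: tlb_start_vma()/tlb_end_vma() as defined here (and in the sparc, sh, avr32 and parisc headers below) are hooks the generic unmap path calls around tearing down a single VMA, so caches can be flushed for the range before the page tables change and the TLB entries dropped afterwards. The fragment below only illustrates that bracketing as plain C with stubbed flush routines; unmap_one_vma() and struct demo_vma are invented names for the sketch, not kernel interfaces.

    #include <stdio.h>

    struct demo_vma { unsigned long vm_start, vm_end; };

    /* Stand-ins for the per-arch flush routines named in the headers above. */
    static void flush_cache_range(struct demo_vma *vma,
                                  unsigned long start, unsigned long end)
    {
            (void)vma;
            printf("flush caches for %#lx-%#lx\n", start, end);
    }

    static void flush_tlb_range(struct demo_vma *vma,
                                unsigned long start, unsigned long end)
    {
            (void)vma;
            printf("flush TLB for %#lx-%#lx\n", start, end);
    }

    /* Invented stand-in for the generic code that unmaps one VMA. */
    static void unmap_one_vma(struct demo_vma *vma)
    {
            flush_cache_range(vma, vma->vm_start, vma->vm_end);  /* tlb_start_vma() */
            /* ... page tables for [vm_start, vm_end) are torn down here ... */
            flush_tlb_range(vma, vma->vm_start, vma->vm_end);    /* tlb_end_vma()   */
    }

    int main(void)
    {
            struct demo_vma vma = { 0x400000, 0x401000 };

            unmap_one_vma(&vma);
            return 0;
    }
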
/arch/powerpc/include/asm/tlbflush.h
  36  extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
  41  extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
  45  extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
  48  #define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr)  argument
  50  #define flush_tlb_page_nohash(vma,addr) flush_tlb_page(vma,addr)  argument
  58  extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
  59  extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
  60  extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
  63  static inline void local_flush_tlb_page(struct vm_area_struct *vma,  in local_flush_tlb_page() argument
  66  flush_tlb_page(vma, vmaddr);  in local_flush_tlb_page()
  [all …]

/arch/ia64/include/asm/fb.h
   9  static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,  in fb_pgprotect() argument
  12  if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))  in fb_pgprotect()
  13  vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);  in fb_pgprotect()
  15  vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);  in fb_pgprotect()

/arch/sparc/include/asm/tlb_32.h
   4  #define tlb_start_vma(tlb, vma) \  argument
   6  flush_cache_range(vma, vma->vm_start, vma->vm_end); \
   9  #define tlb_end_vma(tlb, vma) \  argument
  11  flush_tlb_range(vma, vma->vm_start, vma->vm_end); \

/arch/sparc/include/asm/cacheflush_32.h
  23  #define local_flush_cache_range(vma,start,end) BTFIXUP_CALL(local_flush_cache_range)(vma,start,end)  argument
  24  #define local_flush_cache_page(vma,addr) BTFIXUP_CALL(local_flush_cache_page)(vma,addr)  argument
  34  extern void smp_flush_cache_range(struct vm_area_struct *vma,
  37  extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
  52  #define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)  argument
  53  #define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)  argument
  55  #define flush_icache_page(vma, pg) do { } while (0)  argument
  57  #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)  argument
  59  #define copy_to_user_page(vma, page, vaddr, dst, src, len) \  argument
  61  flush_cache_page(vma, vaddr, page_to_pfn(page));\
  [all …]

/arch/sparc/include/asm/cacheflush_64.h
  16  #define flush_cache_range(vma, start, end) \  argument
  17  flush_cache_mm((vma)->vm_mm)
  18  #define flush_cache_page(vma, page, pfn) \  argument
  19  flush_cache_mm((vma)->vm_mm)
  42  #define flush_icache_page(vma, pg) do { } while(0)  argument
  43  #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)  argument
  49  #define copy_to_user_page(vma, page, vaddr, dst, src, len) \  argument
  51  flush_cache_page(vma, vaddr, page_to_pfn(page)); \
  53  flush_ptrace_access(vma, page, vaddr, src, len, 0); \
  56  #define copy_from_user_page(vma, page, vaddr, dst, src, len) \  argument
  [all …]

/arch/sparc/include/asm/tlbflush_32.h
  27  #define local_flush_tlb_range(vma,start,end) BTFIXUP_CALL(local_flush_tlb_range)(vma,start,end)  argument
  28  #define local_flush_tlb_page(vma,addr) BTFIXUP_CALL(local_flush_tlb_page)(vma,addr)  argument
  32  extern void smp_flush_tlb_range(struct vm_area_struct *vma,
  46  #define flush_tlb_range(vma,start,end) BTFIXUP_CALL(flush_tlb_range)(vma,start,end)  in BTFIXUPDEF_CALL() argument
  47  #define flush_tlb_page(vma,addr) BTFIXUP_CALL(flush_tlb_page)(vma,addr)  in BTFIXUPDEF_CALL() argument

/arch/x86/pci/i386.c
  261  static void pci_unmap_page_range(struct vm_area_struct *vma)  in pci_unmap_page_range() argument
  263  u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT;  in pci_unmap_page_range()
  264  free_memtype(addr, addr + vma->vm_end - vma->vm_start);  in pci_unmap_page_range()
  267  static void pci_track_mmap_page_range(struct vm_area_struct *vma)  in pci_track_mmap_page_range() argument
  269  u64 addr = (u64)vma->vm_pgoff << PAGE_SHIFT;  in pci_track_mmap_page_range()
  270  unsigned long flags = pgprot_val(vma->vm_page_prot)  in pci_track_mmap_page_range()
  273  reserve_memtype(addr, addr + vma->vm_end - vma->vm_start, flags, NULL);  in pci_track_mmap_page_range()
  282  int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,  in pci_mmap_page_range() argument
  286  u64 addr = vma->vm_pgoff << PAGE_SHIFT;  in pci_mmap_page_range()
  287  unsigned long len = vma->vm_end - vma->vm_start;  in pci_mmap_page_range()
  [all …]

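The three pci_*_page_range() hits above derive the physical range from the VMA the same way: vm_pgoff counts whole pages, so the start address is vm_pgoff << PAGE_SHIFT and the length is simply vm_end - vm_start. The stand-alone sketch below shows only that arithmetic; the values and the hard-coded 4 KiB PAGE_SHIFT are assumptions for illustration, not taken from the kernel.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12   /* assume 4 KiB pages, as on x86 */

    int main(void)
    {
            /* Made-up example values for a memory-mapped PCI BAR. */
            uint64_t vm_pgoff = 0xfd000;            /* page offset passed to mmap() */
            uint64_t vm_start = 0x7f0000000000ULL;  /* user virtual start           */
            uint64_t vm_end   = vm_start + 0x4000;  /* four pages mapped            */

            uint64_t addr = vm_pgoff << PAGE_SHIFT; /* physical/bus start address   */
            uint64_t len  = vm_end - vm_start;      /* size of the mapping          */

            printf("range: %#llx - %#llx (%llu bytes)\n",
                   (unsigned long long)addr,
                   (unsigned long long)(addr + len),
                   (unsigned long long)len);
            return 0;
    }
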
/arch/sh/include/asm/tlb.h
  10  #define tlb_start_vma(tlb, vma) \  argument
  11  flush_cache_range(vma, vma->vm_start, vma->vm_end)
  13  #define tlb_end_vma(tlb, vma) \  argument
  14  flush_tlb_range(vma, vma->vm_start, vma->vm_end)

/arch/sh/include/asm/cacheflush.h
  17  #define flush_cache_range(vma, start, end) do { } while (0)  argument
  18  #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)  argument
  21  #define flush_icache_page(vma,pg) do { } while (0)  argument
  25  #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)  argument
  53  extern void copy_to_user_page(struct vm_area_struct *vma,
  57  extern void copy_from_user_page(struct vm_area_struct *vma,
  61  #define copy_to_user_page(vma, page, vaddr, dst, src, len) \  argument
  63  flush_cache_page(vma, vaddr, page_to_pfn(page));\
  65  flush_icache_user_range(vma, page, vaddr, len); \
  68  #define copy_from_user_page(vma, page, vaddr, dst, src, len) \  argument
  [all …]

/arch/sh/include/asm/tlbflush.h
  15  extern void local_flush_tlb_range(struct vm_area_struct *vma,
  18  extern void local_flush_tlb_page(struct vm_area_struct *vma,
  28  extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
  30  extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
  38  #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)  argument
  41  #define flush_tlb_range(vma, start, end) \  argument
  42  local_flush_tlb_range(vma, start, end)

/arch/avr32/include/asm/tlb.h
  11  #define tlb_start_vma(tlb, vma) \  argument
  12  flush_cache_range(vma, vma->vm_start, vma->vm_end)
  14  #define tlb_end_vma(tlb, vma) \  argument
  15  flush_tlb_range(vma, vma->vm_start, vma->vm_end)

/arch/alpha/include/asm/cacheflush.h
  10  #define flush_cache_range(vma, start, end) do { } while (0)  argument
  11  #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)  argument
  47  flush_icache_user_range(struct vm_area_struct *vma, struct page *page,  in flush_icache_user_range() argument
  50  if (vma->vm_flags & VM_EXEC) {  in flush_icache_user_range()
  51  struct mm_struct *mm = vma->vm_mm;  in flush_icache_user_range()
  59  extern void flush_icache_user_range(struct vm_area_struct *vma,
  64  #define flush_icache_page(vma, page) \  argument
  65  flush_icache_user_range((vma), (page), 0, 0)
  67  #define copy_to_user_page(vma, page, vaddr, dst, src, len) \  argument
  69  flush_icache_user_range(vma, page, vaddr, len); \
  [all …]

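The copy_to_user_page() definitions recurring in the sparc, sh and alpha entries above share one idea: after the kernel writes into a page that user space also maps (ptrace setting a breakpoint, for example), the data is copied and the instruction cache is refreshed when the mapping is executable — the alpha flush_icache_user_range() above checks VM_EXEC for exactly that reason. The following is a simplified, self-contained sketch of the pattern; the struct, the VM_EXEC value and the printing stub are stand-ins for illustration only.

    #include <stdio.h>
    #include <string.h>

    #define VM_EXEC 0x4UL                     /* stand-in for the kernel flag */

    struct demo_vma { unsigned long vm_flags; };

    /* Stub: only executable mappings need their icache refreshed. */
    static void flush_icache_user_range(struct demo_vma *vma, void *addr, size_t len)
    {
            if (vma->vm_flags & VM_EXEC)
                    printf("flush icache at %p, %zu bytes\n", addr, len);
    }

    static void copy_to_user_page(struct demo_vma *vma, void *dst,
                                  const void *src, size_t len)
    {
            memcpy(dst, src, len);                  /* the actual write         */
            flush_icache_user_range(vma, dst, len); /* keep the icache coherent */
    }

    int main(void)
    {
            char page[64] = { 0 };
            struct demo_vma vma = { .vm_flags = VM_EXEC };

            copy_to_user_page(&vma, page, "breakpoint opcode", 17);
            return 0;
    }
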
/arch/x86/mm/hugetlbpage.c
  21  struct vm_area_struct *vma,  in page_table_shareable() argument
  34  vma->vm_flags != svma->vm_flags ||  in page_table_shareable()
  41  static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)  in vma_shareable() argument
  49  if (vma->vm_flags & VM_MAYSHARE &&  in vma_shareable()
  50  vma->vm_start <= base && end <= vma->vm_end)  in vma_shareable()
  60  struct vm_area_struct *vma = find_vma(mm, addr);  in huge_pmd_share() local
  61  struct address_space *mapping = vma->vm_file->f_mapping;  in huge_pmd_share()
  62  pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +  in huge_pmd_share()
  63  vma->vm_pgoff;  in huge_pmd_share()
  69  if (!vma_shareable(vma, addr))  in huge_pmd_share()
  [all …]

/arch/parisc/include/asm/tlb.h
   9  #define tlb_start_vma(tlb, vma) \  argument
  11  flush_cache_range(vma, vma->vm_start, vma->vm_end); \
  14  #define tlb_end_vma(tlb, vma) \  argument
  16  flush_tlb_range(vma, vma->vm_start, vma->vm_end); \

/arch/sh/mm/mmap.c
  32  struct vm_area_struct *vma;  in arch_get_unmapped_area() local
  58  vma = find_vma(mm, addr);  in arch_get_unmapped_area()
  60  (!vma || addr + len <= vma->vm_start))  in arch_get_unmapped_area()
  77  for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {  in arch_get_unmapped_area()
  91  if (likely(!vma || addr + len <= vma->vm_start)) {  in arch_get_unmapped_area()
  98  if (addr + mm->cached_hole_size < vma->vm_start)  in arch_get_unmapped_area()
  99  mm->cached_hole_size = vma->vm_start - addr;  in arch_get_unmapped_area()
  101  addr = vma->vm_end;  in arch_get_unmapped_area()

/arch/arm/mm/mmap.c
  31  struct vm_area_struct *vma;  in arch_get_unmapped_area() local
  71  vma = find_vma(mm, addr);  in arch_get_unmapped_area()
  73  (!vma || addr + len <= vma->vm_start))  in arch_get_unmapped_area()
  89  for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {  in arch_get_unmapped_area()
  103  if (!vma || addr + len <= vma->vm_start) {  in arch_get_unmapped_area()
  110  if (addr + mm->cached_hole_size < vma->vm_start)  in arch_get_unmapped_area()
  111  mm->cached_hole_size = vma->vm_start - addr;  in arch_get_unmapped_area()
  112  addr = vma->vm_end;  in arch_get_unmapped_area()

/arch/arm/mm/fault-armv.c
  37  static int adjust_pte(struct vm_area_struct *vma, unsigned long address)  in adjust_pte() argument
  44  pgd = pgd_offset(vma->vm_mm, address);  in adjust_pte()
  70  flush_cache_page(vma, address, pfn);  in adjust_pte()
  75  set_pte_at(vma->vm_mm, address, pte, entry);  in adjust_pte()
  76  flush_tlb_page(vma, address);  in adjust_pte()
  95  make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsign…  in make_coherent() argument
  97  struct mm_struct *mm = vma->vm_mm;  in make_coherent()
  104  pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);  in make_coherent()
  118  if (mpnt->vm_mm != mm || mpnt == vma)  in make_coherent()
  127  adjust_pte(vma, addr);  in make_coherent()
  [all …]

/arch/powerpc/platforms/cell/spu_fault.c
  38  struct vm_area_struct *vma;  in spu_handle_mm_fault() local
  55  vma = find_vma(mm, ea);  in spu_handle_mm_fault()
  56  if (!vma)  in spu_handle_mm_fault()
  58  if (vma->vm_start <= ea)  in spu_handle_mm_fault()
  60  if (!(vma->vm_flags & VM_GROWSDOWN))  in spu_handle_mm_fault()
  62  if (expand_stack(vma, ea))  in spu_handle_mm_fault()
  67  if (!(vma->vm_flags & VM_WRITE))  in spu_handle_mm_fault()
  72  if (!(vma->vm_flags & (VM_READ | VM_EXEC)))  in spu_handle_mm_fault()
  76  *flt = handle_mm_fault(mm, vma, ea, is_write);  in spu_handle_mm_fault()

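spu_handle_mm_fault() above shows the usual validation a fault handler performs before calling handle_mm_fault(): locate the VMA covering the address, let a VM_GROWSDOWN stack VMA expand down to it, then require VM_WRITE for a write access and VM_READ or VM_EXEC for a read. The sketch below restates only that check in plain C; demo_find_vma(), demo_check_access() and the flag constants are simplified stand-ins, not the kernel's definitions.

    #include <stddef.h>

    #define VM_READ      0x1UL
    #define VM_WRITE     0x2UL
    #define VM_EXEC      0x4UL
    #define VM_GROWSDOWN 0x100UL

    struct demo_vma {
            unsigned long vm_start, vm_end, vm_flags;
            struct demo_vma *vm_next;
    };

    /* First VMA whose end lies above addr, or NULL if none (like find_vma()). */
    static struct demo_vma *demo_find_vma(struct demo_vma *list, unsigned long addr)
    {
            for (; list; list = list->vm_next)
                    if (addr < list->vm_end)
                            return list;
            return NULL;
    }

    /* Return 0 if the access may be serviced, -1 if it is a bad access. */
    int demo_check_access(struct demo_vma *vmas, unsigned long ea, int is_write)
    {
            struct demo_vma *vma = demo_find_vma(vmas, ea);

            if (!vma)
                    return -1;
            if (vma->vm_start > ea) {
                    /* The address lies below the VMA: only a growable stack
                     * VMA may expand down to cover it. */
                    if (!(vma->vm_flags & VM_GROWSDOWN))
                            return -1;
                    vma->vm_start = ea;        /* stand-in for expand_stack() */
            }
            if (is_write)
                    return (vma->vm_flags & VM_WRITE) ? 0 : -1;
            return (vma->vm_flags & (VM_READ | VM_EXEC)) ? 0 : -1;
    }
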
/arch/sparc/mm/hugetlbpage.c
  36  struct vm_area_struct * vma;  in hugetlb_get_unmapped_area_bottomup() local
  57  for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {  in hugetlb_get_unmapped_area_bottomup()
  62  vma = find_vma(mm, VA_EXCLUDE_END);  in hugetlb_get_unmapped_area_bottomup()
  72  if (likely(!vma || addr + len <= vma->vm_start)) {  in hugetlb_get_unmapped_area_bottomup()
  79  if (addr + mm->cached_hole_size < vma->vm_start)  in hugetlb_get_unmapped_area_bottomup()
  80  mm->cached_hole_size = vma->vm_start - addr;  in hugetlb_get_unmapped_area_bottomup()
  82  addr = ALIGN(vma->vm_end, HPAGE_SIZE);  in hugetlb_get_unmapped_area_bottomup()
  92  struct vm_area_struct *vma;  in hugetlb_get_unmapped_area_topdown() local
  110  vma = find_vma(mm, addr-len);  in hugetlb_get_unmapped_area_topdown()
  111  if (!vma || addr <= vma->vm_start) {  in hugetlb_get_unmapped_area_topdown()
  [all …]

/arch/m68k/include/asm/cacheflush_mm.h
  96  static inline void flush_cache_range(struct vm_area_struct *vma,  in flush_cache_range() argument
  100  if (vma->vm_mm == current->mm)  in flush_cache_range()
  104  static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long…  in flush_cache_page() argument
  106  if (vma->vm_mm == current->mm)  in flush_cache_page()
  134  #define flush_icache_page(vma, page) __flush_page_to_ram(page_address(page))  argument
  136  extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
  140  static inline void copy_to_user_page(struct vm_area_struct *vma,  in copy_to_user_page() argument
  144  flush_cache_page(vma, vaddr, page_to_pfn(page));  in copy_to_user_page()
  146  flush_icache_user_range(vma, page, vaddr, len);  in copy_to_user_page()
  148  static inline void copy_from_user_page(struct vm_area_struct *vma,  in copy_from_user_page() argument
  [all …]

/arch/mips/include/asm/tlbflush.h
  17  extern void local_flush_tlb_range(struct vm_area_struct *vma,
  21  extern void local_flush_tlb_page(struct vm_area_struct *vma,
  29  extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long,
  39  #define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end)  argument
  42  #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)  argument