
Searched refs:vma (Results 1 – 25 of 343) sorted by relevance


/arch/powerpc/include/asm/book3s/64/
tlbflush.h
50 static inline void flush_pmd_tlb_range(struct vm_area_struct *vma, in flush_pmd_tlb_range() argument
54 return radix__flush_pmd_tlb_range(vma, start, end); in flush_pmd_tlb_range()
55 return hash__flush_tlb_range(vma, start, end); in flush_pmd_tlb_range()
59 static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma, in flush_hugetlb_tlb_range() argument
64 return radix__flush_hugetlb_tlb_range(vma, start, end); in flush_hugetlb_tlb_range()
65 return hash__flush_tlb_range(vma, start, end); in flush_hugetlb_tlb_range()
68 static inline void flush_tlb_range(struct vm_area_struct *vma, in flush_tlb_range() argument
72 return radix__flush_tlb_range(vma, start, end); in flush_tlb_range()
73 return hash__flush_tlb_range(vma, start, end); in flush_tlb_range()
91 static inline void local_flush_tlb_page(struct vm_area_struct *vma, in local_flush_tlb_page() argument
[all …]
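
Taken together, the hits above show the book3s64 pattern: both the radix and hash MMU back ends are built in, and each generic flush helper picks one at runtime. A minimal sketch of that dispatch, reduced from the excerpt rather than copied verbatim from upstream:

        /* Reduced sketch: each flush helper tests radix_enabled() and
         * forwards to the radix or hash implementation. */
        static inline void flush_tlb_range(struct vm_area_struct *vma,
                                           unsigned long start, unsigned long end)
        {
                if (radix_enabled())
                        return radix__flush_tlb_range(vma, start, end);
                return hash__flush_tlb_range(vma, start, end);
        }
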
/arch/arc/include/asm/
tlbflush.h
13 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
15 void local_flush_tlb_range(struct vm_area_struct *vma,
18 void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
23 #define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e) argument
24 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) argument
29 #define flush_pmd_tlb_range(vma, s, e) local_flush_pmd_tlb_range(vma, s, e) argument
32 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
34 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
39 extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
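
Lines 13-39 show the standard UP/SMP split: uniprocessor builds alias the generic names to the local_ variants, while SMP builds declare real cross-CPU functions. A sketch of the shape, with the config guard made explicit (an assumption; the excerpt elides the preprocessor lines):

        /* Sketch of the UP/SMP split; the CONFIG_SMP guard is assumed. */
        #ifndef CONFIG_SMP
        #define flush_tlb_range(vma, s, e)      local_flush_tlb_range(vma, s, e)
        #define flush_tlb_page(vma, page)       local_flush_tlb_page(vma, page)
        #else
        extern void flush_tlb_range(struct vm_area_struct *vma,
                                    unsigned long start, unsigned long end);
        extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
        #endif
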
/arch/powerpc/mm/book3s64/
radix_hugetlbpage.c
12 void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) in radix__flush_hugetlb_page() argument
15 struct hstate *hstate = hstate_file(vma->vm_file); in radix__flush_hugetlb_page()
18 radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize); in radix__flush_hugetlb_page()
21 void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) in radix__local_flush_hugetlb_page() argument
24 struct hstate *hstate = hstate_file(vma->vm_file); in radix__local_flush_hugetlb_page()
27 radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize); in radix__local_flush_hugetlb_page()
30 void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start, in radix__flush_hugetlb_tlb_range() argument
34 struct hstate *hstate = hstate_file(vma->vm_file); in radix__flush_hugetlb_tlb_range()
37 radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize); in radix__flush_hugetlb_tlb_range()
51 struct vm_area_struct *vma; in radix__hugetlb_get_unmapped_area() local
[all …]
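
The elided lines here compute a page-size class from the VMA's hstate before flushing. A plausible reconstruction of radix__flush_hugetlb_page(), assuming the usual hstate_get_psize() helper on the elided lines:

        /* Hedged reconstruction: derive the hugepage size class from the
         * backing file's hstate, then flush that address by psize. */
        void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
        {
                int psize;
                struct hstate *hstate = hstate_file(vma->vm_file);

                psize = hstate_get_psize(hstate);
                radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
        }
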
/arch/ia64/include/asm/
fb.h
10 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, in fb_pgprotect() argument
13 if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start)) in fb_pgprotect()
14 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); in fb_pgprotect()
16 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in fb_pgprotect()
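
fb_pgprotect() only rewrites vm_page_prot; the mapping itself is done by the fbdev core. A hypothetical caller sketch (fb_mmap_sketch is an illustrative name, not the upstream function, which lives in drivers/video/fbdev/core/fbmem.c):

        /* Hypothetical caller: apply the per-arch protection tweak,
         * then map the framebuffer into the VMA. */
        static int fb_mmap_sketch(struct file *file, struct fb_info *info,
                                  struct vm_area_struct *vma)
        {
                unsigned long off = vma->vm_pgoff << PAGE_SHIFT;

                fb_pgprotect(file, vma, off);   /* WC if EFI allows, else uncached */
                return vm_iomap_memory(vma, info->fix.smem_start,
                                       info->fix.smem_len);
        }
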
cacheflush.h
23 #define flush_cache_range(vma, start, end) do { } while (0) argument
24 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument
25 #define flush_icache_page(vma,page) do { } while (0) argument
42 #define flush_icache_user_range(vma, page, user_addr, len) \ argument
48 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
50 flush_icache_user_range(vma, page, vaddr, len); \
52 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
/arch/csky/include/asm/
tlb.h
9 #define tlb_start_vma(tlb, vma) \ argument
12 flush_cache_range(vma, (vma)->vm_start, (vma)->vm_end); \
15 #define tlb_end_vma(tlb, vma) \ argument
18 flush_tlb_range(vma, (vma)->vm_start, (vma)->vm_end); \
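
The truncated do/while bodies pair cache maintenance on entry with a TLB flush on exit of each VMA teardown. Reconstructed shape, assuming the conventional !fullmm guard these mmu_gather hooks normally carry (full-mm teardown skips per-VMA flushes):

        /* Reconstruction of the elided macro bodies; !fullmm is assumed. */
        #define tlb_start_vma(tlb, vma)                                         \
                do {                                                            \
                        if (!(tlb)->fullmm)                                     \
                                flush_cache_range(vma, (vma)->vm_start, (vma)->vm_end); \
                } while (0)

        #define tlb_end_vma(tlb, vma)                                           \
                do {                                                            \
                        if (!(tlb)->fullmm)                                     \
                                flush_tlb_range(vma, (vma)->vm_start, (vma)->vm_end); \
                } while (0)
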
/arch/arc/kernel/
arc_hostlink.c
18 static int arc_hl_mmap(struct file *fp, struct vm_area_struct *vma) in arc_hl_mmap() argument
20 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in arc_hl_mmap()
22 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in arc_hl_mmap()
23 vma->vm_end - vma->vm_start, in arc_hl_mmap()
24 vma->vm_page_prot)) { in arc_hl_mmap()
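
This hit is the canonical character-device mmap: mark the pages uncached, then map the physical range named by vm_pgoff straight through. Completed as a sketch; the -EAGAIN error path is an assumption, since the excerpt truncates before the failure branch:

        /* Sketch: uncached mapping of a physical window into userspace. */
        static int arc_hl_mmap(struct file *fp, struct vm_area_struct *vma)
        {
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

                if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
                        return -EAGAIN;         /* assumed error code */

                return 0;
        }
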
/arch/alpha/include/asm/
cacheflush.h
11 #define flush_cache_range(vma, start, end) do { } while (0) argument
12 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument
51 flush_icache_user_range(struct vm_area_struct *vma, struct page *page, in flush_icache_user_range() argument
54 if (vma->vm_flags & VM_EXEC) { in flush_icache_user_range()
55 struct mm_struct *mm = vma->vm_mm; in flush_icache_user_range()
63 extern void flush_icache_user_range(struct vm_area_struct *vma,
68 #define flush_icache_page(vma, page) \ argument
69 flush_icache_user_range((vma), (page), 0, 0)
71 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
73 flush_icache_user_range(vma, page, vaddr, len); \
[all …]
tlbflush.h
40 struct vm_area_struct *vma, in ev4_flush_tlb_current_page() argument
44 if (vma->vm_flags & VM_EXEC) { in ev4_flush_tlb_current_page()
53 struct vm_area_struct *vma, in ev5_flush_tlb_current_page() argument
56 if (vma->vm_flags & VM_EXEC) in ev5_flush_tlb_current_page()
118 flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) in flush_tlb_page() argument
120 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
123 flush_tlb_current_page(mm, vma, addr); in flush_tlb_page()
131 flush_tlb_range(struct vm_area_struct *vma, unsigned long start, in flush_tlb_range() argument
134 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
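
flush_tlb_page() at line 118 branches on whether the VMA's mm is live on the current CPU. A reduced sketch of that test; the flush_tlb_other() fallback is an assumption based on the usual alpha pattern, since the excerpt elides the else branch:

        /* Sketch: flush one page if the mm is active here, otherwise
         * invalidate the whole context (assumed helper name). */
        static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
        {
                struct mm_struct *mm = vma->vm_mm;

                if (mm == current->active_mm)
                        flush_tlb_current_page(mm, vma, addr);
                else
                        flush_tlb_other(mm);
        }
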
/arch/parisc/kernel/
cache.c
87 update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) in update_mmu_cache() argument
304 __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, in __flush_cache_page() argument
309 if (vma->vm_flags & VM_EXEC) in __flush_cache_page()
315 __purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, in __purge_cache_page() argument
320 if (vma->vm_flags & VM_EXEC) in __purge_cache_page()
524 struct vm_area_struct *vma; in mm_total_size() local
527 for (vma = mm->mmap; vma; vma = vma->vm_next) in mm_total_size()
528 usize += vma->vm_end - vma->vm_start; in mm_total_size()
549 struct vm_area_struct *vma; in flush_cache_mm() local
563 for (vma = mm->mmap; vma; vma = vma->vm_next) { in flush_cache_mm()
[all …]
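
mm_total_size() walks the per-mm VMA list; note this is the pre-maple-tree linked list (mm->mmap / vm_next), which newer kernels replace with a VMA iterator. The loop from the excerpt, made whole:

        /* The list walk completed; after the maple-tree conversion this
         * becomes a for_each_vma() iteration instead. */
        static unsigned long mm_total_size(struct mm_struct *mm)
        {
                struct vm_area_struct *vma;
                unsigned long usize = 0;

                for (vma = mm->mmap; vma; vma = vma->vm_next)
                        usize += vma->vm_end - vma->vm_start;
                return usize;
        }
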
/arch/sparc/include/asm/
cacheflush_64.h
24 #define flush_cache_range(vma, start, end) \ argument
25 flush_cache_mm((vma)->vm_mm)
26 #define flush_cache_page(vma, page, pfn) \ argument
27 flush_cache_mm((vma)->vm_mm)
51 #define flush_icache_page(vma, pg) do { } while(0) argument
52 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) argument
58 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
60 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
62 flush_ptrace_access(vma, page, vaddr, src, len, 0); \
65 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
[all …]
cacheflush_32.h
13 #define flush_cache_range(vma,start,end) \ argument
14 sparc32_cachetlb_ops->cache_range(vma, start, end)
15 #define flush_cache_page(vma,addr,pfn) \ argument
16 sparc32_cachetlb_ops->cache_page(vma, addr)
18 #define flush_icache_page(vma, pg) do { } while (0) argument
20 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) argument
22 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
24 flush_cache_page(vma, vaddr, page_to_pfn(page));\
27 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
29 flush_cache_page(vma, vaddr, page_to_pfn(page));\
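
Both copy_to_user_page() variants elide their middle line; on sparc32 the shape is flush, then copy (sparc64 additionally calls flush_ptrace_access(), visible above). Reconstructed, with the memcpy assumed:

        /* Reconstruction: flush the user alias so the copy lands in a
         * coherent view, then do the plain memcpy the excerpt elides. */
        #define copy_to_user_page(vma, page, vaddr, dst, src, len)      \
                do {                                                    \
                        flush_cache_page(vma, vaddr, page_to_pfn(page));\
                        memcpy(dst, src, len);                          \
                } while (0)
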
/arch/arm/mm/
fault-armv.c
37 static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, in do_adjust_pte() argument
53 flush_cache_page(vma, address, pfn); in do_adjust_pte()
58 set_pte_at(vma->vm_mm, address, ptep, entry); in do_adjust_pte()
59 flush_tlb_page(vma, address); in do_adjust_pte()
89 static int adjust_pte(struct vm_area_struct *vma, unsigned long address, in adjust_pte() argument
99 pgd = pgd_offset(vma->vm_mm, address); in adjust_pte()
116 ptl = pte_lockptr(vma->vm_mm, pmd); in adjust_pte()
120 ret = do_adjust_pte(vma, address, pfn, pte); in adjust_pte()
129 make_coherent(struct address_space *mapping, struct vm_area_struct *vma, in make_coherent() argument
132 struct mm_struct *mm = vma->vm_mm; in make_coherent()
[all …]
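
do_adjust_pte() shows the required ordering when changing a live user PTE: flush the cache alias first, install the new entry, then invalidate the stale TLB entry. Isolated into a hypothetical helper for clarity (set_user_pte is an illustrative name):

        /* Hypothetical helper isolating the ordering from do_adjust_pte():
         * cache flush -> PTE update -> TLB shootdown for that address. */
        static void set_user_pte(struct vm_area_struct *vma, unsigned long address,
                                 unsigned long pfn, pte_t *ptep, pte_t entry)
        {
                flush_cache_page(vma, address, pfn);
                set_pte_at(vma->vm_mm, address, ptep, entry);
                flush_tlb_page(vma, address);
        }
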
/arch/sh/include/asm/
tlbflush.h
16 extern void local_flush_tlb_range(struct vm_area_struct *vma,
19 extern void local_flush_tlb_page(struct vm_area_struct *vma,
31 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
33 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
41 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) argument
44 #define flush_tlb_range(vma, start, end) \ argument
45 local_flush_tlb_range(vma, start, end)
/arch/x86/entry/vdso/
vma.c
43 struct vm_area_struct *vma, struct vm_fault *vmf) in vdso_fault() argument
45 const struct vdso_image *image = vma->vm_mm->context.vdso_image; in vdso_fault()
88 struct vm_area_struct *vma, struct vm_fault *vmf) in vvar_fault() argument
90 const struct vdso_image *image = vma->vm_mm->context.vdso_image; in vvar_fault()
110 return vmf_insert_pfn(vma, vmf->address, in vvar_fault()
116 return vmf_insert_pfn_prot(vma, vmf->address, in vvar_fault()
118 pgprot_decrypted(vma->vm_page_prot)); in vvar_fault()
124 return vmf_insert_pfn(vma, vmf->address, in vvar_fault()
149 struct vm_area_struct *vma; in map_vdso() local
168 vma = _install_special_mapping(mm, in map_vdso()
[all …]
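
map_vdso() wires the fault handlers above into the address space through a struct vm_special_mapping. A reduced sketch of that hookup; the wrapper name, flags, and text_start parameter are assumptions from the common pattern, since the excerpt truncates the _install_special_mapping() call:

        /* Sketch: the vDSO text is installed as a special mapping whose
         * .fault callback is the vdso_fault() shown above. */
        static const struct vm_special_mapping vdso_mapping = {
                .name   = "[vdso]",
                .fault  = vdso_fault,
        };

        static int map_vdso_sketch(struct mm_struct *mm, unsigned long text_start,
                                   const struct vdso_image *image)
        {
                struct vm_area_struct *vma;

                vma = _install_special_mapping(mm, text_start, image->size,
                                               VM_READ | VM_EXEC |
                                               VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
                                               &vdso_mapping);
                return IS_ERR(vma) ? PTR_ERR(vma) : 0;
        }
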
/arch/parisc/mm/
fault.c
236 struct vm_area_struct *vma) in show_signal_msg() argument
250 vma ? ',':'\n'); in show_signal_msg()
252 if (vma) in show_signal_msg()
254 vma->vm_start, vma->vm_end); in show_signal_msg()
262 struct vm_area_struct *vma, *prev_vma; in do_page_fault() local
286 vma = find_vma_prev(mm, address, &prev_vma); in do_page_fault()
287 if (!vma || address < vma->vm_start) in do_page_fault()
296 if ((vma->vm_flags & acc_type) != acc_type) in do_page_fault()
305 fault = handle_mm_fault(vma, address, flags); in do_page_fault()
346 vma = prev_vma; in do_page_fault()
[all …]
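
do_page_fault() follows the canonical arch fault-handler flow: look up the VMA, validate access rights against vm_flags, then hand off to the generic handle_mm_fault(). The skeleton, reduced to its control flow (hypothetical wrapper name; locking, retry and signal delivery trimmed):

        /* Skeleton of the flow above; acc_type is derived from the
         * faulting instruction by the caller. */
        static void do_page_fault_sketch(struct mm_struct *mm, unsigned long address,
                                         unsigned long acc_type, unsigned int flags)
        {
                struct vm_area_struct *vma, *prev_vma;

                vma = find_vma_prev(mm, address, &prev_vma);
                if (!vma || address < vma->vm_start)
                        return;         /* or try stack expansion via prev_vma */
                if ((vma->vm_flags & acc_type) != acc_type)
                        return;         /* permission mismatch -> SIGSEGV path */
                handle_mm_fault(vma, address, flags);   /* generic MM does the work */
        }
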
/arch/ia64/mm/
fault.c
64 struct vm_area_struct *vma, *prev_vma; in ia64_do_page_fault() local
107 vma = find_vma_prev(mm, address, &prev_vma); in ia64_do_page_fault()
108 if (!vma && !prev_vma ) in ia64_do_page_fault()
118 if (( !vma && prev_vma ) || (address < vma->vm_start) ) in ia64_do_page_fault()
131 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE)))) in ia64_do_page_fault()
134 if ((vma->vm_flags & mask) != mask) in ia64_do_page_fault()
142 fault = handle_mm_fault(vma, address, flags); in ia64_do_page_fault()
187 if (!vma) in ia64_do_page_fault()
189 if (!(vma->vm_flags & VM_GROWSDOWN)) in ia64_do_page_fault()
191 if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start) in ia64_do_page_fault()
[all …]
/arch/mips/include/asm/
tlbflush.h
17 extern void local_flush_tlb_range(struct vm_area_struct *vma,
21 extern void local_flush_tlb_page(struct vm_area_struct *vma,
31 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long,
41 #define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end) argument
44 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) argument
/arch/arm/include/asm/
cacheflush.h
170 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
225 vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) in vivt_flush_cache_range() argument
227 struct mm_struct *mm = vma->vm_mm; in vivt_flush_cache_range()
231 vma->vm_flags); in vivt_flush_cache_range()
235 vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) in vivt_flush_cache_page() argument
237 struct mm_struct *mm = vma->vm_mm; in vivt_flush_cache_page()
241 __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); in vivt_flush_cache_page()
248 #define flush_cache_range(vma,start,end) \ argument
249 vivt_flush_cache_range(vma,start,end)
250 #define flush_cache_page(vma,addr,pfn) \ argument
[all …]
/arch/nios2/kernel/
sys_nios2.c
24 struct vm_area_struct *vma; in sys_cacheflush() local
41 vma = find_vma(current->mm, addr); in sys_cacheflush()
42 if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) in sys_cacheflush()
45 flush_cache_range(vma, addr, addr + len); in sys_cacheflush()
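
sys_cacheflush() validates a user-supplied range by requiring it to sit entirely inside one VMA of the caller before flushing. Completed sketch; the mmap_read_lock()/unlock() pair is an assumption, as the excerpt does not show the locking:

        /* Sketch: only flush a range fully contained in one VMA. */
        asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len,
                                      unsigned int op)
        {
                struct vm_area_struct *vma;

                mmap_read_lock(current->mm);    /* assumed; not in the excerpt */
                vma = find_vma(current->mm, addr);
                if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
                        mmap_read_unlock(current->mm);
                        return -EFAULT;
                }

                flush_cache_range(vma, addr, addr + len);
                mmap_read_unlock(current->mm);
                return 0;
        }
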
/arch/alpha/kernel/
pci-sysfs.c
19 struct vm_area_struct *vma, in hose_mmap_page_range() argument
29 vma->vm_pgoff += base >> PAGE_SHIFT; in hose_mmap_page_range()
31 return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in hose_mmap_page_range()
32 vma->vm_end - vma->vm_start, in hose_mmap_page_range()
33 vma->vm_page_prot); in hose_mmap_page_range()
37 struct vm_area_struct *vma, int sparse) in __pci_mmap_fits() argument
42 nr = vma_pages(vma); in __pci_mmap_fits()
43 start = vma->vm_pgoff; in __pci_mmap_fits()
66 struct vm_area_struct *vma, int sparse) in pci_mmap_resource() argument
83 if (!__pci_mmap_fits(pdev, i, vma, sparse)) in pci_mmap_resource()
[all …]
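
__pci_mmap_fits() checks that the requested window (vm_pgoff plus vma_pages()) stays inside the BAR before pci_mmap_resource() remaps it. A reduced sketch of the size test; the pci_resource_len() computation is an assumption from the usual pattern, and the sparse-space shift is elided:

        /* Sketch: reject mmap requests that run past the end of the BAR. */
        static int __pci_mmap_fits(struct pci_dev *pdev, int num,
                                   struct vm_area_struct *vma, int sparse)
        {
                unsigned long nr, start, size;

                nr = vma_pages(vma);            /* pages requested */
                start = vma->vm_pgoff;          /* offset into the BAR */
                size = ((pci_resource_len(pdev, num) - 1) >> PAGE_SHIFT) + 1;

                return start < size && size - start >= nr;
        }
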
/arch/m68k/include/asm/
fb.h
12 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, in fb_pgprotect() argument
15 pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE; in fb_pgprotect()
18 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, in fb_pgprotect() argument
22 pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030; in fb_pgprotect()
24 pgprot_val(vma->vm_page_prot) &= _CACHEMASK040; in fb_pgprotect()
26 pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE_S; in fb_pgprotect()
/arch/csky/abiv2/inc/abi/
cacheflush.h
17 #define flush_cache_range(vma, start, end) \ argument
19 if (vma->vm_flags & VM_EXEC) \
23 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument
31 void flush_icache_page(struct vm_area_struct *vma, struct page *page);
32 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
38 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
43 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
/arch/hexagon/include/asm/
cacheflush.h
31 #define flush_cache_range(vma, start, end) do { } while (0) argument
32 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument
37 #define flush_icache_page(vma, pg) do { } while (0) argument
38 #define flush_icache_user_range(vma, pg, adr, len) do { } while (0) argument
73 static inline void update_mmu_cache(struct vm_area_struct *vma, in update_mmu_cache() argument
79 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
82 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
/arch/powerpc/include/asm/
fb.h
9 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, in fb_pgprotect() argument
12 vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT, in fb_pgprotect()
13 vma->vm_end - vma->vm_start, in fb_pgprotect()
14 vma->vm_page_prot); in fb_pgprotect()
