/arch/metag/include/asm/

tlb.h:
    11  #define tlb_start_vma(tlb, vma) \
    14                  flush_cache_range(vma, vma->vm_start, vma->vm_end); \
    17  #define tlb_end_vma(tlb, vma) \
    20                  flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
    26  #define tlb_start_vma(tlb, vma) do { } while (0)
    27  #define tlb_end_vma(tlb, vma) do { } while (0)

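This hook pair recurs throughout the listing (xtensa, sparc, avr32, arc, and parisc below): on architectures whose caches need it, tlb_start_vma() flushes the cache over the VMA's range before its pages are unmapped and tlb_end_vma() flushes the TLB afterwards; everywhere else both collapse to no-ops. A minimal sketch of the pattern, assuming this era's struct mmu_gather with its fullmm flag; CONFIG_NEED_VMA_FLUSH is a hypothetical stand-in for each arch's real condition:

    /*
     * Sketch of the per-VMA mmu_gather hooks of this kernel vintage
     * (before the generic asm-generic/tlb.h took over). The hypothetical
     * CONFIG_NEED_VMA_FLUSH stands in for "this arch's cache/TLB needs
     * per-VMA flushing".
     */
    #ifdef CONFIG_NEED_VMA_FLUSH
    #define tlb_start_vma(tlb, vma)                                 \
            do {                                                    \
                    if (!(tlb)->fullmm)                             \
                            flush_cache_range(vma, (vma)->vm_start, \
                                              (vma)->vm_end);       \
            } while (0)

    #define tlb_end_vma(tlb, vma)                                   \
            do {                                                    \
                    if (!(tlb)->fullmm)                             \
                            flush_tlb_range(vma, (vma)->vm_start,   \
                                            (vma)->vm_end);         \
            } while (0)
    #else
    #define tlb_start_vma(tlb, vma) do { } while (0)
    #define tlb_end_vma(tlb, vma)   do { } while (0)
    #endif

The !fullmm guard is most likely what occupies the unshown lines between each #define and its flush call in the metag and parisc entries: a whole-mm teardown can skip per-VMA flushes because the entire context is discarded at once.
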
/arch/xtensa/include/asm/

tlb.h:
    21  # define tlb_start_vma(tlb,vma) do { } while (0)
    22  # define tlb_end_vma(tlb,vma) do { } while (0)
    26  # define tlb_start_vma(tlb, vma) \
    29                  flush_cache_range(vma, vma->vm_start, vma->vm_end); \
    32  # define tlb_end_vma(tlb, vma) \
    35                  flush_tlb_range(vma, vma->vm_start, vma->vm_end); \

/arch/m32r/include/asm/

cacheflush.h:
    13  #define flush_cache_range(vma, start, end) do { } while (0)
    14  #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
    21  #define flush_icache_page(vma,pg) _flush_cache_copyback_all()
    22  #define flush_icache_user_range(vma,pg,adr,len) _flush_cache_copyback_all()
    27  #define flush_icache_page(vma,pg) smp_flush_cache_all()
    28  #define flush_icache_user_range(vma,pg,adr,len) smp_flush_cache_all()
    35  #define flush_cache_range(vma, start, end) do { } while (0)
    36  #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
    42  #define flush_icache_page(vma,pg) _flush_cache_all()
    43  #define flush_icache_user_range(vma,pg,adr,len) _flush_cache_all()
    [all …]

tlbflush.h:
    27  #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
    28  #define flush_tlb_range(vma, start, end) \
    29          local_flush_tlb_range(vma, start, end)
    34  #define flush_tlb_page(vma, vmaddr) do { } while (0)
    35  #define flush_tlb_range(vma, start, end) do { } while (0)
    46  #define flush_tlb_page(vma, page) smp_flush_tlb_page(vma, page)
    47  #define flush_tlb_range(vma, start, end) \
    48          smp_flush_tlb_range(vma, start, end)

/arch/ia64/include/asm/

fb.h:
     9  static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
    12          if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
    13                  vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
    15                  vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

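For context, a condensed sketch of how the fbdev core consumes this per-arch helper: the generic mmap path fixes up vm_page_prot via fb_pgprotect() and then remaps the framebuffer. This simplifies the drivers/video/fbmem.c flow; fb_mmap_sketch() and the offset handling are illustrative assumptions, not the real function:

    #include <linux/fb.h>
    #include <linux/mm.h>
    #include <asm/fb.h>

    /* Hypothetical condensation of the generic fbdev mmap path. */
    static int fb_mmap_sketch(struct fb_info *info, struct file *file,
                              struct vm_area_struct *vma)
    {
            unsigned long start = info->fix.smem_start;  /* phys framebuffer */

            /* Arch hook picks write-combine vs. uncached (see fb.h above). */
            fb_pgprotect(file, vma, start);

            return io_remap_pfn_range(vma, vma->vm_start, start >> PAGE_SHIFT,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);
    }
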
/arch/sparc/include/asm/

tlb_32.h:
     4  #define tlb_start_vma(tlb, vma) \
     6          flush_cache_range(vma, vma->vm_start, vma->vm_end); \
     9  #define tlb_end_vma(tlb, vma) \
    11          flush_tlb_range(vma, vma->vm_start, vma->vm_end); \

cacheflush_64.h:
    23  #define flush_cache_range(vma, start, end) \
    24          flush_cache_mm((vma)->vm_mm)
    25  #define flush_cache_page(vma, page, pfn) \
    26          flush_cache_mm((vma)->vm_mm)
    50  #define flush_icache_page(vma, pg) do { } while(0)
    51  #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
    57  #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
    59          flush_cache_page(vma, vaddr, page_to_pfn(page)); \
    61          flush_ptrace_access(vma, page, vaddr, src, len, 0); \
    64  #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
    [all …]

cacheflush_32.h:
    12  #define flush_cache_range(vma,start,end) \
    13          sparc32_cachetlb_ops->cache_range(vma, start, end)
    14  #define flush_cache_page(vma,addr,pfn) \
    15          sparc32_cachetlb_ops->cache_page(vma, addr)
    17  #define flush_icache_page(vma, pg) do { } while (0)
    19  #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
    21  #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
    23          flush_cache_page(vma, vaddr, page_to_pfn(page));\
    26  #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
    28          flush_cache_page(vma, vaddr, page_to_pfn(page));\

/arch/avr32/include/asm/

tlb.h:
    11  #define tlb_start_vma(tlb, vma) \
    12          flush_cache_range(vma, vma->vm_start, vma->vm_end)
    14  #define tlb_end_vma(tlb, vma) \
    15          flush_tlb_range(vma, vma->vm_start, vma->vm_end)

/arch/arc/kernel/

arc_hostlink.c:
    21  static int arc_hl_mmap(struct file *fp, struct vm_area_struct *vma)
    23          vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    25          if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
    26                                 vma->vm_end - vma->vm_start,
    27                                 vma->vm_page_prot)) {

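The excerpt breaks off inside the if. A typical completion of such an uncached-mmap handler looks like the sketch below; the warning text and -EAGAIN return are assumptions about the elided lines, not quotes of the file:

    #include <linux/fs.h>
    #include <linux/mm.h>

    static int arc_hl_mmap(struct file *fp, struct vm_area_struct *vma)
    {
            /* Map the hostlink buffer uncached into user space. */
            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

            if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                   vma->vm_end - vma->vm_start,
                                   vma->vm_page_prot)) {
                    pr_warn("arc_hostlink: mmap failed\n");  /* assumed */
                    return -EAGAIN;                          /* assumed */
            }
            return 0;
    }
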
/arch/powerpc/include/asm/

tlbflush.h:
    37  extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
    42  extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
    49  extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
    54  #define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr)
    57  #define flush_tlb_page_nohash(vma,addr) flush_tlb_page(vma,addr)
    65  extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
    66  extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
    67  extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
    70  static inline void local_flush_tlb_page(struct vm_area_struct *vma,
    73          flush_tlb_page(vma, vmaddr);
    [all …]

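The UP configuration maps flush_tlb_page() straight to local_flush_tlb_page(), while SMP builds take the extern broadcast versions; the caller-side contract is identical either way. A sketch of that contract (set_and_flush() is hypothetical; fault-armv.c further down does exactly this):

    #include <linux/mm.h>
    #include <asm/tlbflush.h>

    static void set_and_flush(struct vm_area_struct *vma, unsigned long addr,
                              pte_t *ptep, pte_t entry)
    {
            set_pte_at(vma->vm_mm, addr, ptep, entry);
            flush_tlb_page(vma, addr);   /* UP: local flush; SMP: broadcast */
    }
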
/arch/alpha/include/asm/

cacheflush.h:
    10  #define flush_cache_range(vma, start, end) do { } while (0)
    11  #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
    50  flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
    53          if (vma->vm_flags & VM_EXEC) {
    54                  struct mm_struct *mm = vma->vm_mm;
    62  extern void flush_icache_user_range(struct vm_area_struct *vma,
    67  #define flush_icache_page(vma, page) \
    68          flush_icache_user_range((vma), (page), 0, 0)
    70  #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
    72          flush_icache_user_range(vma, page, vaddr, len); \
    [all …]

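Note how flush_icache_user_range() only acts on VM_EXEC mappings, which is what makes copy_to_user_page() safe for ptrace-style text pokes such as breakpoint insertion. A hedged sketch of that contract (poke_text() is hypothetical):

    #include <linux/mm.h>
    #include <linux/string.h>
    #include <asm/cacheflush.h>

    static void poke_text(struct vm_area_struct *vma, struct page *page,
                          unsigned long vaddr, void *dst, const void *src,
                          int len)
    {
            memcpy(dst, src, len);          /* write the new instructions */
            if (vma->vm_flags & VM_EXEC)    /* icache matters only if exec */
                    flush_icache_user_range(vma, page, vaddr, len);
    }
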
/arch/metag/mm/

hugetlbpage.c:
    35          struct vm_area_struct *vma;     /* in prepare_hugepage_range() */
    44          vma = find_vma(mm, ALIGN_HUGEPT(addr));
    45          if (vma && !(vma->vm_flags & MAP_HUGETLB))
    48          vma = find_vma(mm, addr);
    49          if (vma) {
    50                  if (addr + len > vma->vm_start)
    52                  if (!(vma->vm_flags & MAP_HUGETLB) &&
    53                      (ALIGN_HUGEPT(addr + len) > vma->vm_start))
   137          struct vm_area_struct *vma;     /* in hugetlb_get_unmapped_area_existing() */
   151          for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
    [all …]

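Line 151 is the classic linked-list gap scan of this kernel vintage (VMAs were a sorted list with vm_next; later kernels use rbtree/maple-tree iterators). A sketch of the idiom, without the hugepage alignment the real function also applies (find_gap() is hypothetical):

    #include <linux/mm.h>

    static unsigned long find_gap(struct mm_struct *mm, unsigned long addr,
                                  unsigned long len)
    {
            struct vm_area_struct *vma;

            /* Walk VMAs in address order until a hole of >= len bytes opens. */
            for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                    if (!vma || addr + len <= vma->vm_start)
                            return addr;        /* gap found (or end of list) */
                    addr = vma->vm_end;         /* skip past this mapping */
            }
    }
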
/arch/arc/include/asm/

tlb.h:
    39  #define tlb_start_vma(tlb, vma)
    41  #define tlb_start_vma(tlb, vma) \
    44                  flush_cache_range(vma, vma->vm_start, vma->vm_end); \
    48  #define tlb_end_vma(tlb, vma) \
    51                  flush_tlb_range(vma, vma->vm_start, vma->vm_end); \

tlbflush.h:
    16  void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
    18  void local_flush_tlb_range(struct vm_area_struct *vma,
    22  #define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e)
    23  #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)

/arch/parisc/include/asm/

tlb.h:
     9  #define tlb_start_vma(tlb, vma) \
    11          flush_cache_range(vma, vma->vm_start, vma->vm_end); \
    14  #define tlb_end_vma(tlb, vma) \
    16          flush_tlb_range(vma, vma->vm_start, vma->vm_end); \

/arch/powerpc/platforms/cell/

spu_fault.c:
    38          struct vm_area_struct *vma;     /* in spu_handle_mm_fault() */
    50          vma = find_vma(mm, ea);
    51          if (!vma)
    54          if (ea < vma->vm_start) {
    55                  if (!(vma->vm_flags & VM_GROWSDOWN))
    57                  if (expand_stack(vma, ea))
    63                  if (!(vma->vm_flags & VM_WRITE))
    68                  if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
    73          *flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);

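This is the canonical fault-resolution shape: look up the VMA, grow the stack if the address sits just below a VM_GROWSDOWN mapping, check access rights, then hand off to handle_mm_fault(). A self-contained sketch using this era's handle_mm_fault(mm, vma, addr, flags) signature and mmap_sem locking (resolve_fault() is hypothetical):

    #include <linux/mm.h>
    #include <linux/sched.h>

    static int resolve_fault(struct mm_struct *mm, unsigned long ea,
                             bool is_write)
    {
            struct vm_area_struct *vma;
            int fault = VM_FAULT_SIGBUS;

            down_read(&mm->mmap_sem);
            vma = find_vma(mm, ea);
            if (!vma)
                    goto out;

            /* Below the VMA: only legal if the stack can grow down to ea. */
            if (ea < vma->vm_start &&
                (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, ea)))
                    goto out;

            /* Access-rights check before taking the fault. */
            if (is_write ? !(vma->vm_flags & VM_WRITE)
                         : !(vma->vm_flags & (VM_READ | VM_EXEC)))
                    goto out;

            fault = handle_mm_fault(mm, vma, ea,
                                    is_write ? FAULT_FLAG_WRITE : 0);
    out:
            up_read(&mm->mmap_sem);
            return fault;
    }
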
/arch/arm64/kernel/

sys_compat.c:
    35          struct vm_area_struct *vma;     /* in do_compat_cache_op() */
    41          vma = find_vma(mm, start);
    42          if (vma && vma->vm_start < end) {
    43                  if (start < vma->vm_start)
    44                          start = vma->vm_start;
    45                  if (end > vma->vm_end)
    46                          end = vma->vm_end;

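Because find_vma() returns the first VMA whose vm_end exceeds start, the mapping may begin above start and end before end, hence the two-sided clamp. A sketch including the mmap_sem locking the excerpt elides (cache_op_user_range() and do_cache_op_range() are hypothetical):

    #include <linux/mm.h>

    static int cache_op_user_range(struct mm_struct *mm, unsigned long start,
                                   unsigned long end)
    {
            struct vm_area_struct *vma;
            int ret = -EINVAL;

            down_read(&mm->mmap_sem);
            vma = find_vma(mm, start);      /* first VMA with vm_end > start */
            if (vma && vma->vm_start < end) {
                    if (start < vma->vm_start)
                            start = vma->vm_start;
                    if (end > vma->vm_end)
                            end = vma->vm_end;
                    ret = do_cache_op_range(start, end);  /* hypothetical op */
            }
            up_read(&mm->mmap_sem);
            return ret;
    }
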
/arch/sh/include/asm/

tlbflush.h:
    15  extern void local_flush_tlb_range(struct vm_area_struct *vma,
    18  extern void local_flush_tlb_page(struct vm_area_struct *vma,
    30  extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
    32  extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
    40  #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
    43  #define flush_tlb_range(vma, start, end) \
    44          local_flush_tlb_range(vma, start, end)

/arch/arm/mm/

fault-armv.c:
    40  static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
    56                  flush_cache_page(vma, address, pfn);
    61                  set_pte_at(vma->vm_mm, address, ptep, entry);
    62                  flush_tlb_page(vma, address);
    92  static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
   102          pgd = pgd_offset(vma->vm_mm, address);
   119          ptl = pte_lockptr(vma->vm_mm, pmd);
   123          ret = do_adjust_pte(vma, address, pfn, pte);
   132  make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
   135          struct mm_struct *mm = vma->vm_mm;
    [all …]

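adjust_pte() (line 102 onward) is built around the textbook page-table walk. A sketch of that walk with this era's accessors, stopping at the first absent level (walk_to_pte() is hypothetical; the caller must pte_unmap() the result):

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            pud_t *pud;
            pmd_t *pmd;

            if (pgd_none_or_clear_bad(pgd))
                    return NULL;

            pud = pud_offset(pgd, addr);
            if (pud_none_or_clear_bad(pud))
                    return NULL;

            pmd = pmd_offset(pud, addr);
            if (pmd_none_or_clear_bad(pmd))
                    return NULL;

            return pte_offset_map(pmd, addr);
    }
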
/arch/alpha/kernel/

pci-sysfs.c:
    18                          struct vm_area_struct *vma,     /* in hose_mmap_page_range() */
    28          vma->vm_pgoff += base >> PAGE_SHIFT;
    29          vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
    31          return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
    32                                    vma->vm_end - vma->vm_start,
    33                                    vma->vm_page_prot);
    37                          struct vm_area_struct *vma, int sparse)  /* in __pci_mmap_fits() */
    42          nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
    43          start = vma->vm_pgoff;
    66                          struct vm_area_struct *vma, int sparse)  /* in pci_mmap_resource() */
    [all …]

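__pci_mmap_fits() boils down to an in-pages bounds check: the requested page span must lie inside the BAR. A simplified sketch that ignores the sparse-space shift the real helper also applies (mmap_fits() is hypothetical):

    #include <linux/pci.h>
    #include <linux/mm.h>

    static int mmap_fits(struct pci_dev *pdev, int num,
                         struct vm_area_struct *vma)
    {
            unsigned long nr, start, size;

            nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;  /* pages asked */
            start = vma->vm_pgoff;                             /* first page  */
            size = ((pci_resource_len(pdev, num) - 1) >> PAGE_SHIFT) + 1;

            return start < size && nr <= size - start;
    }
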
/arch/mips/include/asm/

tlbflush.h:
    17  extern void local_flush_tlb_range(struct vm_area_struct *vma,
    21  extern void local_flush_tlb_page(struct vm_area_struct *vma,
    29  extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long,
    39  #define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end)
    42  #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)

/arch/m68k/include/asm/

fb.h:
    11  static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
    14          pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE;
    17  static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
    21                  pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030;
    23                  pgprot_val(vma->vm_page_prot) &= _CACHEMASK040;
    25                  pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE_S;

/arch/score/include/asm/

cacheflush.h:
     9  extern void flush_cache_range(struct vm_area_struct *vma,
    11  extern void flush_cache_page(struct vm_area_struct *vma,
    28  static inline void flush_icache_page(struct vm_area_struct *vma,
    31          if (vma->vm_flags & VM_EXEC) {
    38  #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
    41  #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
    44          if ((vma->vm_flags & VM_EXEC)) \
    45                  flush_cache_page(vma, vaddr, page_to_pfn(page));\

/arch/tile/kernel/

tlb.c:
    53  void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm,
    56          unsigned long size = vma_kernel_pagesize(vma);
    57          int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
    62  void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
    64          flush_tlb_page_mm(vma, vma->vm_mm, va);
    68  void flush_tlb_range(struct vm_area_struct *vma,
    71          unsigned long size = vma_kernel_pagesize(vma);
    72          struct mm_struct *mm = vma->vm_mm;
    73          int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;