/arch/powerpc/mm/book3s64/ |
D | radix_hugetlbpage.c |
    10  void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)  in radix__flush_hugetlb_page() argument
    13      struct hstate *hstate = hstate_file(vma->vm_file);  in radix__flush_hugetlb_page()
    16      radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);  in radix__flush_hugetlb_page()
    19  void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)  in radix__local_flush_hugetlb_page() argument
    22      struct hstate *hstate = hstate_file(vma->vm_file);  in radix__local_flush_hugetlb_page()
    25      radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);  in radix__local_flush_hugetlb_page()
    28  void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,  in radix__flush_hugetlb_tlb_range() argument
    32      struct hstate *hstate = hstate_file(vma->vm_file);  in radix__flush_hugetlb_tlb_range()
    39      radix__flush_tlb_pwc_range_psize(vma->vm_mm, start, end, psize);  in radix__flush_hugetlb_tlb_range()
    41      radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);  in radix__flush_hugetlb_tlb_range()
    [all …]
|
/arch/arc/include/asm/ |
D | tlbflush.h |
    13  void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
    15  void local_flush_tlb_range(struct vm_area_struct *vma,
    18  void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
    23  #define flush_tlb_range(vma, s, e)  local_flush_tlb_range(vma, s, e)  argument
    24  #define flush_tlb_page(vma, page)  local_flush_tlb_page(vma, page)  argument
    29  #define flush_pmd_tlb_range(vma, s, e)  local_flush_pmd_tlb_range(vma, s, e)  argument
    32  extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
    34  extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
    39  extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
|
/arch/arc/kernel/ |
D | arc_hostlink.c |
    18  static int arc_hl_mmap(struct file *fp, struct vm_area_struct *vma)  in arc_hl_mmap() argument
    20      vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);  in arc_hl_mmap()
    22      if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,  in arc_hl_mmap()
    23                             vma->vm_end - vma->vm_start,  in arc_hl_mmap()
    24                             vma->vm_page_prot)) {  in arc_hl_mmap()
|
/arch/powerpc/include/asm/book3s/64/ |
D | tlbflush.h |
    47  static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,  in flush_pmd_tlb_range() argument
    51      radix__flush_pmd_tlb_range(vma, start, end);  in flush_pmd_tlb_range()
    55  static inline void flush_pud_tlb_range(struct vm_area_struct *vma,  in flush_pud_tlb_range() argument
    59      radix__flush_pud_tlb_range(vma, start, end);  in flush_pud_tlb_range()
    63  static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,  in flush_hugetlb_tlb_range() argument
    68      radix__flush_hugetlb_tlb_range(vma, start, end);  in flush_hugetlb_tlb_range()
    71  static inline void flush_tlb_range(struct vm_area_struct *vma,  in flush_tlb_range() argument
    75      radix__flush_tlb_range(vma, start, end);  in flush_tlb_range()
    91  static inline void local_flush_tlb_page(struct vm_area_struct *vma,  in local_flush_tlb_page() argument
    95      radix__local_flush_tlb_page(vma, vmaddr);  in local_flush_tlb_page()
    [all …]
|
/arch/x86/entry/vdso/ |
D | vma.c |
    64       struct vm_area_struct *vma, struct vm_fault *vmf)  in vdso_fault() argument
    66       const struct vdso_image *image = vma->vm_mm->context.vdso_image;  in vdso_fault()
    115      struct vm_area_struct *vma;  in vdso_join_timens() local
    119      for_each_vma(vmi, vma) {  in vdso_join_timens()
    120          if (vma_is_special_mapping(vma, &vvar_mapping))  in vdso_join_timens()
    121              zap_vma_pages(vma);  in vdso_join_timens()
    130      struct vm_area_struct *vma, struct vm_fault *vmf)  in vvar_fault() argument
    132      const struct vdso_image *image = vma->vm_mm->context.vdso_image;  in vvar_fault()
    153      struct page *timens_page = find_timens_vvar_page(vma);  in vvar_fault()
    175      err = vmf_insert_pfn(vma, addr, pfn);  in vvar_fault()
    [all …]
|
/arch/arm/mm/ |
D | fault-armv.c |
    36       static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,  in do_adjust_pte() argument
    52           flush_cache_page(vma, address, pfn);  in do_adjust_pte()
    57           set_pte_at(vma->vm_mm, address, ptep, entry);  in do_adjust_pte()
    58           flush_tlb_page(vma, address);  in do_adjust_pte()
    88       static int adjust_pte(struct vm_area_struct *vma, unsigned long address,  in adjust_pte() argument
    99           pgd = pgd_offset(vma->vm_mm, address);  in adjust_pte()
    120          pte = pte_offset_map_nolock(vma->vm_mm, pmd, address, &ptl);  in adjust_pte()
    126          ret = do_adjust_pte(vma, address, pfn, pte);  in adjust_pte()
    135      make_coherent(struct address_space *mapping, struct vm_area_struct *vma,  in make_coherent() argument
    138          struct mm_struct *mm = vma->vm_mm;  in make_coherent()
    [all …]
|
D | flush.c |
    79       void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)  in flush_cache_range() argument
    82           vivt_flush_cache_range(vma, start, end);  in flush_cache_range()
    94           if (vma->vm_flags & VM_EXEC)  in flush_cache_range()
    98       void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn, unsi…  in flush_cache_pages() argument
    101          vivt_flush_cache_pages(vma, user_addr, pfn, nr);  in flush_cache_pages()
    110          if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())  in flush_cache_pages()
    159      void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,  in flush_ptrace_access() argument
    163          if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))  in flush_ptrace_access()
    165          if (vma->vm_flags & VM_EXEC)  in flush_ptrace_access()
    185      void copy_to_user_page(struct vm_area_struct *vma, struct page *page,  in copy_to_user_page() argument
    [all …]
|
/arch/ia64/include/asm/ |
D | fb.h |
    13  static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,  in fb_pgprotect() argument
    16      if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))  in fb_pgprotect()
    17          vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);  in fb_pgprotect()
    19          vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);  in fb_pgprotect()
|
/arch/nios2/mm/ |
D | cacheflush.c |
    77       struct vm_area_struct *vma;  in flush_aliases() local
    85       vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {  in flush_aliases()
    88           if (vma->vm_mm != mm)  in flush_aliases()
    90           if (!(vma->vm_flags & VM_MAYSHARE))  in flush_aliases()
    93           start = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);  in flush_aliases()
    94           flush_cache_range(vma, start, start + nr * PAGE_SIZE);  in flush_aliases()
    134      void flush_cache_range(struct vm_area_struct *vma, unsigned long start,  in flush_cache_range() argument
    138          if (vma == NULL || (vma->vm_flags & VM_EXEC))  in flush_cache_range()
    142      void flush_icache_pages(struct vm_area_struct *vma, struct page *page,  in flush_icache_pages() argument
    152      void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,  in flush_cache_page() argument
    [all …]
|
/arch/sh/include/asm/ |
D | tlbflush.h |
    16  extern void local_flush_tlb_range(struct vm_area_struct *vma,
    19  extern void local_flush_tlb_page(struct vm_area_struct *vma,
    31  extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
    33  extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
    41  #define flush_tlb_page(vma, page)  local_flush_tlb_page(vma, page)  argument
    44  #define flush_tlb_range(vma, start, end) \  argument
    45      local_flush_tlb_range(vma, start, end)
|
/arch/arm/include/asm/ |
D | cacheflush.h |
    170  #define copy_from_user_page(vma, page, vaddr, dst, src, len) \  argument
    225  vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)  in vivt_flush_cache_range() argument
    227      struct mm_struct *mm = vma->vm_mm;  in vivt_flush_cache_range()
    231          vma->vm_flags);  in vivt_flush_cache_range()
    234  static inline void vivt_flush_cache_pages(struct vm_area_struct *vma,  in vivt_flush_cache_pages() argument
    237      struct mm_struct *mm = vma->vm_mm;  in vivt_flush_cache_pages()
    242          vma->vm_flags);  in vivt_flush_cache_pages()
    249  #define flush_cache_range(vma,start,end) \  argument
    250      vivt_flush_cache_range(vma,start,end)
    251  #define flush_cache_pages(vma, addr, pfn, nr) \  argument
    [all …]
|
/arch/parisc/kernel/ |
D | cache.c |
    327      __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,  in __flush_cache_page() argument
    342          flush_tlb_page(vma, vmaddr);  in __flush_cache_page()
    346          if (vma->vm_flags & VM_EXEC)  in __flush_cache_page()
    389      void flush_icache_pages(struct vm_area_struct *vma, struct page *page,  in flush_icache_pages() argument
    472          struct vm_area_struct *vma;  in flush_dcache_folio() local
    502          vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {  in flush_dcache_folio()
    503              unsigned long offset = pgoff - vma->vm_pgoff;  in flush_dcache_folio()
    506              addr = vma->vm_start;  in flush_dcache_folio()
    514              if (addr + nr * PAGE_SIZE > vma->vm_end)  in flush_dcache_folio()
    515                  nr = (vma->vm_end - addr) / PAGE_SIZE;  in flush_dcache_folio()
    [all …]
|
/arch/alpha/kernel/ |
D | pci-sysfs.c |
    19      struct vm_area_struct *vma,  in hose_mmap_page_range() argument
    29      vma->vm_pgoff += base >> PAGE_SHIFT;  in hose_mmap_page_range()
    31      return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,  in hose_mmap_page_range()
    32                                vma->vm_end - vma->vm_start,  in hose_mmap_page_range()
    33                                vma->vm_page_prot);  in hose_mmap_page_range()
    37      struct vm_area_struct *vma, int sparse)  in __pci_mmap_fits() argument
    42      nr = vma_pages(vma);  in __pci_mmap_fits()
    43      start = vma->vm_pgoff;  in __pci_mmap_fits()
    68      struct vm_area_struct *vma, int sparse)  in pci_mmap_resource() argument
    85      if (!__pci_mmap_fits(pdev, i, vma, sparse))  in pci_mmap_resource()
    [all …]
|
/arch/mips/include/asm/ |
D | tlbflush.h |
    17  extern void local_flush_tlb_range(struct vm_area_struct *vma,
    21  extern void local_flush_tlb_page(struct vm_area_struct *vma,
    31  extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long,
    41  #define flush_tlb_range(vma, vmaddr, end)  local_flush_tlb_range(vma, vmaddr, end)  argument
    44  #define flush_tlb_page(vma, page)  local_flush_tlb_page(vma, page)  argument
|
/arch/parisc/mm/ |
D | fault.c |
    244      struct vm_area_struct *vma)  in show_signal_msg() argument
    258          vma ? ',':'\n');  in show_signal_msg()
    260      if (vma)  in show_signal_msg()
    262          vma->vm_start, vma->vm_end);  in show_signal_msg()
    270      struct vm_area_struct *vma, *prev_vma;  in do_page_fault() local
    295      vma = find_vma_prev(mm, address, &prev_vma);  in do_page_fault()
    296      if (!vma || address < vma->vm_start) {  in do_page_fault()
    299      vma = expand_stack(mm, address);  in do_page_fault()
    300      if (!vma)  in do_page_fault()
    309      if ((vma->vm_flags & acc_type) != acc_type)  in do_page_fault()
    [all …]
|
/arch/sparc/include/asm/ |
D | cacheflush_64.h |
    24  #define flush_cache_range(vma, start, end) \  argument
    25      flush_cache_mm((vma)->vm_mm)
    26  #define flush_cache_page(vma, page, pfn) \  argument
    27      flush_cache_mm((vma)->vm_mm)
    60  #define copy_to_user_page(vma, page, vaddr, dst, src, len) \  argument
    62      flush_cache_page(vma, vaddr, page_to_pfn(page)); \
    64      flush_ptrace_access(vma, page, vaddr, src, len, 0); \
    67  #define copy_from_user_page(vma, page, vaddr, dst, src, len) \  argument
    69      flush_cache_page(vma, vaddr, page_to_pfn(page)); \
    71      flush_ptrace_access(vma, page, vaddr, dst, len, 1); \
|
D | cacheflush_32.h |
    14  #define flush_cache_range(vma,start,end) \  argument
    15      sparc32_cachetlb_ops->cache_range(vma, start, end)
    16  #define flush_cache_page(vma,addr,pfn) \  argument
    17      sparc32_cachetlb_ops->cache_page(vma, addr)
    20  #define copy_to_user_page(vma, page, vaddr, dst, src, len) \  argument
    22      flush_cache_page(vma, vaddr, page_to_pfn(page));\
    25  #define copy_from_user_page(vma, page, vaddr, dst, src, len) \  argument
    27      flush_cache_page(vma, vaddr, page_to_pfn(page));\
|
/arch/s390/kernel/ |
D | vdso.c |
    58       struct vm_area_struct *vma;  in vdso_join_timens() local
    61       for_each_vma(vmi, vma) {  in vdso_join_timens()
    62           if (!vma_is_special_mapping(vma, &vvar_mapping))  in vdso_join_timens()
    64           zap_vma_pages(vma);  in vdso_join_timens()
    73       struct vm_area_struct *vma, struct vm_fault *vmf)  in vvar_fault() argument
    75       struct page *timens_page = find_timens_vvar_page(vma);  in vvar_fault()
    88       err = vmf_insert_pfn(vma, addr, pfn);  in vvar_fault()
    111      return vmf_insert_pfn(vma, vmf->address, pfn);  in vvar_fault()
    115      struct vm_area_struct *vma)  in vdso_mremap() argument
    117      current->mm->context.vdso_base = vma->vm_start;  in vdso_mremap()
    [all …]
|
/arch/loongarch/kernel/ |
D | vdso.c |
    51       struct vm_area_struct *vma, struct vm_fault *vmf)  in vvar_fault() argument
    54       struct page *timens_page = find_timens_vvar_page(vma);  in vvar_fault()
    84       return vmf_insert_pfn(vma, vmf->address, pfn);  in vvar_fault()
    136      struct vm_area_struct *vma;  in vdso_join_timens() local
    141      for_each_vma(vmi, vma) {  in vdso_join_timens()
    142          if (vma_is_special_mapping(vma, &vdso_info.data_mapping))  in vdso_join_timens()
    143              zap_vma_pages(vma);  in vdso_join_timens()
    168      struct vm_area_struct *vma;  in arch_setup_additional_pages() local
    186      vma = _install_special_mapping(mm, data_addr, VVAR_SIZE,  in arch_setup_additional_pages()
    189      if (IS_ERR(vma)) {  in arch_setup_additional_pages()
    [all …]
|
/arch/loongarch/include/asm/ |
D | tlbflush.h |
    23  extern void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long en…
    25  extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
    32  extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long, unsigned long);
    41  #define flush_tlb_range(vma, vmaddr, end)  local_flush_tlb_range(vma, vmaddr, end)  argument
    43  #define flush_tlb_page(vma, page)  local_flush_tlb_page(vma, page)  argument
|
/arch/alpha/include/asm/ |
D | tlbflush.h |
    39       struct vm_area_struct *vma,  in ev4_flush_tlb_current_page() argument
    43       if (vma->vm_flags & VM_EXEC) {  in ev4_flush_tlb_current_page()
    52       struct vm_area_struct *vma,  in ev5_flush_tlb_current_page() argument
    55       if (vma->vm_flags & VM_EXEC)  in ev5_flush_tlb_current_page()
    117      flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)  in flush_tlb_page() argument
    119          struct mm_struct *mm = vma->vm_mm;  in flush_tlb_page()
    122          flush_tlb_current_page(mm, vma, addr);  in flush_tlb_page()
    130      flush_tlb_range(struct vm_area_struct *vma, unsigned long start,  in flush_tlb_range() argument
    133          flush_tlb_mm(vma->vm_mm);  in flush_tlb_range()
|
/arch/arm64/include/asm/ |
D | hugetlb.h |
    33  extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
    43  extern pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
    54  extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
    58  extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
    65  static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,  in flush_hugetlb_tlb_range() argument
    69      unsigned long stride = huge_page_size(hstate_vma(vma));  in flush_hugetlb_tlb_range()
    72      __flush_tlb_range(vma, start, end, stride, false, 2);  in flush_hugetlb_tlb_range()
    74      __flush_tlb_range(vma, start, end, stride, false, 1);  in flush_hugetlb_tlb_range()
    76      __flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);  in flush_hugetlb_tlb_range()
|
/arch/powerpc/mm/ |
D | fault.c |
    88       struct vm_area_struct *vma)  in bad_access_pkey() argument
    110      pkey = vma_pkey(vma);  in bad_access_pkey()
    231      struct vm_area_struct *vma)  in access_pkey_error() argument
    238      if (!arch_vma_access_permitted(vma, is_write, is_exec, 0))  in access_pkey_error()
    244      static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma)  in access_error() argument
    257      return !(vma->vm_flags & VM_EXEC) &&  in access_error()
    259          !(vma->vm_flags & (VM_READ | VM_WRITE)));  in access_error()
    263      if (unlikely(!(vma->vm_flags & VM_WRITE)))  in access_error()
    273      if (unlikely(!vma_is_accessible(vma)))  in access_error()
    276      if (unlikely(radix_enabled() && ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)))  in access_error()
    [all …]
|
/arch/nios2/kernel/ |
D | sys_nios2.c |
    24  struct vm_area_struct *vma;  in sys_cacheflush() local
    45  vma = find_vma(mm, addr);  in sys_cacheflush()
    46  if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {  in sys_cacheflush()
    51  flush_cache_range(vma, addr, addr + len);  in sys_cacheflush()
|
/arch/powerpc/include/asm/ |
D | fb.h |
    9   static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,  in fb_pgprotect() argument
    12      vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT,  in fb_pgprotect()
    13                                               vma->vm_end - vma->vm_start,  in fb_pgprotect()
    14                                               vma->vm_page_prot);  in fb_pgprotect()
|