
Lines Matching refs:vma (mm/mprotect.c)

Each entry gives the source line number in mm/mprotect.c, the matching line, and the enclosing function; a trailing "argument" or "local" marks whether vma is a function parameter or a local variable at that definition.

41 static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,  in lock_pte_protection()  argument
49 return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection()
51 pmdl = pmd_lock(vma->vm_mm, pmd); in lock_pte_protection()
57 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection()
62 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, in change_pte_range() argument
66 struct mm_struct *mm = vma->vm_mm; in change_pte_range()
71 pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl); in change_pte_range()
75 flush_tlb_batched_pending(vma->vm_mm); in change_pte_range()
90 page = vm_normal_page(vma, addr, oldpte); in change_pte_range()
107 !(vma->vm_flags & VM_SOFTDIRTY))) { in change_pte_range()
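change_pte_range() is the leaf of the walk: under the lock obtained above it iterates the PTEs in the range, rewrites each present entry with the new protection, and returns how many pages it changed. A toy model of that loop shape (the bit values and helpers are invented; real PTE layouts are hardware-defined):

    #include <stdio.h>

    #define PTE_PRESENT 0x1UL
    #define PTE_WRITE   0x2UL

    /* Walk an array of fake ptes; for each present entry drop the write
     * bit ("apply newprot") and count it, like change_pte_range()'s
     * pages counter. */
    static unsigned long toy_change_pte_range(unsigned long *ptes, int n)
    {
            unsigned long pages = 0;

            for (int i = 0; i < n; i++) {
                    if (!(ptes[i] & PTE_PRESENT))
                            continue;               /* hole, like pte_none() */
                    if (ptes[i] & PTE_WRITE) {
                            ptes[i] &= ~PTE_WRITE;
                            pages++;
                    }
            }
            return pages;
    }

    int main(void)
    {
            unsigned long ptes[] = { 0x3, 0x0, 0x1, 0x3 };

            printf("changed %lu ptes\n", toy_change_pte_range(ptes, 4));
            return 0;
    }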
137 static inline unsigned long change_pmd_range(struct vm_area_struct *vma, in change_pmd_range() argument
142 struct mm_struct *mm = vma->vm_mm; in change_pmd_range()
164 split_huge_page_pmd(vma, addr, pmd); in change_pmd_range()
166 int nr_ptes = change_huge_pmd(vma, pmd, addr, in change_pmd_range()
181 this_pages = change_pte_range(vma, pmd, addr, next, newprot, in change_pmd_range()
194 static inline unsigned long change_pud_range(struct vm_area_struct *vma, in change_pud_range() argument
207 pages += change_pmd_range(vma, pud, addr, next, newprot, in change_pud_range()
214 static unsigned long change_protection_range(struct vm_area_struct *vma, in change_protection_range() argument
218 struct mm_struct *mm = vma->vm_mm; in change_protection_range()
226 flush_cache_range(vma, addr, end); in change_protection_range()
232 pages += change_pud_range(vma, pgd, addr, next, newprot, in change_protection_range()
238 flush_tlb_range(vma, start, end); in change_protection_range()
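change_pmd_range(), change_pud_range() and change_protection_range() all share one loop shape: advance through [addr, end) one table-entry span at a time, clamp each span's end to the overall end, and descend one level per slice. A minimal model of that slicing (SPAN_END mirrors the kernel's p?d_addr_end() macros minus their overflow handling; the 2 MiB span is the x86-64 PMD size, used only as an example):

    #include <stdio.h>

    /* End of the current span, clamped to the end of the whole range. */
    #define SPAN_END(addr, end, size) \
            ({ unsigned long __b = ((addr) + (size)) & ~((size) - 1); \
               __b < (end) ? __b : (end); })

    #define PMD_SPAN (1UL << 21)    /* 2 MiB, illustrative */

    int main(void)
    {
            unsigned long start = 0x1ff000, end = 0x601000, addr, next;

            /* Partial head slice, whole middle slices, partial tail slice. */
            for (addr = start; addr < end; addr = next) {
                    next = SPAN_END(addr, end, PMD_SPAN);
                    printf("slice: %#lx-%#lx\n", addr, next);
            }
            return 0;
    }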
244 unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, in change_protection() argument
250 if (is_vm_hugetlb_page(vma)) in change_protection()
251 pages = hugetlb_change_protection(vma, start, end, newprot); in change_protection()
253 pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa); in change_protection()
279 static int prot_none_walk(struct vm_area_struct *vma, unsigned long start, in prot_none_walk() argument
295 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, in mprotect_fixup() argument
298 struct mm_struct *mm = vma->vm_mm; in mprotect_fixup()
299 unsigned long oldflags = vma->vm_flags; in mprotect_fixup()
307 *pprev = vma; in mprotect_fixup()
317 (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in mprotect_fixup()
319 error = prot_none_walk(vma, start, end, newflags); in mprotect_fixup()
343 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in mprotect_fixup()
345 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), in mprotect_fixup()
346 vma->vm_userfaultfd_ctx, vma_get_anon_name(vma)); in mprotect_fixup()
348 vma = *pprev; in mprotect_fixup()
352 *pprev = vma; in mprotect_fixup()
354 if (start != vma->vm_start) { in mprotect_fixup()
355 error = split_vma(mm, vma, start, 1); in mprotect_fixup()
360 if (end != vma->vm_end) { in mprotect_fixup()
361 error = split_vma(mm, vma, end, 0); in mprotect_fixup()
371 vma->vm_flags = newflags; in mprotect_fixup()
372 dirty_accountable = vma_wants_writenotify(vma); in mprotect_fixup()
373 vma_set_page_prot(vma); in mprotect_fixup()
375 change_protection(vma, start, end, vma->vm_page_prot, in mprotect_fixup()
384 populate_vma_page_range(vma, start, end, NULL); in mprotect_fixup()
387 vm_stat_account(mm, oldflags, vma->vm_file, -nrpages); in mprotect_fixup()
388 vm_stat_account(mm, newflags, vma->vm_file, nrpages); in mprotect_fixup()
389 perf_event_mmap(vma); in mprotect_fixup()
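Lines 354-361 above are where a partially covered VMA gets cut down: if the requested range starts after vm_start, split_vma() cuts at start; if it ends before vm_end, it cuts again at end, so only the middle piece receives newflags. A toy model of those two checks (simplified types; nothing here is kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_vma {
            unsigned long vm_start, vm_end, vm_flags;
            struct toy_vma *vm_next;
    };

    /* Cut vma at address "at"; the tail keeps the old flags. */
    static struct toy_vma *toy_split(struct toy_vma *vma, unsigned long at)
    {
            struct toy_vma *tail = malloc(sizeof(*tail));

            if (!tail)
                    abort();        /* the kernel returns -ENOMEM instead */
            *tail = *vma;
            tail->vm_start = at;
            vma->vm_end = at;
            vma->vm_next = tail;
            return tail;
    }

    int main(void)
    {
            struct toy_vma head = { 0x1000, 0x9000, 0x1, NULL };
            unsigned long start = 0x3000, end = 0x6000;
            struct toy_vma *vma = &head;

            /* The two checks from mprotect_fixup(). */
            if (start != vma->vm_start)
                    vma = toy_split(vma, start);
            if (end != vma->vm_end)
                    toy_split(vma, end);

            vma->vm_flags = 0x3;    /* "newflags" for [start, end) only */

            for (struct toy_vma *v = &head; v; v = v->vm_next)
                    printf("%#lx-%#lx flags=%#lx\n",
                           v->vm_start, v->vm_end, v->vm_flags);
            return 0;
    }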
401 struct vm_area_struct *vma, *prev; in SYSCALL_DEFINE3() local
430 vma = find_vma(current->mm, start); in SYSCALL_DEFINE3()
432 if (!vma) in SYSCALL_DEFINE3()
434 prev = vma->vm_prev; in SYSCALL_DEFINE3()
436 if (vma->vm_start >= end) in SYSCALL_DEFINE3()
438 start = vma->vm_start; in SYSCALL_DEFINE3()
440 if (!(vma->vm_flags & VM_GROWSDOWN)) in SYSCALL_DEFINE3()
443 if (vma->vm_start > start) in SYSCALL_DEFINE3()
446 end = vma->vm_end; in SYSCALL_DEFINE3()
448 if (!(vma->vm_flags & VM_GROWSUP)) in SYSCALL_DEFINE3()
452 if (start > vma->vm_start) in SYSCALL_DEFINE3()
453 prev = vma; in SYSCALL_DEFINE3()
461 newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC)); in SYSCALL_DEFINE3()
469 error = security_file_mprotect(vma, reqprot, prot); in SYSCALL_DEFINE3()
473 tmp = vma->vm_end; in SYSCALL_DEFINE3()
476 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); in SYSCALL_DEFINE3()
486 vma = prev->vm_next; in SYSCALL_DEFINE3()
487 if (!vma || vma->vm_start != nstart) { in SYSCALL_DEFINE3()
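The SYSCALL_DEFINE3 fragments above are the kernel side of mprotect(2): look up the first VMA, validate the range and flags, then call mprotect_fixup() per VMA until the range is covered. From userspace, re-protecting only the first page of a two-page mapping forces the kernel to split the VMA and change protection on just one piece:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long psz = sysconf(_SC_PAGESIZE);
            char *p = mmap(NULL, 2 * psz, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }
            memset(p, 0, 2 * psz);

            /* Drop write on the first page only: the kernel splits the
             * VMA at the page boundary and changes just one piece. */
            if (mprotect(p, psz, PROT_READ) != 0) {
                    perror("mprotect");
                    return 1;
            }
            p[psz] = 1;     /* second page is still writable */
            printf("first page is now read-only\n");
            return 0;
    }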