
Lines Matching +full:tlb +full:- +full:split

1 // SPDX-License-Identifier: GPL-2.0
13 #include <linux/page-isolation.h>
28 #include <linux/backing-dev.h>
35 #include <asm/tlb.h>
40 struct mmu_gather *tlb; member
45 * Any behaviour which results in changes to the vma->vm_flags needs to
71 /* Add 1 for NUL terminator at the end of the anon_name->name */ in anon_vma_name_alloc()
75 kref_init(&anon_name->kref); in anon_vma_name_alloc()
76 memcpy(anon_name->name, name, count); in anon_vma_name_alloc()
91 mmap_assert_locked(vma->vm_mm); in anon_vma_name()
93 if (vma->vm_file) in anon_vma_name()
96 return vma->anon_name; in anon_vma_name()
99 /* mmap_lock should be write-locked */
106 vma->anon_name = NULL; in replace_anon_vma_name()
114 vma->anon_name = anon_vma_name_reuse(anon_name); in replace_anon_vma_name()
124 return -EINVAL; in replace_anon_vma_name()
140 struct mm_struct *mm = vma->vm_mm; in madvise_update_vma()
144 if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) { in madvise_update_vma()
149 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in madvise_update_vma()
150 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, in madvise_update_vma()
151 vma->vm_file, pgoff, vma_policy(vma), in madvise_update_vma()
152 vma->vm_userfaultfd_ctx, anon_name); in madvise_update_vma()
160 if (start != vma->vm_start) { in madvise_update_vma()
161 if (unlikely(mm->map_count >= sysctl_max_map_count)) in madvise_update_vma()
162 return -ENOMEM; in madvise_update_vma()
168 if (end != vma->vm_end) { in madvise_update_vma()
169 if (unlikely(mm->map_count >= sysctl_max_map_count)) in madvise_update_vma()
170 return -ENOMEM; in madvise_update_vma()
180 vma->vm_flags = new_flags; in madvise_update_vma()
181 if (!vma->vm_file) { in madvise_update_vma()
195 struct vm_area_struct *vma = walk->private; in swapin_walk_pmd_entry()
207 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); in swapin_walk_pmd_entry()
208 pte = *(orig_pte + ((index - start) / PAGE_SIZE)); in swapin_walk_pmd_entry()
234 XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start)); in force_shm_swapin_readahead()
235 pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1); in force_shm_swapin_readahead()
268 struct mm_struct *mm = vma->vm_mm; in madvise_willneed()
269 struct file *file = vma->vm_file; in madvise_willneed()
275 walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma); in madvise_willneed()
280 if (shmem_mapping(file->f_mapping)) { in madvise_willneed()
282 file->f_mapping); in madvise_willneed()
287 return -EBADF; in madvise_willneed()
303 offset = (loff_t)(start - vma->vm_start) in madvise_willneed()
304 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in madvise_willneed()
306 vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED); in madvise_willneed()
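For context, the WILLNEED path above has three branches: anonymous ranges are swapped in through swapin_walk_ops, shmem mappings take force_shm_swapin_readahead(), and regular files fall through to vfs_fadvise(..., POSIX_FADV_WILLNEED). A minimal userspace sketch of the hint being serviced; the file path is a hypothetical placeholder:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/tmp/data.bin", O_RDONLY);   /* hypothetical path */
            if (fd < 0) { perror("open"); return 1; }

            struct stat st;
            if (fstat(fd, &st) < 0) { perror("fstat"); return 1; }

            char *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
            if (p == MAP_FAILED) { perror("mmap"); return 1; }

            /* For a regular file this ends up in
             * vfs_fadvise(file, offset, len, POSIX_FADV_WILLNEED). */
            if (madvise(p, st.st_size, MADV_WILLNEED) < 0)
                    perror("madvise(MADV_WILLNEED)");

            munmap(p, st.st_size);
            close(fd);
            return 0;
    }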
316 struct madvise_walk_private *private = walk->private; in madvise_cold_or_pageout_pte_range()
317 struct mmu_gather *tlb = private->tlb; in madvise_cold_or_pageout_pte_range() local
318 bool pageout = private->pageout; in madvise_cold_or_pageout_pte_range()
319 struct mm_struct *mm = tlb->mm; in madvise_cold_or_pageout_pte_range()
320 struct vm_area_struct *vma = walk->vma; in madvise_cold_or_pageout_pte_range()
327 return -EINTR; in madvise_cold_or_pageout_pte_range()
334 tlb_change_page_size(tlb, HPAGE_PMD_SIZE); in madvise_cold_or_pageout_pte_range()
355 if (next - addr != HPAGE_PMD_SIZE) { in madvise_cold_or_pageout_pte_range()
374 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in madvise_cold_or_pageout_pte_range()
384 list_add(&page->lru, &page_list); in madvise_cold_or_pageout_pte_range()
399 tlb_change_page_size(tlb, PAGE_SIZE); in madvise_cold_or_pageout_pte_range()
400 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in madvise_cold_or_pageout_pte_range()
417 * Creating a THP page is expensive so split it only if we in madvise_cold_or_pageout_pte_range()
418 * are sure it's worth it. Split it if we are the only owner. in madvise_cold_or_pageout_pte_range()
438 pte--; in madvise_cold_or_pageout_pte_range()
439 addr -= PAGE_SIZE; in madvise_cold_or_pageout_pte_range()
451 tlb->fullmm); in madvise_cold_or_pageout_pte_range()
454 tlb_remove_tlb_entry(tlb, pte, addr); in madvise_cold_or_pageout_pte_range()
460 * As a side effect, it confuses idle-page tracking in madvise_cold_or_pageout_pte_range()
470 list_add(&page->lru, &page_list); in madvise_cold_or_pageout_pte_range()
489 static void madvise_cold_page_range(struct mmu_gather *tlb, in madvise_cold_page_range() argument
495 .tlb = tlb, in madvise_cold_page_range()
498 tlb_start_vma(tlb, vma); in madvise_cold_page_range()
499 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); in madvise_cold_page_range()
500 tlb_end_vma(tlb, vma); in madvise_cold_page_range()
507 struct mm_struct *mm = vma->vm_mm; in madvise_cold()
508 struct mmu_gather tlb; in madvise_cold() local
512 return -EINVAL; in madvise_cold()
515 tlb_gather_mmu(&tlb, mm, start_addr, end_addr); in madvise_cold()
516 madvise_cold_page_range(&tlb, vma, start_addr, end_addr); in madvise_cold()
517 tlb_finish_mmu(&tlb, start_addr, end_addr); in madvise_cold()
522 static void madvise_pageout_page_range(struct mmu_gather *tlb, in madvise_pageout_page_range() argument
528 .tlb = tlb, in madvise_pageout_page_range()
531 tlb_start_vma(tlb, vma); in madvise_pageout_page_range()
532 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); in madvise_pageout_page_range()
533 tlb_end_vma(tlb, vma); in madvise_pageout_page_range()
540 if (!vma->vm_file) in can_do_pageout()
543 * paging out pagecache only for non-anonymous mappings that correspond in can_do_pageout()
545 * otherwise we'd be including shared non-exclusive mappings, which in can_do_pageout()
548 return inode_owner_or_capable(file_inode(vma->vm_file)) || in can_do_pageout()
549 inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0; in can_do_pageout()
556 struct mm_struct *mm = vma->vm_mm; in madvise_pageout()
557 struct mmu_gather tlb; in madvise_pageout() local
561 return -EINVAL; in madvise_pageout()
567 tlb_gather_mmu(&tlb, mm, start_addr, end_addr); in madvise_pageout()
568 madvise_pageout_page_range(&tlb, vma, start_addr, end_addr); in madvise_pageout()
569 tlb_finish_mmu(&tlb, start_addr, end_addr); in madvise_pageout()
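madvise_cold() and madvise_pageout() are near-identical wrappers: each brackets a walk_page_range() over the shared cold_walk_ops with tlb_gather_mmu()/tlb_finish_mmu(), differing only in walk_private.pageout and the extra can_do_pageout() permission check. Both hints are non-destructive from userspace. A minimal sketch, with the MADV_* values guarded in case older libc headers predate Linux 5.4:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    #ifndef MADV_COLD
    #define MADV_COLD    20     /* Linux 5.4+ */
    #endif
    #ifndef MADV_PAGEOUT
    #define MADV_PAGEOUT 21     /* Linux 5.4+ */
    #endif

    int main(void)
    {
            size_t len = 64 * 4096;
            char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (buf == MAP_FAILED) { perror("mmap"); return 1; }
            memset(buf, 0xab, len);         /* fault the pages in */

            /* Deactivate: move the pages toward the inactive LRU list. */
            if (madvise(buf, len, MADV_COLD) < 0)
                    perror("madvise(MADV_COLD)");

            /* Reclaim: write the pages to swap/backing store right away. */
            if (madvise(buf, len, MADV_PAGEOUT) < 0)
                    perror("madvise(MADV_PAGEOUT)");

            /* Both hints are non-destructive: contents are still intact. */
            int ok = (buf[0] == (char)0xab);
            munmap(buf, len);
            return ok ? 0 : 1;
    }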
578 struct mmu_gather *tlb = walk->private; in madvise_free_pte_range() local
579 struct mm_struct *mm = tlb->mm; in madvise_free_pte_range()
580 struct vm_area_struct *vma = walk->vma; in madvise_free_pte_range()
589 if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next)) in madvise_free_pte_range()
595 tlb_change_page_size(tlb, PAGE_SIZE); in madvise_free_pte_range()
606 * prevent swap-in, which is more expensive than in madvise_free_pte_range()
615 nr_swap--; in madvise_free_pte_range()
617 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); in madvise_free_pte_range()
627 * is owned by only this process, split it and in madvise_free_pte_range()
648 pte--; in madvise_free_pte_range()
649 addr -= PAGE_SIZE; in madvise_free_pte_range()
678 * Some architectures (e.g., PPC) don't update the TLB in madvise_free_pte_range()
684 tlb->fullmm); in madvise_free_pte_range()
689 tlb_remove_tlb_entry(tlb, pte, addr); in madvise_free_pte_range()
695 if (current->mm == mm) in madvise_free_pte_range()
714 struct mm_struct *mm = vma->vm_mm; in madvise_free_single_vma()
716 struct mmu_gather tlb; in madvise_free_single_vma() local
720 return -EINVAL; in madvise_free_single_vma()
722 range.start = max(vma->vm_start, start_addr); in madvise_free_single_vma()
723 if (range.start >= vma->vm_end) in madvise_free_single_vma()
724 return -EINVAL; in madvise_free_single_vma()
725 range.end = min(vma->vm_end, end_addr); in madvise_free_single_vma()
726 if (range.end <= vma->vm_start) in madvise_free_single_vma()
727 return -EINVAL; in madvise_free_single_vma()
732 tlb_gather_mmu(&tlb, mm, range.start, range.end); in madvise_free_single_vma()
736 tlb_start_vma(&tlb, vma); in madvise_free_single_vma()
737 walk_page_range(vma->vm_mm, range.start, range.end, in madvise_free_single_vma()
738 &madvise_free_walk_ops, &tlb); in madvise_free_single_vma()
739 tlb_end_vma(&tlb, vma); in madvise_free_single_vma()
741 tlb_finish_mmu(&tlb, range.start, range.end); in madvise_free_single_vma()
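madvise_free_single_vma() clamps the requested range to the VMA before running madvise_free_walk_ops under an mmu_gather. From userspace, MADV_FREE marks anonymous pages lazily freeable: reclaim may drop them later, and a write cancels the pending free. A minimal sketch, with the constant guarded for pre-4.5 headers:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    #ifndef MADV_FREE
    #define MADV_FREE 8         /* Linux 4.5+ */
    #endif

    int main(void)
    {
            size_t len = 16 * 4096;
            char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (buf == MAP_FAILED) { perror("mmap"); return 1; }
            memset(buf, 0xab, len);

            /* Lazily freeable: reclaim may drop these pages at any point,
             * but until it does, reads still return the old contents. */
            if (madvise(buf, len, MADV_FREE) < 0)
                    perror("madvise(MADV_FREE)");

            buf[0] = 1;     /* a write cancels the pending free for that page */

            munmap(buf, len);
            return 0;
    }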
768 zap_page_range(vma, start, end - start); in madvise_dontneed_single_vma()
777 struct mm_struct *mm = vma->vm_mm; in madvise_dontneed_free()
781 return -EINVAL; in madvise_dontneed_free()
789 return -ENOMEM; in madvise_dontneed_free()
790 if (start < vma->vm_start) { in madvise_dontneed_free()
793 * with the lowest vma->vm_start where start in madvise_dontneed_free()
794 * is also < vma->vm_end. If start < in madvise_dontneed_free()
795 * vma->vm_start it means a hole materialized in madvise_dontneed_free()
800 return -ENOMEM; in madvise_dontneed_free()
803 return -EINVAL; in madvise_dontneed_free()
804 if (end > vma->vm_end) { in madvise_dontneed_free()
806 * Don't fail if end > vma->vm_end. If the old in madvise_dontneed_free()
814 * end-vma->vm_end range, but the manager can in madvise_dontneed_free()
817 end = vma->vm_end; in madvise_dontneed_free()
827 return -EINVAL; in madvise_dontneed_free()
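For contrast with MADV_FREE, MADV_DONTNEED reaches zap_page_range() in madvise_dontneed_single_vma() and discards the range immediately; private anonymous memory then reads back zero-filled. A minimal sketch:

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 4 * 4096;
            char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (buf == MAP_FAILED) { perror("mmap"); return 1; }
            memset(buf, 0xab, len);

            /* Zap the range now; the VMA itself stays in place. */
            if (madvise(buf, len, MADV_DONTNEED) < 0)
                    perror("madvise(MADV_DONTNEED)");

            /* Private anonymous memory reads back zero-filled on next touch. */
            assert(buf[0] == 0);

            munmap(buf, len);
            return 0;
    }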
841 struct mm_struct *mm = vma->vm_mm; in madvise_remove()
845 if (vma->vm_flags & VM_LOCKED) in madvise_remove()
846 return -EINVAL; in madvise_remove()
848 f = vma->vm_file; in madvise_remove()
850 if (!f || !f->f_mapping || !f->f_mapping->host) { in madvise_remove()
851 return -EINVAL; in madvise_remove()
854 if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)) in madvise_remove()
855 return -EACCES; in madvise_remove()
857 offset = (loff_t)(start - vma->vm_start) in madvise_remove()
858 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in madvise_remove()
873 offset, end - start); in madvise_remove()
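The -EACCES check above means MADV_REMOVE only works on shared, writable mappings, where it punches a hole in the backing file. A minimal sketch using a memfd; the memfd name is an arbitrary placeholder:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            size_t len = 1 << 20;
            int fd = memfd_create("madv-remove-demo", 0);   /* glibc 2.27+ */
            if (fd < 0 || ftruncate(fd, len) < 0) { perror("memfd"); return 1; }

            /* madvise_remove() demands VM_SHARED and VM_WRITE, hence
             * MAP_SHARED with PROT_WRITE; otherwise it returns -EACCES. */
            char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (p == MAP_FAILED) { perror("mmap"); return 1; }
            memset(p, 0xab, len);

            /* Punch a hole: the backing blocks are freed and the range
             * subsequently reads as zeroes. */
            if (madvise(p, len, MADV_REMOVE) < 0)
                    perror("madvise(MADV_REMOVE)");

            munmap(p, len);
            close(fd);
            return 0;
    }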
891 unsigned long new_flags = vma->vm_flags; in madvise_vma_behavior()
918 if (vma->vm_flags & VM_IO) in madvise_vma_behavior()
919 return -EINVAL; in madvise_vma_behavior()
924 if (vma->vm_file || vma->vm_flags & VM_SHARED) in madvise_vma_behavior()
925 return -EINVAL; in madvise_vma_behavior()
936 return -EINVAL; in madvise_vma_behavior()
964 if (error == -ENOMEM) in madvise_vma_behavior()
965 error = -EAGAIN; in madvise_vma_behavior()
980 return -EPERM; in madvise_inject_error()
1014 /* Ensure that all poisoned pages are removed from per-cpu lists */ in madvise_inject_error()
1076 * original range will result in this function returning -ENOMEM while still
1094 * ranges, just ignore them, but return -ENOMEM at the end. in madvise_walk_vmas()
1095 * - different from the way of handling in mlock etc. in madvise_walk_vmas()
1098 if (vma && start > vma->vm_start) in madvise_walk_vmas()
1106 return -ENOMEM; in madvise_walk_vmas()
1108 /* Here start < (end|vma->vm_end). */ in madvise_walk_vmas()
1109 if (start < vma->vm_start) { in madvise_walk_vmas()
1110 unmapped_error = -ENOMEM; in madvise_walk_vmas()
1111 start = vma->vm_start; in madvise_walk_vmas()
1116 /* Here vma->vm_start <= start < (end|vma->vm_end) */ in madvise_walk_vmas()
1117 tmp = vma->vm_end; in madvise_walk_vmas()
1121 /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */ in madvise_walk_vmas()
1126 if (prev && start < prev->vm_end) in madvise_walk_vmas()
1127 start = prev->vm_end; in madvise_walk_vmas()
1131 vma = prev->vm_next; in madvise_walk_vmas()
1148 if (vma->vm_file) in madvise_vma_anon_name()
1149 return -EBADF; in madvise_vma_anon_name()
1151 error = madvise_update_vma(vma, prev, start, end, vma->vm_flags, in madvise_vma_anon_name()
1158 if (error == -ENOMEM) in madvise_vma_anon_name()
1159 error = -EAGAIN; in madvise_vma_anon_name()
1170 return -EINVAL; in madvise_set_anon_name()
1173 /* Check to see whether len was rounded up from small -ve to zero */ in madvise_set_anon_name()
1175 return -EINVAL; in madvise_set_anon_name()
1179 return -EINVAL; in madvise_set_anon_name()
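madvise_set_anon_name() is not reached through madvise(2) itself; userspace gets here via prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...) (Linux 5.17+, CONFIG_ANON_VMA_NAME). A minimal sketch, with the prctl constants guarded for older headers; the region name is an arbitrary example:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_VMA
    #define PR_SET_VMA              0x53564d41      /* "SVMA" */
    #define PR_SET_VMA_ANON_NAME    0
    #endif

    int main(void)
    {
            size_t len = 4096;
            char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED) { perror("mmap"); return 1; }

            /* Only anonymous VMAs may be named; madvise_vma_anon_name()
             * above rejects file-backed mappings with -EBADF. */
            if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
                      (unsigned long)p, len,
                      (unsigned long)"my-demo-region") < 0)
                    perror("prctl(PR_SET_VMA)");

            /* The mapping now shows up as "[anon:my-demo-region]"
             * in /proc/self/maps. */
            munmap(p, len);
            return 0;
    }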
1193 * use appropriate read-ahead and caching techniques. The information
1198 * MADV_NORMAL - the default behavior is to read clusters. This
1199 * results in some read-ahead and read-behind.
1200 * MADV_RANDOM - the system should read the minimum amount of data
1201 * on any access, since it is unlikely that the application
1203 * MADV_SEQUENTIAL - pages in the given range will probably be accessed
1206 * MADV_WILLNEED - the application is notifying the system to read
1208 * MADV_DONTNEED - the application is finished with the given range,
1210 * MADV_FREE - the application marks pages in the given range as lazy free,
1212 * MADV_REMOVE - the application wants to free up the given range of
1214 * MADV_DONTFORK - omit this area from child's address space when forking:
1216 * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
1217 * MADV_WIPEONFORK - present the child process with zero-filled memory in this
1219 * MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
1220 * MADV_HWPOISON - trigger memory error handler as if the given memory range
1222 * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
1223 * MADV_MERGEABLE - the application recommends that KSM try to merge pages in
1225 * MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
1226 * MADV_HUGEPAGE - the application wants to back the given range by transparent
1229 * MADV_NOHUGEPAGE - mark the given range as not worth being backed by
1232 * MADV_DONTDUMP - the application wants to prevent pages in the given range
1234 * MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
1235 * MADV_COLD - the application is not expected to use this memory soon,
1238 * MADV_PAGEOUT - the application is not expected to use this memory soon,
1242 * zero - success
1243 * -EINVAL - start + len < 0, start is not page-aligned,
1246 * or the specified address range includes file, Huge TLB,
1248 * -ENOMEM - addresses in the specified range are not currently
1250 * -EIO - an I/O error occurred while paging in data.
1251 * -EBADF - map exists, but area maps something that isn't a file.
1252 * -EAGAIN - a kernel resource was temporarily unavailable.
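A small sketch of the alignment rules from the return-value list above: an unaligned start is rejected with EINVAL, while a short len is rounded up to a page boundary by the kernel:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long pg = sysconf(_SC_PAGESIZE);
            char *p = mmap(NULL, 2 * pg, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED) { perror("mmap"); return 1; }

            /* start must be page-aligned: this fails with EINVAL ... */
            if (madvise(p + 1, pg, MADV_DONTNEED) < 0 && errno == EINVAL)
                    fprintf(stderr, "unaligned start rejected\n");

            /* ... whereas a short len is page-aligned for us. */
            if (madvise(p, pg - 1, MADV_DONTNEED) == 0)
                    fprintf(stderr, "short len rounded up; call succeeded\n");

            munmap(p, 2 * pg);
            return 0;
    }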
1265 return -EINVAL; in do_madvise()
1268 return -EINVAL; in do_madvise()
1271 /* Check to see whether len was rounded up from small -ve to zero */ in do_madvise()
1273 return -EINVAL; in do_madvise()
1277 return -EINVAL; in do_madvise()
1290 return -EINTR; in do_madvise()
1309 return do_madvise(current->mm, start, len_in, behavior); in SYSCALL_DEFINE3()
1326 ret = -EINVAL; in SYSCALL_DEFINE5()
1342 ret = -ESRCH; in SYSCALL_DEFINE5()
1347 ret = -EINVAL; in SYSCALL_DEFINE5()
1354 ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH; in SYSCALL_DEFINE5()
1360 * only non-destructive hints are currently supported. in SYSCALL_DEFINE5()
1363 ret = -EPERM; in SYSCALL_DEFINE5()
1379 ret = total_len - iov_iter_count(&iter); in SYSCALL_DEFINE5()
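This SYSCALL_DEFINE5 is process_madvise(2) (Linux 5.10+): it resolves a pidfd to an mm, checks that the hint is non-destructive, and reports how many bytes were advised. A minimal self-targeting sketch via syscall(2), assuming the installed headers define SYS_pidfd_open and SYS_process_madvise:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <sys/uio.h>
    #include <unistd.h>

    #ifndef MADV_PAGEOUT
    #define MADV_PAGEOUT 21
    #endif

    int main(void)
    {
            size_t len = 16 * 4096;
            char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (buf == MAP_FAILED) { perror("mmap"); return 1; }
            memset(buf, 0xab, len);

            /* A pidfd for ourselves; in practice the caller is usually a
             * userspace memory manager targeting another process. */
            int pidfd = (int)syscall(SYS_pidfd_open, getpid(), 0);
            if (pidfd < 0) { perror("pidfd_open"); return 1; }

            struct iovec iov = { .iov_base = buf, .iov_len = len };

            /* On success the return value is the number of bytes advised,
             * i.e. total_len - iov_iter_count(&iter) in the code above. */
            ssize_t ret = syscall(SYS_process_madvise, pidfd, &iov, 1,
                                  MADV_PAGEOUT, 0);
            if (ret < 0)
                    perror("process_madvise");
            else
                    fprintf(stderr, "advised %zd bytes\n", ret);

            close(pidfd);
            munmap(buf, len);
            return 0;
    }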