Lines matching refs:vma
137 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma) in subpool_vma() argument
139 return subpool_inode(file_inode(vma->vm_file)); in subpool_vma()
327 struct vm_area_struct *vma, unsigned long address) in vma_hugecache_offset() argument
329 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
330 (vma->vm_pgoff >> huge_page_order(h)); in vma_hugecache_offset()
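The arithmetic shown at lines 329-330 turns a faulting address into a huge-page-sized page-cache index: the offset into the VMA expressed in huge pages, plus the VMA's file offset rescaled from base pages to huge pages. A minimal userspace sketch of that calculation, with hypothetical 2 MiB huge pages and made-up HUGE_PAGE_SHIFT/HUGE_PAGE_ORDER constants and vm_start/vm_pgoff values (not the kernel's types), follows.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's hstate parameters (2 MiB huge pages). */
#define HUGE_PAGE_SHIFT 21UL                    /* plays the role of huge_page_shift(h) */
#define HUGE_PAGE_ORDER (HUGE_PAGE_SHIFT - 12)  /* huge_page_order(h) with 4 KiB base pages */

/* Mirrors the formula shown at lines 329-330 of the listing. */
static unsigned long hugecache_offset(unsigned long vm_start,
				      unsigned long vm_pgoff,
				      unsigned long address)
{
	return ((address - vm_start) >> HUGE_PAGE_SHIFT) +
	       (vm_pgoff >> HUGE_PAGE_ORDER);
}

int main(void)
{
	/* VMA mapped at 0x40000000, file offset 0 (vm_pgoff = 0). */
	unsigned long vm_start = 0x40000000UL, vm_pgoff = 0;
	unsigned long addr = vm_start + 3 * (1UL << HUGE_PAGE_SHIFT) + 0x1234;

	/* The address lies in the third huge page of the file, so index = 3. */
	printf("index = %lu\n", hugecache_offset(vm_start, vm_pgoff, addr));
	return 0;
}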
333 pgoff_t linear_hugepage_index(struct vm_area_struct *vma, in linear_hugepage_index() argument
336 return vma_hugecache_offset(hstate_vma(vma), vma, address); in linear_hugepage_index()
343 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) in vma_kernel_pagesize() argument
347 if (!is_vm_hugetlb_page(vma)) in vma_kernel_pagesize()
350 hstate = hstate_vma(vma); in vma_kernel_pagesize()
363 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) in vma_mmu_pagesize() argument
365 return vma_kernel_pagesize(vma); in vma_mmu_pagesize()
397 static unsigned long get_vma_private_data(struct vm_area_struct *vma) in get_vma_private_data() argument
399 return (unsigned long)vma->vm_private_data; in get_vma_private_data()
402 static void set_vma_private_data(struct vm_area_struct *vma, in set_vma_private_data() argument
405 vma->vm_private_data = (void *)value; in set_vma_private_data()
435 static struct resv_map *vma_resv_map(struct vm_area_struct *vma) in vma_resv_map() argument
437 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in vma_resv_map()
438 if (vma->vm_flags & VM_MAYSHARE) { in vma_resv_map()
439 struct address_space *mapping = vma->vm_file->f_mapping; in vma_resv_map()
445 return (struct resv_map *)(get_vma_private_data(vma) & in vma_resv_map()
450 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) in set_vma_resv_map() argument
452 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in set_vma_resv_map()
453 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); in set_vma_resv_map()
455 set_vma_private_data(vma, (get_vma_private_data(vma) & in set_vma_resv_map()
459 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) in set_vma_resv_flags() argument
461 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in set_vma_resv_flags()
462 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); in set_vma_resv_flags()
464 set_vma_private_data(vma, get_vma_private_data(vma) | flags); in set_vma_resv_flags()
467 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) in is_vma_resv_set() argument
469 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in is_vma_resv_set()
471 return (get_vma_private_data(vma) & flag) != 0; in is_vma_resv_set()
475 void reset_vma_resv_huge_pages(struct vm_area_struct *vma) in reset_vma_resv_huge_pages() argument
477 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in reset_vma_resv_huge_pages()
478 if (!(vma->vm_flags & VM_MAYSHARE)) in reset_vma_resv_huge_pages()
479 vma->vm_private_data = (void *)0; in reset_vma_resv_huge_pages()
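For a private (non-VM_MAYSHARE) mapping, these helpers multiplex a resv_map pointer and the HPAGE_RESV_* flags through vma->vm_private_data: the pointer is recovered by masking the flag bits off (line 445) and flags are set by OR-ing them in (line 464). A small userspace sketch of that pointer-plus-flags packing, using hypothetical RESV_OWNER/RESV_UNMAPPED bit values in place of the kernel's constants and assuming the pointer's low bits are free due to allocator alignment, is below.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical flag bits kept in the low bits of the stored pointer;
 * HPAGE_RESV_OWNER/HPAGE_RESV_UNMAPPED play this role in the kernel. */
#define RESV_OWNER    0x1UL
#define RESV_UNMAPPED 0x2UL
#define RESV_MASK     (RESV_OWNER | RESV_UNMAPPED)

struct resv_map { long placeholder; };   /* stand-in payload */

/* Stand-in for vma->vm_private_data. */
static unsigned long private_data;

static struct resv_map *get_resv_map(void)
{
	/* Mask the flag bits off to recover the pointer (cf. line 445). */
	return (struct resv_map *)(private_data & ~RESV_MASK);
}

static void set_resv_flag(unsigned long flag)
{
	private_data |= flag;                 /* cf. line 464 */
}

static int is_resv_flag_set(unsigned long flag)
{
	return (private_data & flag) != 0;    /* cf. line 471 */
}

int main(void)
{
	struct resv_map *map = malloc(sizeof(*map));

	assert(((unsigned long)map & RESV_MASK) == 0); /* alignment assumption */
	private_data = (unsigned long)map;
	set_resv_flag(RESV_OWNER);

	printf("map back: %p, owner: %d, unmapped: %d\n",
	       (void *)get_resv_map(),
	       is_resv_flag_set(RESV_OWNER),
	       is_resv_flag_set(RESV_UNMAPPED));
	free(map);
	return 0;
}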
483 static int vma_has_reserves(struct vm_area_struct *vma, long chg) in vma_has_reserves() argument
485 if (vma->vm_flags & VM_NORESERVE) { in vma_has_reserves()
495 if (vma->vm_flags & VM_MAYSHARE && chg == 0) in vma_has_reserves()
502 if (vma->vm_flags & VM_MAYSHARE) in vma_has_reserves()
509 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) in vma_has_reserves()
553 struct vm_area_struct *vma, in dequeue_huge_page_vma() argument
570 if (!vma_has_reserves(vma, chg) && in dequeue_huge_page_vma()
580 zonelist = huge_zonelist(vma, address, in dequeue_huge_page_vma()
590 if (!vma_has_reserves(vma, chg)) in dequeue_huge_page_vma()
1358 struct vm_area_struct *vma, unsigned long addr) in vma_needs_reservation() argument
1364 resv = vma_resv_map(vma); in vma_needs_reservation()
1368 idx = vma_hugecache_offset(h, vma, addr); in vma_needs_reservation()
1371 if (vma->vm_flags & VM_MAYSHARE) in vma_needs_reservation()
1377 struct vm_area_struct *vma, unsigned long addr) in vma_commit_reservation() argument
1382 resv = vma_resv_map(vma); in vma_commit_reservation()
1386 idx = vma_hugecache_offset(h, vma, addr); in vma_commit_reservation()
1390 static struct page *alloc_huge_page(struct vm_area_struct *vma, in alloc_huge_page() argument
1393 struct hugepage_subpool *spool = subpool_vma(vma); in alloc_huge_page()
1394 struct hstate *h = hstate_vma(vma); in alloc_huge_page()
1409 chg = vma_needs_reservation(h, vma, addr); in alloc_huge_page()
1421 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg); in alloc_huge_page()
1437 vma_commit_reservation(h, vma, addr); in alloc_huge_page()
1453 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, in alloc_huge_page_noerr() argument
1456 struct page *page = alloc_huge_page(vma, addr, avoid_reserve); in alloc_huge_page_noerr()
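The alloc_huge_page() hits show the ordering this file relies on: find the VMA's subpool, ask vma_needs_reservation() whether a page was already reserved for this offset, dequeue with that answer factored in, then commit the reservation. A heavily simplified, self-contained model of that "check reservation, dequeue, commit" flow is sketched below; the pool structure, needs_reservation() and dequeue_page() are hypothetical stand-ins, not the kernel's accounting.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, much-reduced stand-in for an hstate free list plus reserve count. */
struct pool {
	long free_pages;
	long resv_pages;   /* pages already promised to existing mappings */
};

/* 0 if a page was reserved for this offset at mmap time, 1 if one must be
 * found now (mirrors the role of vma_needs_reservation() at line 1409). */
static long needs_reservation(bool reserved_at_mmap)
{
	return reserved_at_mmap ? 0 : 1;
}

static bool dequeue_page(struct pool *p, long chg)
{
	/* Without a prior reservation, only take a page that is not already
	 * promised to someone else. */
	long available = chg ? p->free_pages - p->resv_pages : p->free_pages;

	if (available <= 0)
		return false;
	p->free_pages--;
	if (chg == 0)
		p->resv_pages--;   /* consuming our own reservation */
	return true;
}

int main(void)
{
	struct pool p = { .free_pages = 1, .resv_pages = 1 };

	/* A faulter holding a reservation succeeds... */
	printf("reserved faulter:   %d\n", dequeue_page(&p, needs_reservation(true)));
	/* ...an unreserved one on the now-empty pool does not. */
	printf("unreserved faulter: %d\n", dequeue_page(&p, needs_reservation(false)));
	return 0;
}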
2464 static void hugetlb_vm_op_open(struct vm_area_struct *vma) in hugetlb_vm_op_open() argument
2466 struct resv_map *resv = vma_resv_map(vma); in hugetlb_vm_op_open()
2476 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) in hugetlb_vm_op_open()
2480 static void hugetlb_vm_op_close(struct vm_area_struct *vma) in hugetlb_vm_op_close() argument
2482 struct hstate *h = hstate_vma(vma); in hugetlb_vm_op_close()
2483 struct resv_map *resv = vma_resv_map(vma); in hugetlb_vm_op_close()
2484 struct hugepage_subpool *spool = subpool_vma(vma); in hugetlb_vm_op_close()
2487 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) in hugetlb_vm_op_close()
2490 start = vma_hugecache_offset(h, vma, vma->vm_start); in hugetlb_vm_op_close()
2491 end = vma_hugecache_offset(h, vma, vma->vm_end); in hugetlb_vm_op_close()
2509 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in hugetlb_vm_op_fault() argument
2521 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, in make_huge_pte() argument
2528 vma->vm_page_prot))); in make_huge_pte()
2531 vma->vm_page_prot)); in make_huge_pte()
2535 entry = arch_make_huge_pte(entry, vma, page, writable); in make_huge_pte()
2540 static void set_huge_ptep_writable(struct vm_area_struct *vma, in set_huge_ptep_writable() argument
2546 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) in set_huge_ptep_writable()
2547 update_mmu_cache(vma, address, ptep); in set_huge_ptep_writable()
2577 struct vm_area_struct *vma) in copy_hugetlb_page_range() argument
2583 struct hstate *h = hstate_vma(vma); in copy_hugetlb_page_range()
2589 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; in copy_hugetlb_page_range()
2591 mmun_start = vma->vm_start; in copy_hugetlb_page_range()
2592 mmun_end = vma->vm_end; in copy_hugetlb_page_range()
2596 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { in copy_hugetlb_page_range()
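Line 2589 decides whether the copied range must be write-protected for copy-on-write: only a mapping that is potentially writable but not shared qualifies. A tiny sketch of that flag test, using hypothetical VM_SHARED/VM_MAYWRITE bit values (only the combination check matters), is below.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical bit values for the two flags involved. */
#define VM_SHARED   0x08UL
#define VM_MAYWRITE 0x20UL

static bool needs_cow(unsigned long vm_flags)
{
	/* Private (not VM_SHARED) and potentially writable => copy-on-write. */
	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

int main(void)
{
	printf("private rw: %d\n", needs_cow(VM_MAYWRITE));              /* 1 */
	printf("shared  rw: %d\n", needs_cow(VM_SHARED | VM_MAYWRITE));  /* 0 */
	printf("private ro: %d\n", needs_cow(0));                        /* 0 */
	return 0;
}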
2650 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, in __unmap_hugepage_range() argument
2655 struct mm_struct *mm = vma->vm_mm; in __unmap_hugepage_range()
2661 struct hstate *h = hstate_vma(vma); in __unmap_hugepage_range()
2666 WARN_ON(!is_vm_hugetlb_page(vma)); in __unmap_hugepage_range()
2670 tlb_start_vma(tlb, vma); in __unmap_hugepage_range()
2710 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); in __unmap_hugepage_range()
2744 tlb_end_vma(tlb, vma); in __unmap_hugepage_range()
2748 struct vm_area_struct *vma, unsigned long start, in __unmap_hugepage_range_final() argument
2751 __unmap_hugepage_range(tlb, vma, start, end, ref_page); in __unmap_hugepage_range_final()
2763 vma->vm_flags &= ~VM_MAYSHARE; in __unmap_hugepage_range_final()
2766 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, in unmap_hugepage_range() argument
2772 mm = vma->vm_mm; in unmap_hugepage_range()
2775 __unmap_hugepage_range(&tlb, vma, start, end, ref_page); in unmap_hugepage_range()
2785 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, in unmap_ref_private() argument
2788 struct hstate *h = hstate_vma(vma); in unmap_ref_private()
2798 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + in unmap_ref_private()
2799 vma->vm_pgoff; in unmap_ref_private()
2800 mapping = file_inode(vma->vm_file)->i_mapping; in unmap_ref_private()
2810 if (iter_vma == vma) in unmap_ref_private()
2841 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, in hugetlb_cow() argument
2845 struct hstate *h = hstate_vma(vma); in hugetlb_cow()
2857 page_move_anon_rmap(old_page, vma, address); in hugetlb_cow()
2858 set_huge_ptep_writable(vma, address, ptep); in hugetlb_cow()
2871 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && in hugetlb_cow()
2882 new_page = alloc_huge_page(vma, address, outside_reserve); in hugetlb_cow()
2895 unmap_ref_private(mm, vma, old_page, address); in hugetlb_cow()
2918 if (unlikely(anon_vma_prepare(vma))) { in hugetlb_cow()
2923 copy_user_huge_page(new_page, old_page, address, vma, in hugetlb_cow()
2942 huge_ptep_clear_flush(vma, address, ptep); in hugetlb_cow()
2944 make_huge_pte(vma, new_page, 1)); in hugetlb_cow()
2946 hugepage_add_new_anon_rmap(new_page, vma, address); in hugetlb_cow()
2963 struct vm_area_struct *vma, unsigned long address) in hugetlbfs_pagecache_page() argument
2968 mapping = vma->vm_file->f_mapping; in hugetlbfs_pagecache_page()
2969 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_page()
2979 struct vm_area_struct *vma, unsigned long address) in hugetlbfs_pagecache_present() argument
2985 mapping = vma->vm_file->f_mapping; in hugetlbfs_pagecache_present()
2986 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_present()
2994 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, in hugetlb_no_page() argument
2998 struct hstate *h = hstate_vma(vma); in hugetlb_no_page()
3011 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { in hugetlb_no_page()
3027 page = alloc_huge_page(vma, address, 0); in hugetlb_no_page()
3040 if (vma->vm_flags & VM_MAYSHARE) { in hugetlb_no_page()
3058 if (unlikely(anon_vma_prepare(vma))) { in hugetlb_no_page()
3083 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) in hugetlb_no_page()
3084 if (vma_needs_reservation(h, vma, address) < 0) { in hugetlb_no_page()
3101 hugepage_add_new_anon_rmap(page, vma, address); in hugetlb_no_page()
3104 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) in hugetlb_no_page()
3105 && (vma->vm_flags & VM_SHARED))); in hugetlb_no_page()
3108 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { in hugetlb_no_page()
3110 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl); in hugetlb_no_page()
3128 struct vm_area_struct *vma, in fault_mutex_hash() argument
3135 if (vma->vm_flags & VM_SHARED) { in fault_mutex_hash()
3153 struct vm_area_struct *vma, in fault_mutex_hash() argument
3161 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, in hugetlb_fault() argument
3171 struct hstate *h = hstate_vma(vma); in hugetlb_fault()
3181 migration_entry_wait_huge(vma, mm, ptep); in hugetlb_fault()
3192 mapping = vma->vm_file->f_mapping; in hugetlb_fault()
3193 idx = vma_hugecache_offset(h, vma, address); in hugetlb_fault()
3200 hash = fault_mutex_hash(h, mm, vma, mapping, idx, address); in hugetlb_fault()
3205 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags); in hugetlb_fault()
3230 if (vma_needs_reservation(h, vma, address) < 0) { in hugetlb_fault()
3235 if (!(vma->vm_flags & VM_MAYSHARE)) in hugetlb_fault()
3237 vma, address); in hugetlb_fault()
3262 ret = hugetlb_cow(mm, vma, address, ptep, entry, in hugetlb_fault()
3269 if (huge_ptep_set_access_flags(vma, address, ptep, entry, in hugetlb_fault()
3271 update_mmu_cache(vma, address, ptep); in hugetlb_fault()
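hugetlb_fault() serializes racing faults on the same huge page by hashing the (mapping, idx) pair into a table of mutexes (fault_mutex_hash() above, taken at line 3200). A userspace sketch of that pattern follows; the table size, the trivial mix function standing in for the kernel's hash, and the fault_mutex_table name are all assumptions for illustration.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_FAULT_MUTEXES 8   /* hypothetical table size */

static pthread_mutex_t fault_mutex_table[NUM_FAULT_MUTEXES];

/* Trivial stand-in for hashing (mapping, idx) to a table slot. */
static unsigned int fault_mutex_hash(const void *mapping, unsigned long idx)
{
	uintptr_t key = (uintptr_t)mapping ^ (idx * 0x9E3779B9UL);

	return (unsigned int)(key % NUM_FAULT_MUTEXES);
}

int main(void)
{
	int dummy_mapping;             /* stands in for struct address_space * */
	unsigned long idx = 3;         /* huge-page index within the file */
	unsigned int hash;
	int i;

	for (i = 0; i < NUM_FAULT_MUTEXES; i++)
		pthread_mutex_init(&fault_mutex_table[i], NULL);

	/* Two faults on the same (mapping, idx) pick the same mutex and
	 * therefore run one at a time, which is the property relied on here. */
	hash = fault_mutex_hash(&dummy_mapping, idx);
	pthread_mutex_lock(&fault_mutex_table[hash]);
	printf("fault on idx %lu serialized on mutex %u\n", idx, hash);
	pthread_mutex_unlock(&fault_mutex_table[hash]);
	return 0;
}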
3297 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, in follow_hugetlb_page() argument
3305 struct hstate *h = hstate_vma(vma); in follow_hugetlb_page()
3307 while (vaddr < vma->vm_end && remainder) { in follow_hugetlb_page()
3333 !hugetlbfs_pagecache_present(h, vma, vaddr)) { in follow_hugetlb_page()
3357 ret = hugetlb_fault(mm, vma, vaddr, in follow_hugetlb_page()
3375 vmas[i] = vma; in follow_hugetlb_page()
3381 if (vaddr < vma->vm_end && remainder && in follow_hugetlb_page()
3397 unsigned long hugetlb_change_protection(struct vm_area_struct *vma, in hugetlb_change_protection() argument
3400 struct mm_struct *mm = vma->vm_mm; in hugetlb_change_protection()
3404 struct hstate *h = hstate_vma(vma); in hugetlb_change_protection()
3408 flush_cache_range(vma, address, end); in hugetlb_change_protection()
3411 mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex); in hugetlb_change_protection()
3445 pte = arch_make_huge_pte(pte, vma, NULL, 0); in hugetlb_change_protection()
3457 flush_tlb_range(vma, start, end); in hugetlb_change_protection()
3458 mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex); in hugetlb_change_protection()
3466 struct vm_area_struct *vma, in hugetlb_reserve_pages() argument
3488 if (!vma || vma->vm_flags & VM_MAYSHARE) { in hugetlb_reserve_pages()
3500 set_vma_resv_map(vma, resv_map); in hugetlb_reserve_pages()
3501 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); in hugetlb_reserve_pages()
3536 if (!vma || vma->vm_flags & VM_MAYSHARE) in hugetlb_reserve_pages()
3540 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) in hugetlb_reserve_pages()
3564 struct vm_area_struct *vma, in page_table_shareable() argument
3573 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED; in page_table_shareable()
3588 static int vma_shareable(struct vm_area_struct *vma, unsigned long addr) in vma_shareable() argument
3596 if (vma->vm_flags & VM_MAYSHARE && in vma_shareable()
3597 vma->vm_start <= base && end <= vma->vm_end) in vma_shareable()
3613 struct vm_area_struct *vma = find_vma(mm, addr); in huge_pmd_share() local
3614 struct address_space *mapping = vma->vm_file->f_mapping; in huge_pmd_share()
3615 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + in huge_pmd_share()
3616 vma->vm_pgoff; in huge_pmd_share()
3623 if (!vma_shareable(vma, addr)) in huge_pmd_share()
3628 if (svma == vma) in huge_pmd_share()
3631 saddr = page_table_shareable(svma, vma, addr, idx); in huge_pmd_share()
3644 ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte); in huge_pmd_share()
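vma_shareable() (lines 3596-3597) only lets a mapping share a PMD page when it is VM_MAYSHARE and fully covers the huge-PMD-sized window containing the address; huge_pmd_share() then walks the file's i_mmap list looking for another VMA at the same page-table offset. A minimal sketch of the coverage test alone is below; the VM_MAYSHARE bit value, the SHARE_SIZE/SHARE_MASK window (a stand-in for an alignment the listing does not show) and the vma_stub type are assumptions for illustration.

#include <stdbool.h>
#include <stdio.h>

#define VM_MAYSHARE 0x80UL              /* hypothetical bit value */
#define SHARE_SIZE  (1UL << 30)         /* hypothetical sharing window size */
#define SHARE_MASK  (~(SHARE_SIZE - 1)) /* mask selecting that window */

struct vma_stub {
	unsigned long vm_start, vm_end, vm_flags;
};

/* Mirrors the test at lines 3596-3597: the VMA must be share-capable and
 * cover the whole aligned window around addr. */
static bool pmd_shareable(const struct vma_stub *vma, unsigned long addr)
{
	unsigned long base = addr & SHARE_MASK;
	unsigned long end = base + SHARE_SIZE;

	return (vma->vm_flags & VM_MAYSHARE) &&
	       vma->vm_start <= base && end <= vma->vm_end;
}

int main(void)
{
	struct vma_stub whole = {
		0x40000000UL, 0x40000000UL + 2 * SHARE_SIZE, VM_MAYSHARE
	};
	struct vma_stub partial = {
		0x40000000UL + SHARE_SIZE / 2, 0x40000000UL + SHARE_SIZE, VM_MAYSHARE
	};

	printf("covers window:  %d\n",
	       pmd_shareable(&whole, 0x40000000UL + SHARE_SIZE));      /* 1 */
	printf("partial window: %d\n",
	       pmd_shareable(&partial, 0x40000000UL + SHARE_SIZE / 2)); /* 0 */
	return 0;
}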