Searched refs:vmf (Results 1 – 11 of 11) sorted by relevance

/mm/
memory.c
2215 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf) in do_page_mkwrite() argument
2218 struct page *page = vmf->page; in do_page_mkwrite()
2219 unsigned int old_flags = vmf->flags; in do_page_mkwrite()
2221 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; in do_page_mkwrite()
2223 if (vmf->vma->vm_file && in do_page_mkwrite()
2224 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) in do_page_mkwrite()
2227 ret = vmf->vma->vm_ops->page_mkwrite(vmf); in do_page_mkwrite()
2229 vmf->flags = old_flags; in do_page_mkwrite()
2249 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf) in fault_dirty_shared_page() argument
2251 struct vm_area_struct *vma = vmf->vma; in fault_dirty_shared_page()
[all …]
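
The do_page_mkwrite() hits above funnel into a filesystem's ->page_mkwrite() hook. As a hedged sketch of what such a hook typically does (my_page_mkwrite and its exact locking choices are illustrative assumptions, not code from these results): re-check the page against truncation under the page lock, dirty it, and return it still locked.

    /* Hypothetical ->page_mkwrite() sketch; mirrors the contract that
     * do_page_mkwrite() above expects from the callee. */
    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/pagemap.h>

    static vm_fault_t my_page_mkwrite(struct vm_fault *vmf)
    {
            struct page *page = vmf->page;
            struct inode *inode = file_inode(vmf->vma->vm_file);

            sb_start_pagefault(inode->i_sb);        /* hold off fs freeze */
            lock_page(page);
            if (page->mapping != inode->i_mapping) {
                    unlock_page(page);              /* raced with truncate */
                    sb_end_pagefault(inode->i_sb);
                    return VM_FAULT_NOPAGE;         /* let the fault retry */
            }
            set_page_dirty(page);
            sb_end_pagefault(inode->i_sb);
            return VM_FAULT_LOCKED;                 /* page returned locked */
    }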

huge_memory.c
583 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf, in __do_huge_pmd_anonymous_page() argument
586 struct vm_area_struct *vma = vmf->vma; in __do_huge_pmd_anonymous_page()
589 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; in __do_huge_pmd_anonymous_page()
606 clear_huge_page(page, vmf->address, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
614 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
615 if (unlikely(!pmd_none(*vmf->pmd))) { in __do_huge_pmd_anonymous_page()
628 spin_unlock(vmf->ptl); in __do_huge_pmd_anonymous_page()
632 ret2 = handle_userfault(vmf, VM_UFFD_MISSING); in __do_huge_pmd_anonymous_page()
642 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
643 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in __do_huge_pmd_anonymous_page()
[all …]

swap_state.c
540 struct vm_fault *vmf) in swap_cluster_readahead() argument
550 struct vm_area_struct *vma = vmf->vma; in swap_cluster_readahead()
551 unsigned long addr = vmf->address; in swap_cluster_readahead()
640 static void swap_ra_info(struct vm_fault *vmf, in swap_ra_info() argument
643 struct vm_area_struct *vma = vmf->vma; in swap_ra_info()
661 faddr = vmf->address; in swap_ra_info()
662 orig_pte = pte = pte_offset_map(vmf->pmd, faddr); in swap_ra_info()
723 struct vm_fault *vmf) in swap_vma_readahead() argument
726 struct vm_area_struct *vma = vmf->vma; in swap_vma_readahead()
734 swap_ra_info(vmf, &ra_info); in swap_vma_readahead()
[all …]

filemap.c
2343 static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page, in lock_page_maybe_drop_mmap() argument
2354 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) in lock_page_maybe_drop_mmap()
2357 *fpin = maybe_unlock_mmap_for_io(vmf, *fpin); in lock_page_maybe_drop_mmap()
2358 if (vmf->flags & FAULT_FLAG_KILLABLE) { in lock_page_maybe_drop_mmap()
2367 up_read(&vmf->vma->vm_mm->mmap_sem); in lock_page_maybe_drop_mmap()
2383 static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) in do_sync_mmap_readahead() argument
2385 struct file *file = vmf->vma->vm_file; in do_sync_mmap_readahead()
2389 pgoff_t offset = vmf->pgoff; in do_sync_mmap_readahead()
2392 if (vmf->vma->vm_flags & VM_RAND_READ) in do_sync_mmap_readahead()
2397 if (vmf->vma->vm_flags & VM_SEQ_READ) { in do_sync_mmap_readahead()
[all …]
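
do_sync_mmap_readahead() above keys off VM_RAND_READ and VM_SEQ_READ in vmf->vma->vm_flags; those bits are set from userspace with madvise(). A minimal userspace sketch of that side (map_for_random_access is a made-up helper):

    #include <sys/mman.h>

    /* Hedged sketch: MADV_RANDOM sets VM_RAND_READ on the VMA, which the
     * fault path above uses to skip readahead; MADV_SEQUENTIAL sets
     * VM_SEQ_READ and makes the fault path read ahead more aggressively. */
    static void *map_for_random_access(int fd, size_t len)
    {
            void *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);

            if (p != MAP_FAILED)
                    madvise(p, len, MADV_RANDOM);
            return p;
    }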

internal.h
37 vm_fault_t do_swap_page(struct vm_fault *vmf);
365 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf, in maybe_unlock_mmap_for_io() argument
368 int flags = vmf->flags; in maybe_unlock_mmap_for_io()
380 fpin = get_file(vmf->vma->vm_file); in maybe_unlock_mmap_for_io()
381 up_read(&vmf->vma->vm_mm->mmap_sem); in maybe_unlock_mmap_for_io()

khugepaged.c
897 struct vm_fault vmf = { in __collapse_huge_page_swapin() local
910 vmf.pte = pte_offset_map(pmd, address); in __collapse_huge_page_swapin()
911 for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE; in __collapse_huge_page_swapin()
912 vmf.pte++, vmf.address += PAGE_SIZE) { in __collapse_huge_page_swapin()
913 vmf.orig_pte = *vmf.pte; in __collapse_huge_page_swapin()
914 if (!is_swap_pte(vmf.orig_pte)) in __collapse_huge_page_swapin()
917 ret = do_swap_page(&vmf); in __collapse_huge_page_swapin()
922 if (hugepage_vma_revalidate(mm, address, &vmf.vma)) { in __collapse_huge_page_swapin()
938 vmf.pte = pte_offset_map(pmd, vmf.address); in __collapse_huge_page_swapin()
940 vmf.pte--; in __collapse_huge_page_swapin()
[all …]

shmem.c
149 struct vm_fault *vmf, vm_fault_t *fault_type);
1455 struct vm_fault vmf; in shmem_swapin() local
1458 vmf.vma = &pvma; in shmem_swapin()
1459 vmf.address = 0; in shmem_swapin()
1460 page = swap_cluster_readahead(swap, gfp, &vmf); in shmem_swapin()
1742 struct vm_area_struct *vma, struct vm_fault *vmf, in shmem_getpage_gfp() argument
1803 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING); in shmem_getpage_gfp()
1990 static vm_fault_t shmem_fault(struct vm_fault *vmf) in shmem_fault() argument
1992 struct vm_area_struct *vma = vmf->vma; in shmem_fault()
2023 vmf->pgoff >= shmem_falloc->start && in shmem_fault()
[all …]

mmap.c
3337 static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
3376 static vm_fault_t special_mapping_fault(struct vm_fault *vmf) in special_mapping_fault() argument
3378 struct vm_area_struct *vma = vmf->vma; in special_mapping_fault()
3388 return sm->fault(sm, vmf->vma, vmf); in special_mapping_fault()
3393 for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) in special_mapping_fault()
3399 vmf->page = page; in special_mapping_fault()
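
special_mapping_fault() above shows the basic ->fault contract: translate vmf->pgoff into a backing page, take a reference, store the page in vmf->page and return 0 so the core mm maps it. A hedged minimal handler in the same style (my_pages and my_vm_ops are hypothetical):

    #include <linux/kernel.h>
    #include <linux/mm.h>

    static struct page *my_pages[4];        /* assumed pre-populated */

    static vm_fault_t my_fault(struct vm_fault *vmf)
    {
            if (vmf->pgoff >= ARRAY_SIZE(my_pages) || !my_pages[vmf->pgoff])
                    return VM_FAULT_SIGBUS; /* fault past the mapping */

            get_page(my_pages[vmf->pgoff]); /* reference handed to core mm */
            vmf->page = my_pages[vmf->pgoff];
            return 0;
    }

    static const struct vm_operations_struct my_vm_ops = {
            .fault = my_fault,
    };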

swapfile.c
1925 struct vm_fault vmf; in unuse_pte_range() local
1940 vmf.vma = vma; in unuse_pte_range()
1941 vmf.address = addr; in unuse_pte_range()
1942 vmf.pmd = pmd; in unuse_pte_range()
1943 page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, &vmf); in unuse_pte_range()
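
swapfile.c here, like shmem.c and khugepaged.c above, builds a partial struct vm_fault on the stack so code running outside a real page fault can reuse the fault-path readahead helpers. A hedged restatement of that pattern (swapin_for is a made-up wrapper):

    #include <linux/mm.h>
    #include <linux/swap.h>

    /* Only the fields swapin_readahead() consumes are filled in;
     * everything else stays zero-initialized. */
    static struct page *swapin_for(struct vm_area_struct *vma,
                                   unsigned long addr, pmd_t *pmd,
                                   swp_entry_t entry)
    {
            struct vm_fault vmf = {
                    .vma = vma,      /* readahead hints come from this VMA */
                    .address = addr, /* the readahead window centres here */
                    .pmd = pmd,
            };

            return swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, &vmf);
    }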

nommu.c
1689 vm_fault_t filemap_fault(struct vm_fault *vmf) in filemap_fault() argument
1696 void filemap_map_pages(struct vm_fault *vmf, in filemap_map_pages() argument

hugetlb.c
3343 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf) in hugetlb_vm_op_fault() argument
3949 struct vm_fault vmf = { in hugetlb_no_page() local
3969 ret = handle_userfault(&vmf, VM_UFFD_MISSING); in hugetlb_no_page()