Lines Matching refs:vmf
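
All matches below are in the kernel's DAX fault path (fs/dax.c). vmf is the struct vm_fault that the core MM hands to every fault handler; it carries the faulting virtual address (vmf->address), the VMA (vmf->vma), the page offset into the file (vmf->pgoff), and the fault flags (vmf->flags).
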
745 struct address_space *mapping, struct vm_fault *vmf, in dax_insert_entry() argument
769 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address); in dax_insert_entry()
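
These two matches show dax_insert_entry() taking vmf as a parameter and passing vmf->vma and vmf->address down to dax_associate_entry(), which ties the pages backing the new XArray entry to the faulting mapping. Reconstructed from the parameter line above and the call sites below, the signature of this era looks roughly like the sketch here; the flags and dirty parameters are an assumption, not visible in these matches:

static void *dax_insert_entry(struct xa_state *xas,
		struct address_space *mapping, struct vm_fault *vmf,
		void *entry, pfn_t pfn, unsigned long flags, bool dirty);
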
1054 struct vm_fault *vmf) in dax_load_hole() argument
1057 unsigned long vaddr = vmf->address; in dax_load_hole()
1061 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_load_hole()
1064 ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); in dax_load_hole()
1065 trace_dax_load_hole(inode, vmf, ret); in dax_load_hole()
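
dax_load_hole() services a read fault over a hole in the file: it takes the pfn of the shared zero page, records it in the mapping's XArray through dax_insert_entry(), and installs a read-only PTE with vmf_insert_mixed(). A minimal sketch of that sequence, assuming this era's helpers (my_zero_pfn(), pfn_to_pfn_t(), and the DAX_ZERO_PAGE flag from fs/dax.c):

unsigned long vaddr = vmf->address;
pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));	/* shared zero page */

*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			  DAX_ZERO_PAGE, false);
ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);	/* map it read-only */
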
1259 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, in dax_iomap_pte_fault() argument
1262 struct vm_area_struct *vma = vmf->vma; in dax_iomap_pte_fault()
1264 XA_STATE(xas, &mapping->i_pages, vmf->pgoff); in dax_iomap_pte_fault()
1266 unsigned long vaddr = vmf->address; in dax_iomap_pte_fault()
1267 loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT; in dax_iomap_pte_fault()
1272 bool write = vmf->flags & FAULT_FLAG_WRITE; in dax_iomap_pte_fault()
1278 trace_dax_pte_fault(inode, vmf, ret); in dax_iomap_pte_fault()
1289 if (write && !vmf->cow_page) in dax_iomap_pte_fault()
1304 if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) { in dax_iomap_pte_fault()
1326 if (vmf->cow_page) { in dax_iomap_pte_fault()
1332 clear_user_highpage(vmf->cow_page, vaddr); in dax_iomap_pte_fault()
1336 sector, vmf->cow_page, vaddr); in dax_iomap_pte_fault()
1347 __SetPageUptodate(vmf->cow_page); in dax_iomap_pte_fault()
1348 ret = finish_fault(vmf); in dax_iomap_pte_fault()
1367 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, in dax_iomap_pte_fault()
1385 trace_dax_insert_mapping(inode, vmf, entry); in dax_iomap_pte_fault()
1395 ret = dax_load_hole(&xas, mapping, &entry, vmf); in dax_iomap_pte_fault()
1424 trace_dax_pte_fault_done(inode, vmf, ret); in dax_iomap_pte_fault()
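
dax_iomap_pte_fault() is the PTE-sized fault handler, and nearly all of its working state comes from vmf. A consolidated sketch of the derivations visible in the matches above:

XA_STATE(xas, &mapping->i_pages, vmf->pgoff);	/* XArray slot for this page */
unsigned long vaddr = vmf->address;		/* faulting virtual address */
loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;	/* byte offset in the file */
bool write = vmf->flags & FAULT_FLAG_WRITE;	/* write vs. read fault */
unsigned int flags = IOMAP_FAULT;

if (write && !vmf->cow_page)			/* shared writable fault */
	flags |= IOMAP_WRITE;

A non-NULL vmf->cow_page means a write fault on a MAP_PRIVATE mapping: the block is copied into that page, __SetPageUptodate() marks it valid, and finish_fault() has the core MM install the private copy instead of the DAX pfn. The pmd_trans_huge()/pmd_devmap() test on line 1304 catches a racing PMD fault that already covers this address, in which case the handler returns VM_FAULT_NOPAGE and lets the fault retry.
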
1429 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_pmd_load_hole() argument
1432 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_pmd_load_hole()
1433 unsigned long pmd_addr = vmf->address & PMD_MASK; in dax_pmd_load_hole()
1434 struct vm_area_struct *vma = vmf->vma; in dax_pmd_load_hole()
1442 zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm); in dax_pmd_load_hole()
1448 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_pmd_load_hole()
1457 ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); in dax_pmd_load_hole()
1458 if (!pmd_none(*(vmf->pmd))) { in dax_pmd_load_hole()
1464 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in dax_pmd_load_hole()
1467 pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot); in dax_pmd_load_hole()
1469 set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); in dax_pmd_load_hole()
1471 trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry); in dax_pmd_load_hole()
1477 trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry); in dax_pmd_load_hole()
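
dax_pmd_load_hole() is the huge-page analogue of dax_load_hole(): it maps the mm's huge zero page read-only over a PMD-sized hole. The page-table update happens under pmd_lock(); if another thread populated vmf->pmd first (the pmd_none() test on line 1458 fails), the lock is dropped and the fault falls back to PTEs. A sketch of the locked install, mirroring the matches above:

ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
if (pmd_none(*vmf->pmd)) {
	pmd_t pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);

	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
}
spin_unlock(ptl);
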
1481 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, in dax_iomap_pmd_fault() argument
1484 struct vm_area_struct *vma = vmf->vma; in dax_iomap_pmd_fault()
1486 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); in dax_iomap_pmd_fault()
1487 unsigned long pmd_addr = vmf->address & PMD_MASK; in dax_iomap_pmd_fault()
1488 bool write = vmf->flags & FAULT_FLAG_WRITE; in dax_iomap_pmd_fault()
1508 trace_dax_pmd_fault(inode, vmf, max_pgoff, 0); in dax_iomap_pmd_fault()
1516 if ((vmf->pgoff & PG_PMD_COLOUR) != in dax_iomap_pmd_fault()
1517 ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR)) in dax_iomap_pmd_fault()
1557 if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) && in dax_iomap_pmd_fault()
1558 !pmd_devmap(*vmf->pmd)) { in dax_iomap_pmd_fault()
1585 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, in dax_iomap_pmd_fault()
1602 trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry); in dax_iomap_pmd_fault()
1603 result = vmf_insert_pfn_pmd(vmf, pfn, write); in dax_iomap_pmd_fault()
1609 result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry); in dax_iomap_pmd_fault()
1635 split_huge_pmd(vma, vmf->pmd, vmf->address); in dax_iomap_pmd_fault()
1639 trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result); in dax_iomap_pmd_fault()
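
dax_iomap_pmd_fault() can only install a huge entry when the virtual address and the file offset are congruent modulo the PMD size. PG_PMD_COLOUR is ((PMD_SIZE >> PAGE_SHIFT) - 1) in this file, so the check on lines 1516-1517 reads:

/* fall back to PTEs unless the fault is PMD-"colour" aligned */
if ((vmf->pgoff & PG_PMD_COLOUR) !=
    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
	goto fallback;

On the fallback path the handler calls split_huge_pmd(vma, vmf->pmd, vmf->address) (line 1635) so that the fault is retried at PTE granularity.
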
1643 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, in dax_iomap_pmd_fault() argument
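
This second definition on line 1643 is the !CONFIG_FS_DAX_PMD variant. Its body is not part of the matches, but by the usual config-stub convention it should reduce to a sketch like:

static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;	/* no huge-page DAX configured */
}
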
1663 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, in dax_iomap_fault() argument
1668 return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops); in dax_iomap_fault()
1670 return dax_iomap_pmd_fault(vmf, pfnp, ops); in dax_iomap_fault()
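
dax_iomap_fault() is the exported entry point: it switches on pe_size and forwards vmf to the PTE or PMD handler above. A hedged sketch of how a filesystem wires its ->huge_fault handler into it, modeled on the xfs/ext4 pattern of this era; my_fs_huge_fault and my_fs_iomap_ops are illustrative names, not from the matches:

static vm_fault_t my_fs_huge_fault(struct vm_fault *vmf,
				   enum page_entry_size pe_size)
{
	pfn_t pfn;

	return dax_iomap_fault(vmf, pe_size, &pfn, NULL, &my_fs_iomap_ops);
}
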
1687 dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) in dax_insert_pfn_mkwrite() argument
1689 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_insert_pfn_mkwrite()
1690 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); in dax_insert_pfn_mkwrite()
1701 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, in dax_insert_pfn_mkwrite()
1709 ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); in dax_insert_pfn_mkwrite()
1712 ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE); in dax_insert_pfn_mkwrite()
1717 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); in dax_insert_pfn_mkwrite()
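
dax_insert_pfn_mkwrite() re-inserts an already-mapped pfn with write permission after a sync fault; the order argument selects the mapping size, as the two insert calls above show. A sketch of that dispatch (the VM_FAULT_FALLBACK default for unexpected orders is an assumption from this era's fs/dax.c):

if (order == 0)				/* PTE-sized entry */
	ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
else if (order == PMD_ORDER)		/* huge entry */
	ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
else
	ret = VM_FAULT_FALLBACK;
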
1731 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, in dax_finish_sync_fault() argument
1735 loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT; in dax_finish_sync_fault()
1739 err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1); in dax_finish_sync_fault()
1742 return dax_insert_pfn_mkwrite(vmf, pfn, order); in dax_finish_sync_fault()
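
dax_finish_sync_fault() completes a write fault on a MAP_SYNC mapping: it fsyncs the byte range derived from vmf->pgoff so the filesystem metadata is durable, then upgrades the mapping to writable via dax_insert_pfn_mkwrite(). A sketch of the caller side, following the xfs/ext4 pattern (my_fs_iomap_ops is an illustrative name):

ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &my_fs_iomap_ops);
if (ret & VM_FAULT_NEEDDSYNC)		/* metadata not yet on media */
	ret = dax_finish_sync_fault(vmf, pe_size, pfn);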