Lines Matching refs:vmf
717 struct address_space *mapping, struct vm_fault *vmf, in dax_insert_entry() argument
741 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address); in dax_insert_entry()
1032 struct vm_fault *vmf) in dax_load_hole() argument
1035 unsigned long vaddr = vmf->address; in dax_load_hole()
1039 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_load_hole()
1042 ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); in dax_load_hole()
1043 trace_dax_load_hole(inode, vmf, ret); in dax_load_hole()
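The dax_load_hole() hits above are the read-fault-on-hole path: the pfn of the shared zero page is recorded in the mapping as a DAX_ZERO_PAGE entry and then installed read-only at vmf->address. A minimal sketch of that shape follows; the entry bookkeeping is omitted because dax_insert_entry() is static to fs/dax.c, and the function name is hypothetical.

/*
 * Hypothetical sketch (not fs/dax.c verbatim): serve a read fault over an
 * unallocated extent by mapping the shared zero page read-only.  Assumes
 * the usual fs/dax.c headers (<linux/mm.h>, <linux/pfn_t.h>).
 */
static vm_fault_t zero_page_fault_sketch(struct vm_fault *vmf)
{
	unsigned long vaddr = vmf->address;            /* faulting VA  */
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));  /* zero page    */

	/*
	 * Install a read-only PTE: reads see zeros; a later write takes a
	 * separate fault that allocates blocks or fills a COW page.
	 */
	return vmf_insert_mixed(vmf->vma, vaddr, pfn);
}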
1242 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, in dax_iomap_pte_fault() argument
1245 struct vm_area_struct *vma = vmf->vma; in dax_iomap_pte_fault()
1247 XA_STATE(xas, &mapping->i_pages, vmf->pgoff); in dax_iomap_pte_fault()
1249 unsigned long vaddr = vmf->address; in dax_iomap_pte_fault()
1250 loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT; in dax_iomap_pte_fault()
1254 bool write = vmf->flags & FAULT_FLAG_WRITE; in dax_iomap_pte_fault()
1260 trace_dax_pte_fault(inode, vmf, ret); in dax_iomap_pte_fault()
1271 if (write && !vmf->cow_page) in dax_iomap_pte_fault()
1286 if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) { in dax_iomap_pte_fault()
1308 if (vmf->cow_page) { in dax_iomap_pte_fault()
1314 clear_user_highpage(vmf->cow_page, vaddr); in dax_iomap_pte_fault()
1318 sector, PAGE_SIZE, vmf->cow_page, vaddr); in dax_iomap_pte_fault()
1329 __SetPageUptodate(vmf->cow_page); in dax_iomap_pte_fault()
1330 ret = finish_fault(vmf); in dax_iomap_pte_fault()
1349 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, in dax_iomap_pte_fault()
1367 trace_dax_insert_mapping(inode, vmf, entry); in dax_iomap_pte_fault()
1377 ret = dax_load_hole(&xas, mapping, &entry, vmf); in dax_iomap_pte_fault()
1406 trace_dax_pte_fault_done(inode, vmf, ret); in dax_iomap_pte_fault()
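Among the dax_iomap_pte_fault() lines above, the vmf->cow_page branch (source lines 1308-1330) is the private-mapping write case: device memory is never mapped writably into a MAP_PRIVATE VMA; instead the pre-allocated vmf->cow_page is filled and handed to the core MM. A hedged sketch of just that branch, with the copy-from-device step elided and a hypothetical function name:

/*
 * Hypothetical sketch of the COW branch of a DAX PTE fault.  For a hole the
 * new page is simply zeroed; for an allocated extent the real code copies
 * PAGE_SIZE bytes from the backing device into vmf->cow_page (elided here).
 * Assumes <linux/mm.h> and <linux/highmem.h>.
 */
static vm_fault_t cow_fault_sketch(struct vm_fault *vmf, bool hole)
{
	vm_fault_t ret;

	if (hole)
		clear_user_highpage(vmf->cow_page, vmf->address);
	/* else: copy the backing data into vmf->cow_page */

	__SetPageUptodate(vmf->cow_page);

	/* finish_fault() installs vmf->cow_page into the page tables. */
	ret = finish_fault(vmf);
	if (!ret)
		ret = VM_FAULT_DONE_COW;
	return ret;
}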
1411 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_pmd_load_hole() argument
1414 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_pmd_load_hole()
1415 unsigned long pmd_addr = vmf->address & PMD_MASK; in dax_pmd_load_hole()
1416 struct vm_area_struct *vma = vmf->vma; in dax_pmd_load_hole()
1424 zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm); in dax_pmd_load_hole()
1430 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_pmd_load_hole()
1439 ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); in dax_pmd_load_hole()
1440 if (!pmd_none(*(vmf->pmd))) { in dax_pmd_load_hole()
1446 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in dax_pmd_load_hole()
1449 pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot); in dax_pmd_load_hole()
1451 set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); in dax_pmd_load_hole()
1453 trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry); in dax_pmd_load_hole()
1459 trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry); in dax_pmd_load_hole()
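The dax_pmd_load_hole() lines show the PMD-sized counterpart of the hole case: a huge zero page is mapped read-only under the pmd lock. The sketch below keeps only that core; the DAX_PMD | DAX_ZERO_PAGE entry insertion and the arch_needs_pgtable_deposit() handling (source line 1446) are left out, and the function name is made up.

/*
 * Hypothetical sketch: install the shared huge zero page at the PMD covering
 * vmf->address, falling back to PTEs if the PMD was populated by a racing
 * fault.  Assumes CONFIG_TRANSPARENT_HUGEPAGE and <linux/mm.h>,
 * <linux/huge_mm.h>.
 */
static vm_fault_t pmd_zero_page_sketch(struct vm_fault *vmf)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct mm_struct *mm = vmf->vma->vm_mm;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t entry;

	zero_page = mm_get_huge_zero_page(mm);
	if (unlikely(!zero_page))
		return VM_FAULT_FALLBACK;

	ptl = pmd_lock(mm, vmf->pmd);
	if (!pmd_none(*vmf->pmd)) {
		/* Someone else mapped this range first; let them win. */
		spin_unlock(ptl);
		return VM_FAULT_FALLBACK;
	}

	entry = pmd_mkhuge(mk_pmd(zero_page, vmf->vma->vm_page_prot));
	set_pmd_at(mm, pmd_addr, vmf->pmd, entry);
	spin_unlock(ptl);
	return VM_FAULT_NOPAGE;
}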
1463 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, in dax_iomap_pmd_fault() argument
1466 struct vm_area_struct *vma = vmf->vma; in dax_iomap_pmd_fault()
1468 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); in dax_iomap_pmd_fault()
1469 unsigned long pmd_addr = vmf->address & PMD_MASK; in dax_iomap_pmd_fault()
1470 bool write = vmf->flags & FAULT_FLAG_WRITE; in dax_iomap_pmd_fault()
1489 trace_dax_pmd_fault(inode, vmf, max_pgoff, 0); in dax_iomap_pmd_fault()
1497 if ((vmf->pgoff & PG_PMD_COLOUR) != in dax_iomap_pmd_fault()
1498 ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR)) in dax_iomap_pmd_fault()
1538 if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) && in dax_iomap_pmd_fault()
1539 !pmd_devmap(*vmf->pmd)) { in dax_iomap_pmd_fault()
1565 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, in dax_iomap_pmd_fault()
1582 trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry); in dax_iomap_pmd_fault()
1583 result = vmf_insert_pfn_pmd(vmf, pfn, write); in dax_iomap_pmd_fault()
1589 result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry); in dax_iomap_pmd_fault()
1615 split_huge_pmd(vma, vmf->pmd, vmf->address); in dax_iomap_pmd_fault()
1619 trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result); in dax_iomap_pmd_fault()
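Two of the dax_iomap_pmd_fault() checks above (source lines 1497-1498 and 1538-1539) decide whether a PMD fault is even worth attempting; everything else falls back to PTEs (the real function also rejects COW faults and VMAs that do not span the whole PMD). A small standalone version of those guards, with PG_PMD_COLOUR spelled out as fs/dax.c defines it and a hypothetical function name:

/*
 * Hypothetical helper: return true only if a PMD-sized DAX mapping can cover
 * this fault.  The file offset and the virtual address must be congruent
 * modulo PMD_SIZE, and vmf->pmd must not already hold PTE-level mappings.
 * Assumes <linux/mm.h>.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

static bool pmd_fault_is_feasible(struct vm_fault *vmf)
{
	/* pgoff and address must point at the same page within the PMD. */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		return false;

	/* A populated, non-huge PMD means PTEs already map this range. */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
	    !pmd_devmap(*vmf->pmd))
		return false;

	return true;
}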
1623 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, in dax_iomap_pmd_fault() argument
1643 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, in dax_iomap_fault() argument
1648 return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops); in dax_iomap_fault()
1650 return dax_iomap_pmd_fault(vmf, pfnp, ops); in dax_iomap_fault()
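dax_iomap_fault() is the only entry point a filesystem needs here: it dispatches on pe_size to the PTE or PMD handler above. A hedged sketch of the caller side follows; my_dax_huge_fault and my_iomap_ops are placeholders, and real filesystems additionally serialize against truncate with their own locks around the call. The pfn written back through pfnp only matters for MAP_SYNC mappings, as shown in the sketch after the dax_finish_sync_fault() lines below.

/*
 * Hypothetical ->huge_fault handler (also usable for ->fault with
 * PE_SIZE_PTE): all of the DAX work is delegated to dax_iomap_fault().
 * Assumes <linux/dax.h>, <linux/fs.h>, <linux/iomap.h>, <linux/mm.h>.
 */
static const struct iomap_ops my_iomap_ops;	/* filesystem-provided */

static vm_fault_t my_dax_huge_fault(struct vm_fault *vmf,
				    enum page_entry_size pe_size)
{
	struct file *file = vmf->vma->vm_file;
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		     (vmf->vma->vm_flags & VM_SHARED);
	pfn_t pfn;
	vm_fault_t ret;

	if (write) {
		/* Writable faults count as writes to the filesystem. */
		sb_start_pagefault(file_inode(file)->i_sb);
		file_update_time(file);
	}

	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &my_iomap_ops);

	if (write)
		sb_end_pagefault(file_inode(file)->i_sb);
	return ret;
}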
1667 dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) in dax_insert_pfn_mkwrite() argument
1669 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_insert_pfn_mkwrite()
1670 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); in dax_insert_pfn_mkwrite()
1681 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, in dax_insert_pfn_mkwrite()
1689 ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); in dax_insert_pfn_mkwrite()
1692 ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE); in dax_insert_pfn_mkwrite()
1697 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); in dax_insert_pfn_mkwrite()
1711 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, in dax_finish_sync_fault() argument
1715 loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT; in dax_finish_sync_fault()
1719 err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1); in dax_finish_sync_fault()
1722 return dax_insert_pfn_mkwrite(vmf, pfn, order); in dax_finish_sync_fault()
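dax_finish_sync_fault() is the second half of a synchronous (MAP_SYNC) fault: dax_iomap_fault() returns VM_FAULT_NEEDDSYNC without installing the mapping but with *pfnp filled in; the filesystem then flushes metadata for the faulting range (vfs_fsync_range(), source line 1719) and calls dax_finish_sync_fault(), which re-inserts the pfn writably via dax_insert_pfn_mkwrite(). A hedged caller-side sketch, with a hypothetical function name and the iomap ops passed in by the caller:

/*
 * Hypothetical sync-fault wrapper: complete a MAP_SYNC write fault only
 * after the filesystem metadata backing it is durable.
 * Assumes <linux/dax.h>, <linux/iomap.h>, <linux/mm.h>.
 */
static vm_fault_t my_dax_sync_fault(struct vm_fault *vmf,
				    enum page_entry_size pe_size,
				    const struct iomap_ops *ops)
{
	pfn_t pfn;
	vm_fault_t ret;

	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, ops);
	if (ret & VM_FAULT_NEEDDSYNC)
		/*
		 * The PTE/PMD was not installed yet; fsync the range and
		 * map pfn writable now that the allocation is on media.
		 */
		ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	return ret;
}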