
Lines Matching refs:mapping in fs/dax.c

331 static void dax_associate_entry(void *entry, struct address_space *mapping,  in dax_associate_entry()  argument
344 WARN_ON_ONCE(page->mapping); in dax_associate_entry()
345 page->mapping = mapping; in dax_associate_entry()
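dax_associate_entry() is where a DAX page first gains an owner. A minimal sketch of the loop implied by lines 344-345; the for_each_mapped_pfn() iteration, the index arithmetic, and the entry size are reconstructed from context rather than shown in the matches above:

    unsigned long pfn, index = linear_page_index(vma, address & ~(size - 1));
    int i = 0;

    for_each_mapped_pfn(entry, pfn) {
        struct page *page = pfn_to_page(pfn);

        WARN_ON_ONCE(page->mapping);   /* line 344: page must not already be owned */
        page->mapping = mapping;       /* line 345: claim the page for this file */
        page->index = index + i++;     /* assumed: consecutive file offsets */
    }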
350 static void dax_disassociate_entry(void *entry, struct address_space *mapping, in dax_disassociate_entry() argument
362 WARN_ON_ONCE(page->mapping && page->mapping != mapping); in dax_disassociate_entry()
363 page->mapping = NULL; in dax_disassociate_entry()
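dax_disassociate_entry() is the teardown mirror of the above. The loop body is grounded in lines 362-363; the surrounding iteration is assumed:

    unsigned long pfn;

    for_each_mapped_pfn(entry, pfn) {
        struct page *page = pfn_to_page(pfn);

        /* line 362: the page may belong to this mapping, or to none at all */
        WARN_ON_ONCE(page->mapping && page->mapping != mapping);
        page->mapping = NULL;          /* line 363: release ownership */
        page->index = 0;
    }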
397 struct address_space *mapping = READ_ONCE(page->mapping); in dax_lock_page() local
400 if (!mapping || !dax_mapping(mapping)) in dax_lock_page()
411 if (S_ISCHR(mapping->host->i_mode)) in dax_lock_page()
414 xas.xa = &mapping->i_pages; in dax_lock_page()
416 if (mapping != page->mapping) { in dax_lock_page()
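dax_lock_page() cannot trust page->mapping until the DAX entry is locked, because truncate can change it concurrently; hence the READ_ONCE() at line 397 and the re-check at line 416 once xa_lock is held. A sketch of the retry loop, with the entry-locking details elided as assumptions:

    for (;;) {
        struct address_space *mapping = READ_ONCE(page->mapping);

        if (!mapping || !dax_mapping(mapping))
            break;                      /* line 400: not (or no longer) a fsdax page */
        if (S_ISCHR(mapping->host->i_mode))
            break;                      /* line 411: device-dax, no entry lock needed */

        xas.xa = &mapping->i_pages;     /* line 414 */
        xas_lock_irq(&xas);
        if (mapping != page->mapping) { /* line 416: lost a race with truncate */
            xas_unlock_irq(&xas);
            continue;                   /* re-sample page->mapping and retry */
        }
        /* assumed: load the entry at page->index, lock it, then break */
    }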
438 struct address_space *mapping = page->mapping; in dax_unlock_page() local
439 XA_STATE(xas, &mapping->i_pages, page->index); in dax_unlock_page()
441 if (S_ISCHR(mapping->host->i_mode)) in dax_unlock_page()
477 struct address_space *mapping, unsigned int order) in grab_mapping_entry() argument
518 unmap_mapping_pages(mapping, in grab_mapping_entry()
525 dax_disassociate_entry(entry, mapping, false); in grab_mapping_entry()
528 mapping->nrexceptional--; in grab_mapping_entry()
544 mapping->nrexceptional++; in grab_mapping_entry()
549 if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM)) in grab_mapping_entry()
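grab_mapping_entry() owns the nrexceptional bookkeeping (decrement at line 528 when a conflicting entry is purged, increment at line 544 when a new one is stored) and uses the standard XArray allocation-retry idiom at line 549; __GFP_HIGHMEM is masked off because XArray nodes must be addressable through the kernel direct mapping. A sketch of that idiom, with the lookup body assumed:

    retry:
        xas_lock_irq(xas);
        /* assumed body: find or create the entry, adjusting
         * mapping->nrexceptional as exceptional entries are removed
         * (line 528) or added (line 544) */
        xas_unlock_irq(xas);
        if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
            goto retry;                 /* line 549: allocate outside the lock, redo */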
576 struct page *dax_layout_busy_page(struct address_space *mapping) in dax_layout_busy_page() argument
578 XA_STATE(xas, &mapping->i_pages, 0); in dax_layout_busy_page()
589 if (!dax_mapping(mapping) || !mapping_mapped(mapping)) in dax_layout_busy_page()
604 unmap_mapping_range(mapping, 0, 0, 0); in dax_layout_busy_page()
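dax_layout_busy_page() first bails out (line 589) for non-DAX or never-mmapped files, then zaps every userspace mapping of the file before scanning for pages with elevated refcounts. At line 604 both holebegin and holelen are 0, which unmap_mapping_range() treats as "from offset 0 to end of file"; the final 0 leaves private COW pages alone:

    if (!dax_mapping(mapping) || !mapping_mapped(mapping))
        return NULL;        /* line 589: nothing mapped, nothing can be busy */

    /* line 604: zap all user PTEs so any later access must re-fault and
     * thereby synchronize with the layout change */
    unmap_mapping_range(mapping, 0, 0, 0);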
630 static int __dax_invalidate_entry(struct address_space *mapping, in __dax_invalidate_entry() argument
633 XA_STATE(xas, &mapping->i_pages, index); in __dax_invalidate_entry()
645 dax_disassociate_entry(entry, mapping, trunc); in __dax_invalidate_entry()
647 mapping->nrexceptional--; in __dax_invalidate_entry()
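__dax_invalidate_entry() ties the pieces above together under xa_lock: disassociate the pages (line 645), erase the XArray slot, and drop the exceptional-entry count (line 647). A condensed sketch; the entry-lock helpers and the dirty-entry guard are assumed from context:

    xas_lock_irq(&xas);
    entry = get_unlocked_entry(&xas, 0);    /* assumed helper: wait out entry locks */
    if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
        goto out;
    /* assumed guard: when !trunc, dirty or towrite entries are kept */
    dax_disassociate_entry(entry, mapping, trunc);  /* line 645 */
    xas_store(&xas, NULL);                          /* erase the slot */
    mapping->nrexceptional--;                       /* line 647 */
    ret = 1;
    out:
        put_unlocked_entry(&xas, entry);            /* assumed helper */
        xas_unlock_irq(&xas);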
659 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) in dax_delete_mapping_entry() argument
661 int ret = __dax_invalidate_entry(mapping, index, true); in dax_delete_mapping_entry()
677 int dax_invalidate_mapping_entry_sync(struct address_space *mapping, in dax_invalidate_mapping_entry_sync() argument
680 return __dax_invalidate_entry(mapping, index, false); in dax_invalidate_mapping_entry_sync()
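The two exported wrappers differ only in the trunc flag they pass down. A sketch of both bodies, assuming the WARN in the truncate path from context: truncation (line 661) must remove the entry even when it is dirty, while the sync-invalidation path (line 680) passes false and leaves dirty or towrite entries in place, since their data still needs flushing:

    /* dax_delete_mapping_entry(), line 661: truncate/punch-hole path */
    ret = __dax_invalidate_entry(mapping, index, true);
    WARN_ON_ONCE(!ret);     /* the caller saw an entry, so it must be found */

    /* dax_invalidate_mapping_entry_sync(), line 680: best-effort path */
    return __dax_invalidate_entry(mapping, index, false);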
717 struct address_space *mapping, struct vm_fault *vmf, in dax_insert_entry() argument
723 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); in dax_insert_entry()
729 unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, in dax_insert_entry()
732 unmap_mapping_pages(mapping, index, 1, false); in dax_insert_entry()
740 dax_disassociate_entry(entry, mapping, false); in dax_insert_entry()
741 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address); in dax_insert_entry()
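dax_insert_entry() handles upgrading a hole or zero-page entry to a real block mapping. Before swapping entries it zaps the stale userspace view, a whole aligned huge-page range for a PMD entry (line 729) or a single page otherwise (line 732), then moves page ownership (lines 740-741). A sketch of that sequence; the branch condition and PG_PMD_NR are assumed from context:

    if (dirty)
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);   /* line 723 */

    if (dax_is_pmd_entry(entry))        /* assumed condition */
        unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
                            PG_PMD_NR, false);      /* line 729: whole PMD range */
    else
        unmap_mapping_pages(mapping, index, 1, false);  /* line 732: one page */

    dax_disassociate_entry(entry, mapping, false);      /* line 740 */
    dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);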
776 static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index, in dax_entry_mkclean() argument
784 i_mmap_lock_read(mapping); in dax_entry_mkclean()
785 vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) { in dax_entry_mkclean()
846 i_mmap_unlock_read(mapping); in dax_entry_mkclean()
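dax_entry_mkclean() is the DAX counterpart of page_mkclean(): under i_mmap_lock_read() (lines 784/846) it walks the interval tree of VMAs covering this file offset (line 785) and write-protects each one's PTE or PMD, so the next store faults and re-dirties the entry. A skeleton, with the per-VMA page-table work elided as an assumption:

    i_mmap_lock_read(mapping);                      /* line 784 */
    vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
        /* assumed: look up the PTE/PMD for pfn, write-protect and clean
         * it, and flush TLBs, all under the page-table lock */
    }
    i_mmap_unlock_read(mapping);                    /* line 846 */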
850 struct address_space *mapping, void *entry) in dax_writeback_one() argument
912 dax_entry_mkclean(mapping, index, pfn); in dax_writeback_one()
926 trace_dax_writeback_one(mapping->host, index, count); in dax_writeback_one()
939 int dax_writeback_mapping_range(struct address_space *mapping, in dax_writeback_mapping_range() argument
942 XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT); in dax_writeback_mapping_range()
943 struct inode *inode = mapping->host; in dax_writeback_mapping_range()
953 if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL) in dax_writeback_mapping_range()
962 tag_pages_for_writeback(mapping, xas.xa_index, end_index); in dax_writeback_mapping_range()
966 ret = dax_writeback_one(&xas, dax_dev, mapping, entry); in dax_writeback_mapping_range()
968 mapping_set_error(mapping, ret); in dax_writeback_mapping_range()
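dax_writeback_mapping_range() is the fsync/msync driver. Line 953 bails out early when there are no exceptional entries or the writeback is not synchronous; line 962 tags the range, and the loop then flushes each tagged entry via dax_writeback_one() (line 966), recording failures on the mapping (line 968). A sketch of the loop, with locking and progress accounting assumed:

    if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
        return 0;           /* line 953: nothing to flush synchronously */

    tag_pages_for_writeback(mapping, xas.xa_index, end_index);  /* line 962 */

    xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
        ret = dax_writeback_one(&xas, dax_dev, mapping, entry); /* line 966 */
        if (ret < 0) {
            mapping_set_error(mapping, ret);        /* line 968: record -EIO etc. */
            break;
        }
    }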
1031 struct address_space *mapping, void **entry, in dax_load_hole() argument
1034 struct inode *inode = mapping->host; in dax_load_hole()
1039 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_load_hole()
1198 struct address_space *mapping = iocb->ki_filp->f_mapping; in dax_iomap_rw() local
1199 struct inode *inode = mapping->host; in dax_iomap_rw()
1246 struct address_space *mapping = vma->vm_file->f_mapping; in dax_iomap_pte_fault() local
1247 XA_STATE(xas, &mapping->i_pages, vmf->pgoff); in dax_iomap_pte_fault()
1248 struct inode *inode = mapping->host; in dax_iomap_pte_fault()
1274 entry = grab_mapping_entry(&xas, mapping, 0); in dax_iomap_pte_fault()
1349 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, in dax_iomap_pte_fault()
1377 ret = dax_load_hole(&xas, mapping, &entry, vmf); in dax_iomap_pte_fault()
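The PTE fault path strings the helpers above together at single-page granularity: the XArray cursor sits at the faulting offset (line 1247), the entry is taken at order 0 (line 1274), and the pfn resolved from the iomap lookup is installed (line 1349), with dax_load_hole() (line 1377) covering reads of unallocated ranges. The two core calls, surrounding context assumed:

    XA_STATE(xas, &mapping->i_pages, vmf->pgoff);   /* line 1247: order-0 cursor */
    entry = grab_mapping_entry(&xas, mapping, 0);   /* line 1274: PTE-sized entry */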
1414 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_pmd_load_hole() local
1417 struct inode *inode = mapping->host; in dax_pmd_load_hole()
1430 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_pmd_load_hole()
1467 struct address_space *mapping = vma->vm_file->f_mapping; in dax_iomap_pmd_fault() local
1468 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); in dax_iomap_pmd_fault()
1473 struct inode *inode = mapping->host; in dax_iomap_pmd_fault()
1526 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); in dax_iomap_pmd_fault()
1565 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, in dax_iomap_pmd_fault()
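The PMD path is the same shape at huge-page granularity: XA_STATE_ORDER() creates the cursor at PMD_ORDER (line 1468), so a single multi-index XArray entry spans the whole huge page, and grab_mapping_entry() is asked for a PMD-sized entry (line 1526):

    XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);  /* line 1468 */
    entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);           /* line 1526 */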
1669 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_insert_pfn_mkwrite() local
1670 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); in dax_insert_pfn_mkwrite()
1681 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, in dax_insert_pfn_mkwrite()
1697 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); in dax_insert_pfn_mkwrite()