Lines matching the full word "mapping" in fs/dax.c (identifier cross-reference)
118 struct address_space *mapping; member
127 static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping, in dax_entry_waitqueue() argument
140 key->mapping = mapping; in dax_entry_waitqueue()
143 hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS); in dax_entry_waitqueue()
154 if (key->mapping != ewait->key.mapping || in wake_exceptional_entry_func()
161 * @entry may no longer be the entry at the index in the mapping.
165 static void dax_wake_mapping_entry_waiter(struct address_space *mapping, in dax_wake_mapping_entry_waiter() argument
171 wq = dax_entry_waitqueue(mapping, index, entry, &key); in dax_wake_mapping_entry_waiter()
187 static inline int slot_locked(struct address_space *mapping, void **slot) in slot_locked() argument
190 radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock); in slot_locked()
197 static inline void *lock_slot(struct address_space *mapping, void **slot) in lock_slot() argument
200 radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock); in lock_slot()
203 radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry); in lock_slot()
210 static inline void *unlock_slot(struct address_space *mapping, void **slot) in unlock_slot() argument
213 radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock); in unlock_slot()
216 radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry); in unlock_slot()
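The three slot helpers above implement DAX's per-entry lock: rather than a sleeping lock per index, a dedicated bit inside the exceptional radix tree value marks the entry as locked, and lock_slot()/unlock_slot() rewrite the slot under the i_pages lock while slot_locked() tests the bit. The stand-alone user-space sketch below illustrates only that bit-in-value technique; ENTRY_LOCK and the value layout are simplified stand-ins, not the kernel's RADIX_DAX_* encoding, and the tree lock that normally protects the slot is not modelled.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ENTRY_LOCK 1UL          /* low bit of the stored value acts as the lock */

    static bool slot_locked(const uintptr_t *slot)
    {
            return *slot & ENTRY_LOCK;
    }

    static uintptr_t lock_slot(uintptr_t *slot)
    {
            *slot |= ENTRY_LOCK;    /* in the kernel this happens under xa_lock_irq() */
            return *slot;
    }

    static uintptr_t unlock_slot(uintptr_t *slot)
    {
            *slot &= ~ENTRY_LOCK;   /* waiters on this entry are then woken */
            return *slot;
    }

    int main(void)
    {
            uintptr_t slot = 0x1000;        /* pretend this encodes a pfn */

            assert(!slot_locked(&slot));
            lock_slot(&slot);
            assert(slot_locked(&slot));
            unlock_slot(&slot);
            printf("entry value restored to %#lx\n", (unsigned long)slot);
            return 0;
    }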
220 static void put_unlocked_mapping_entry(struct address_space *mapping,
232 static void *get_unlocked_mapping_entry(struct address_space *mapping, in get_unlocked_mapping_entry() argument
243 entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, in get_unlocked_mapping_entry()
247 !slot_locked(mapping, slot)) { in get_unlocked_mapping_entry()
253 wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key); in get_unlocked_mapping_entry()
256 xa_unlock_irq(&mapping->i_pages); in get_unlocked_mapping_entry()
259 xa_lock_irq(&mapping->i_pages); in get_unlocked_mapping_entry()
268 static void wait_entry_unlocked(struct address_space *mapping, pgoff_t index, in wait_entry_unlocked() argument
277 wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key); in wait_entry_unlocked()
285 xa_unlock_irq(&mapping->i_pages); in wait_entry_unlocked()
290 static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index) in unlock_mapping_entry() argument
294 xa_lock_irq(&mapping->i_pages); in unlock_mapping_entry()
295 entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot); in unlock_mapping_entry()
297 !slot_locked(mapping, slot))) { in unlock_mapping_entry()
298 xa_unlock_irq(&mapping->i_pages); in unlock_mapping_entry()
301 unlock_slot(mapping, slot); in unlock_mapping_entry()
302 xa_unlock_irq(&mapping->i_pages); in unlock_mapping_entry()
303 dax_wake_mapping_entry_waiter(mapping, index, entry, false); in unlock_mapping_entry()
306 static void put_locked_mapping_entry(struct address_space *mapping, in put_locked_mapping_entry() argument
309 unlock_mapping_entry(mapping, index); in put_locked_mapping_entry()
316 static void put_unlocked_mapping_entry(struct address_space *mapping, in put_unlocked_mapping_entry() argument
323 dax_wake_mapping_entry_waiter(mapping, index, entry, false); in put_unlocked_mapping_entry()
356 static void dax_associate_entry(void *entry, struct address_space *mapping, in dax_associate_entry() argument
369 WARN_ON_ONCE(page->mapping); in dax_associate_entry()
370 page->mapping = mapping; in dax_associate_entry()
375 static void dax_disassociate_entry(void *entry, struct address_space *mapping, in dax_disassociate_entry() argument
387 WARN_ON_ONCE(page->mapping && page->mapping != mapping); in dax_disassociate_entry()
388 page->mapping = NULL; in dax_disassociate_entry()
412 struct address_space *mapping; in dax_lock_mapping_entry() local
416 mapping = READ_ONCE(page->mapping); in dax_lock_mapping_entry()
418 if (!mapping || !dax_mapping(mapping)) in dax_lock_mapping_entry()
428 inode = mapping->host; in dax_lock_mapping_entry()
434 xa_lock_irq(&mapping->i_pages); in dax_lock_mapping_entry()
435 if (mapping != page->mapping) { in dax_lock_mapping_entry()
436 xa_unlock_irq(&mapping->i_pages); in dax_lock_mapping_entry()
441 entry = __radix_tree_lookup(&mapping->i_pages, index, in dax_lock_mapping_entry()
444 xa_unlock_irq(&mapping->i_pages); in dax_lock_mapping_entry()
446 } else if (slot_locked(mapping, slot)) { in dax_lock_mapping_entry()
448 wait_entry_unlocked(mapping, index, &slot, entry); in dax_lock_mapping_entry()
452 lock_slot(mapping, slot); in dax_lock_mapping_entry()
454 xa_unlock_irq(&mapping->i_pages); in dax_lock_mapping_entry()
464 struct address_space *mapping = page->mapping; in dax_unlock_mapping_entry() local
465 struct inode *inode = mapping->host; in dax_unlock_mapping_entry()
470 unlock_mapping_entry(mapping, page->index); in dax_unlock_mapping_entry()
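dax_lock_mapping_entry() and dax_unlock_mapping_entry() expose the same entry lock to code outside fs/dax.c that starts from a struct page rather than from a file, memory poisoning being the main user. Below is a hedged sketch of that calling pattern, assuming the prototypes of this kernel revision (take a struct page, return bool on success); later kernels reworked this interface, and example_handle_dax_page() is an illustrative name.

    #include <linux/dax.h>
    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/printk.h>

    /*
     * Illustrative caller in the style of mm/memory-failure.c: pin the
     * DAX entry behind @page so page->mapping and page->index cannot
     * change while the page is examined.
     */
    static int example_handle_dax_page(struct page *page)
    {
            if (!dax_lock_mapping_entry(page))
                    return -EBUSY;  /* entry vanished or mapping is not DAX */

            pr_info("dax page maps file offset index %lu\n", page->index);

            dax_unlock_mapping_entry(page);
            return 0;
    }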
499 static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index, in grab_mapping_entry() argument
506 xa_lock_irq(&mapping->i_pages); in grab_mapping_entry()
507 entry = get_unlocked_mapping_entry(mapping, index, &slot); in grab_mapping_entry()
517 put_unlocked_mapping_entry(mapping, index, in grab_mapping_entry()
540 entry = lock_slot(mapping, slot); in grab_mapping_entry()
543 xa_unlock_irq(&mapping->i_pages); in grab_mapping_entry()
550 unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, in grab_mapping_entry()
554 mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM); in grab_mapping_entry()
557 put_locked_mapping_entry(mapping, index); in grab_mapping_entry()
560 xa_lock_irq(&mapping->i_pages); in grab_mapping_entry()
569 entry = __radix_tree_lookup(&mapping->i_pages, index, in grab_mapping_entry()
573 xa_unlock_irq(&mapping->i_pages); in grab_mapping_entry()
579 dax_disassociate_entry(entry, mapping, false); in grab_mapping_entry()
580 radix_tree_delete(&mapping->i_pages, index); in grab_mapping_entry()
581 mapping->nrexceptional--; in grab_mapping_entry()
582 dax_wake_mapping_entry_waiter(mapping, index, entry, in grab_mapping_entry()
588 err = __radix_tree_insert(&mapping->i_pages, index, in grab_mapping_entry()
592 xa_unlock_irq(&mapping->i_pages); in grab_mapping_entry()
604 mapping->nrexceptional++; in grab_mapping_entry()
605 xa_unlock_irq(&mapping->i_pages); in grab_mapping_entry()
608 entry = lock_slot(mapping, slot); in grab_mapping_entry()
610 xa_unlock_irq(&mapping->i_pages); in grab_mapping_entry()
615 * dax_layout_busy_page - find first pinned page in @mapping
616 * @mapping: address space to scan for a page with ref count > 1
621 * any page in the mapping is busy, i.e. for DMA, or other
629 struct page *dax_layout_busy_page(struct address_space *mapping) in dax_layout_busy_page() argument
643 if (!dax_mapping(mapping) || !mapping_mapped(mapping)) in dax_layout_busy_page()
662 unmap_mapping_range(mapping, 0, 0, 0); in dax_layout_busy_page()
664 while (index < end && pagevec_lookup_entries(&pvec, mapping, index, in dax_layout_busy_page()
681 xa_lock_irq(&mapping->i_pages); in dax_layout_busy_page()
682 entry = get_unlocked_mapping_entry(mapping, index, NULL); in dax_layout_busy_page()
692 put_unlocked_mapping_entry(mapping, index, entry); in dax_layout_busy_page()
693 xa_unlock_irq(&mapping->i_pages); in dax_layout_busy_page()
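dax_layout_busy_page() unmaps the file and returns the first page whose reference count shows an extra reference (get_user_pages()/DMA), so a filesystem can wait for that reference to drop before truncating or punching a hole. Below is a hedged sketch of the caller-side loop, modelled loosely on the XFS break-layouts logic; the real callers wait killably and drop/retake their locks between retries, and example_break_dax_layouts() is an illustrative name.

    #include <linux/dax.h>
    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/wait_bit.h>

    /*
     * Keep asking for a busy page and wait for its extra references to
     * drop.  The fsdax page-free path wakes &page->_refcount once the
     * count falls back to one, so the wait below eventually completes.
     */
    static int example_break_dax_layouts(struct inode *inode)
    {
            struct page *page;

            for (;;) {
                    page = dax_layout_busy_page(inode->i_mapping);
                    if (!page)
                            return 0;       /* no pinned pages left */

                    wait_var_event(&page->_refcount,
                                   atomic_read(&page->_refcount) == 1);
            }
    }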
715 static int __dax_invalidate_mapping_entry(struct address_space *mapping, in __dax_invalidate_mapping_entry() argument
720 struct radix_tree_root *pages = &mapping->i_pages; in __dax_invalidate_mapping_entry()
723 entry = get_unlocked_mapping_entry(mapping, index, NULL); in __dax_invalidate_mapping_entry()
730 dax_disassociate_entry(entry, mapping, trunc); in __dax_invalidate_mapping_entry()
732 mapping->nrexceptional--; in __dax_invalidate_mapping_entry()
735 put_unlocked_mapping_entry(mapping, index, entry); in __dax_invalidate_mapping_entry()
740 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
743 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) in dax_delete_mapping_entry() argument
745 int ret = __dax_invalidate_mapping_entry(mapping, index, true); in dax_delete_mapping_entry()
761 int dax_invalidate_mapping_entry_sync(struct address_space *mapping, in dax_invalidate_mapping_entry_sync() argument
764 return __dax_invalidate_mapping_entry(mapping, index, false); in dax_invalidate_mapping_entry_sync()
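__dax_invalidate_mapping_entry() backs two entry points with different rules: dax_delete_mapping_entry() (truncation, may drop any entry) and dax_invalidate_mapping_entry_sync() (invalidation, must leave dirty entries so a later fsync still finds them). A minimal sketch of that contract follows, using the two prototypes listed above; the loop and helper name are illustrative, the real callers live in mm/truncate.c.

    #include <linux/dax.h>
    #include <linux/fs.h>
    #include <linux/pagemap.h>

    /*
     * Illustrative wrapper showing the two invalidation contracts:
     * truncation removes entries unconditionally, while non-truncating
     * invalidation skips dirty entries.
     */
    static void example_drop_dax_range(struct address_space *mapping,
                                       pgoff_t start, pgoff_t end,
                                       bool truncating)
    {
            pgoff_t index;

            for (index = start; index <= end; index++) {
                    if (truncating)
                            dax_delete_mapping_entry(mapping, index);
                    else
                            dax_invalidate_mapping_entry_sync(mapping, index);
            }
    }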
800 static void *dax_insert_mapping_entry(struct address_space *mapping, in dax_insert_mapping_entry() argument
805 struct radix_tree_root *pages = &mapping->i_pages; in dax_insert_mapping_entry()
811 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); in dax_insert_mapping_entry()
814 /* we are replacing a zero page with block mapping */ in dax_insert_mapping_entry()
816 unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, in dax_insert_mapping_entry()
819 unmap_mapping_pages(mapping, vmf->pgoff, 1, false); in dax_insert_mapping_entry()
825 dax_disassociate_entry(entry, mapping, false); in dax_insert_mapping_entry()
826 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address); in dax_insert_mapping_entry()
867 static void dax_mapping_entry_mkclean(struct address_space *mapping, in dax_mapping_entry_mkclean() argument
875 i_mmap_lock_read(mapping); in dax_mapping_entry_mkclean()
876 vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) { in dax_mapping_entry_mkclean()
935 i_mmap_unlock_read(mapping); in dax_mapping_entry_mkclean()
939 struct address_space *mapping, pgoff_t index, void *entry) in dax_writeback_one() argument
941 struct radix_tree_root *pages = &mapping->i_pages; in dax_writeback_one()
948 * A page got tagged dirty in DAX mapping? Something is seriously in dax_writeback_one()
955 entry2 = get_unlocked_mapping_entry(mapping, index, &slot); in dax_writeback_one()
976 entry = lock_slot(mapping, slot); in dax_writeback_one()
997 dax_mapping_entry_mkclean(mapping, index, pfn); in dax_writeback_one()
1002 * the pfn mappings are writeprotected and fault waits for mapping in dax_writeback_one()
1008 trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT); in dax_writeback_one()
1009 put_locked_mapping_entry(mapping, index); in dax_writeback_one()
1013 put_unlocked_mapping_entry(mapping, index, entry2); in dax_writeback_one()
1019 * Flush the mapping to the persistent domain within the byte range of [start,
1023 int dax_writeback_mapping_range(struct address_space *mapping, in dax_writeback_mapping_range() argument
1026 struct inode *inode = mapping->host; in dax_writeback_mapping_range()
1037 if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL) in dax_writeback_mapping_range()
1049 tag_pages_for_writeback(mapping, start_index, end_index); in dax_writeback_mapping_range()
1053 pvec.nr = find_get_entries_tag(mapping, start_index, in dax_writeback_mapping_range()
1066 ret = dax_writeback_one(dax_dev, mapping, indices[i], in dax_writeback_mapping_range()
1069 mapping_set_error(mapping, ret); in dax_writeback_mapping_range()
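dax_writeback_mapping_range() is what a DAX filesystem's ->writepages implementation calls so that fsync()/msync() walk the dirty-tagged radix tree entries and flush the corresponding pmem. A sketch of that hook follows, assuming the signature of this kernel revision, where the second argument is the backing block device (later kernels pass the dax_device instead); example_dax_writepages() is an illustrative name.

    #include <linux/dax.h>
    #include <linux/fs.h>
    #include <linux/writeback.h>

    /*
     * Illustrative ->writepages hook for a DAX filesystem (ext2-style).
     */
    static int example_dax_writepages(struct address_space *mapping,
                                      struct writeback_control *wbc)
    {
            return dax_writeback_mapping_range(mapping,
                            mapping->host->i_sb->s_bdev, wbc);
    }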
1122 * sparse files. Instead we insert a read-only mapping of the 4k zero page.
1123 * If this page is ever written to we will re-fault and change the mapping to
1126 static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry, in dax_load_hole() argument
1129 struct inode *inode = mapping->host; in dax_load_hole()
1134 dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE, in dax_load_hole()
1292 struct address_space *mapping = iocb->ki_filp->f_mapping; in dax_iomap_rw() local
1293 struct inode *inode = mapping->host; in dax_iomap_rw()
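dax_iomap_rw() is the synchronous read/write engine: the filesystem's ->read_iter/->write_iter take their own locks and pass the iocb, the iterator and their iomap_ops, which translate file offsets into pmem blocks. Below is a hedged sketch of an ext2-style read path; example_dax_read_iter() and example_iomap_ops are placeholders for the filesystem's own names.

    #include <linux/dax.h>
    #include <linux/fs.h>
    #include <linux/iomap.h>
    #include <linux/uio.h>

    extern const struct iomap_ops example_iomap_ops;    /* supplied by the fs */

    /*
     * Illustrative DAX ->read_iter: take the inode lock shared, let
     * dax_iomap_rw() walk the extents via the filesystem's iomap_ops,
     * then update atime.
     */
    static ssize_t example_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
    {
            struct inode *inode = iocb->ki_filp->f_mapping->host;
            ssize_t ret;

            if (!iov_iter_count(to))
                    return 0;               /* nothing to read, skip atime */

            inode_lock_shared(inode);
            ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
            inode_unlock_shared(inode);

            file_accessed(iocb->ki_filp);
            return ret;
    }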
1331 * MAP_SYNC on a dax mapping guarantees dirty metadata is
1345 struct address_space *mapping = vma->vm_file->f_mapping; in dax_iomap_pte_fault() local
1346 struct inode *inode = mapping->host; in dax_iomap_pte_fault()
1372 entry = grab_mapping_entry(mapping, vmf->pgoff, 0); in dax_iomap_pte_fault()
1447 entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn, in dax_iomap_pte_fault()
1475 ret = dax_load_hole(mapping, entry, vmf); in dax_iomap_pte_fault()
1502 put_locked_mapping_entry(mapping, vmf->pgoff); in dax_iomap_pte_fault()
1512 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_pmd_load_hole() local
1514 struct inode *inode = mapping->host; in dax_pmd_load_hole()
1527 ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn, in dax_pmd_load_hole()
1552 struct address_space *mapping = vma->vm_file->f_mapping; in dax_iomap_pmd_fault() local
1557 struct inode *inode = mapping->host; in dax_iomap_pmd_fault()
1611 entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD); in dax_iomap_pmd_fault()
1629 * setting up a mapping, so really we're using iomap_begin() as a way in dax_iomap_pmd_fault()
1648 entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn, in dax_iomap_pmd_fault()
1695 put_locked_mapping_entry(mapping, pgoff); in dax_iomap_pmd_fault()
1754 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_insert_pfn_mkwrite() local
1759 xa_lock_irq(&mapping->i_pages); in dax_insert_pfn_mkwrite()
1760 entry = get_unlocked_mapping_entry(mapping, index, &slot); in dax_insert_pfn_mkwrite()
1765 put_unlocked_mapping_entry(mapping, index, entry); in dax_insert_pfn_mkwrite()
1766 xa_unlock_irq(&mapping->i_pages); in dax_insert_pfn_mkwrite()
1767 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, in dax_insert_pfn_mkwrite()
1771 radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY); in dax_insert_pfn_mkwrite()
1772 entry = lock_slot(mapping, slot); in dax_insert_pfn_mkwrite()
1773 xa_unlock_irq(&mapping->i_pages); in dax_insert_pfn_mkwrite()
1786 put_locked_mapping_entry(mapping, index); in dax_insert_pfn_mkwrite()
1787 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); in dax_insert_pfn_mkwrite()
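dax_insert_pfn_mkwrite() is the tail end of the MAP_SYNC protocol: dax_iomap_fault() returns VM_FAULT_NEEDDSYNC instead of installing a writable mapping, the filesystem persists the allocating metadata, and dax_finish_sync_fault() then redoes the insert with the saved pfn. A sketch of the fault-handler side of that protocol follows, with the fsync step and filesystem locking elided; example_dax_huge_fault() and example_iomap_ops are illustrative names, and the pattern is modelled on the XFS/ext4 fault handlers of this era.

    #include <linux/dax.h>
    #include <linux/iomap.h>
    #include <linux/mm.h>
    #include <linux/pfn_t.h>

    extern const struct iomap_ops example_iomap_ops;    /* supplied by the fs */

    /*
     * Illustrative ->fault/->huge_fault handler covering the MAP_SYNC
     * round trip that ends in dax_insert_pfn_mkwrite().
     */
    static vm_fault_t example_dax_huge_fault(struct vm_fault *vmf,
                                             enum page_entry_size pe_size)
    {
            pfn_t pfn;
            vm_fault_t ret;

            ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &example_iomap_ops);
            if (ret & VM_FAULT_NEEDDSYNC) {
                    /* MAP_SYNC: fsync the faulting range here, then finish. */
                    ret = dax_finish_sync_fault(vmf, pe_size, pfn);
            }
            return ret;
    }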