Lines matching refs:entry in fs/dax.c. Each line gives the source line number, the code at that line, the enclosing function, and whether entry is an argument or a local variable there.
83 static unsigned long dax_to_pfn(void *entry) in dax_to_pfn() argument
85 return xa_to_value(entry) >> DAX_SHIFT; in dax_to_pfn()
93 static bool dax_is_locked(void *entry) in dax_is_locked() argument
95 return xa_to_value(entry) & DAX_LOCKED; in dax_is_locked()
98 static unsigned int dax_entry_order(void *entry) in dax_entry_order() argument
100 if (xa_to_value(entry) & DAX_PMD) in dax_entry_order()
105 static unsigned long dax_is_pmd_entry(void *entry) in dax_is_pmd_entry() argument
107 return xa_to_value(entry) & DAX_PMD; in dax_is_pmd_entry()
110 static bool dax_is_pte_entry(void *entry) in dax_is_pte_entry() argument
112 return !(xa_to_value(entry) & DAX_PMD); in dax_is_pte_entry()
115 static int dax_is_zero_entry(void *entry) in dax_is_zero_entry() argument
117 return xa_to_value(entry) & DAX_ZERO_PAGE; in dax_is_zero_entry()
120 static int dax_is_empty_entry(void *entry) in dax_is_empty_entry() argument
122 return xa_to_value(entry) & DAX_EMPTY; in dax_is_empty_entry()
129 static bool dax_is_conflict(void *entry) in dax_is_conflict() argument
131 return entry == XA_RETRY_ENTRY; in dax_is_conflict()
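
Taken together, the helpers above (source lines 83-131) pin down the layout of a DAX XArray entry: the low DAX_SHIFT bits carry flag bits (DAX_LOCKED, DAX_PMD, DAX_ZERO_PAGE, DAX_EMPTY) and the remaining bits carry the pfn. dax_is_conflict() is the odd one out: it tests for the XArray-internal XA_RETRY_ENTRY sentinel, which get_unlocked_entry() returns when the entry it found is of smaller order than requested. A minimal userspace sketch of the packing, using the flag values from fs/dax.c but modeling the xa_mk_value()/xa_to_value() tagging as plain integers:

/* Minimal userspace model of the DAX entry encoding in fs/dax.c. */
#include <assert.h>
#include <stdio.h>

#define DAX_SHIFT      4            /* low bits hold the flags below */
#define DAX_LOCKED     (1UL << 0)
#define DAX_PMD        (1UL << 1)
#define DAX_ZERO_PAGE  (1UL << 2)
#define DAX_EMPTY      (1UL << 3)

static unsigned long dax_make_entry(unsigned long pfn, unsigned long flags)
{
	return flags | (pfn << DAX_SHIFT);
}

static unsigned long dax_to_pfn(unsigned long entry)
{
	return entry >> DAX_SHIFT;
}

int main(void)
{
	unsigned long e = dax_make_entry(0x1234, DAX_PMD);

	assert(dax_to_pfn(e) == 0x1234);	/* dax_to_pfn(), line 85 */
	assert(e & DAX_PMD);			/* dax_is_pmd_entry(), line 107 */
	assert(!(e & DAX_LOCKED));		/* !dax_is_locked(), line 95 */
	printf("entry=%#lx pfn=%#lx\n", e, dax_to_pfn(e));
	return 0;
}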
148 void *entry, struct exceptional_entry_key *key) in dax_entry_waitqueue() argument
158 if (dax_is_pmd_entry(entry)) in dax_entry_waitqueue()
185 static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all) in dax_wake_entry() argument
190 wq = dax_entry_waitqueue(xas, entry, &key); in dax_wake_entry()
214 void *entry; in get_unlocked_entry() local
222 entry = xas_find_conflict(xas); in get_unlocked_entry()
223 if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) in get_unlocked_entry()
224 return entry; in get_unlocked_entry()
225 if (dax_entry_order(entry) < order) in get_unlocked_entry()
227 if (!dax_is_locked(entry)) in get_unlocked_entry()
228 return entry; in get_unlocked_entry()
230 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in get_unlocked_entry()
246 static void wait_entry_unlocked(struct xa_state *xas, void *entry) in wait_entry_unlocked() argument
254 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in wait_entry_unlocked()
267 static void put_unlocked_entry(struct xa_state *xas, void *entry) in put_unlocked_entry() argument
270 if (entry && !dax_is_conflict(entry)) in put_unlocked_entry()
271 dax_wake_entry(xas, entry, false); in put_unlocked_entry()
279 static void dax_unlock_entry(struct xa_state *xas, void *entry) in dax_unlock_entry() argument
283 BUG_ON(dax_is_locked(entry)); in dax_unlock_entry()
286 old = xas_store(xas, entry); in dax_unlock_entry()
289 dax_wake_entry(xas, entry, false); in dax_unlock_entry()
295 static void *dax_lock_entry(struct xa_state *xas, void *entry) in dax_lock_entry() argument
297 unsigned long v = xa_to_value(entry); in dax_lock_entry()
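
The block from dax_entry_waitqueue() through dax_lock_entry() (source lines 148-297) implements the entry lock: the lock is the DAX_LOCKED bit inside the stored value itself, and contended waiters sleep on a hashed wait queue keyed by the xarray and index, so dax_wake_entry() only wakes tasks interested in that particular entry. get_unlocked_entry() loops until the bit clears, dax_lock_entry() stores the value with the bit set, and dax_unlock_entry() stores the unlocked value back and wakes one waiter. A rough userspace analogue, with a pthread condvar standing in for the wait queue and a mutex standing in for xa_lock (this sketches the protocol only, not the real hashed-waitqueue machinery):

/* Userspace analogue of the DAX entry lock protocol. Illustrative:
 * in the kernel, get_unlocked_entry() returns with xa_lock still
 * held so the caller can lock the entry race-free. */
#include <pthread.h>
#include <stdio.h>

#define DAX_LOCKED	(1UL << 0)

static unsigned long slot = 0x42UL << 4;	/* an unlocked entry */
static pthread_mutex_t xa_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t entry_waitq = PTHREAD_COND_INITIALIZER;

/* get_unlocked_entry(): sleep until DAX_LOCKED clears, return the value. */
static unsigned long get_unlocked_entry(void)
{
	unsigned long entry;

	pthread_mutex_lock(&xa_lock);
	while (slot & DAX_LOCKED)		/* wait_entry_unlocked() */
		pthread_cond_wait(&entry_waitq, &xa_lock);
	entry = slot;
	pthread_mutex_unlock(&xa_lock);
	return entry;
}

/* dax_lock_entry(): store the value back with the lock bit set. */
static void dax_lock_entry(void)
{
	pthread_mutex_lock(&xa_lock);
	slot |= DAX_LOCKED;
	pthread_mutex_unlock(&xa_lock);
}

/* dax_unlock_entry(): store the unlocked value, then wake one waiter. */
static void dax_unlock_entry(void)
{
	pthread_mutex_lock(&xa_lock);
	slot &= ~DAX_LOCKED;
	pthread_mutex_unlock(&xa_lock);
	pthread_cond_signal(&entry_waitq);	/* dax_wake_entry(wake_all=false) */
}

int main(void)
{
	unsigned long entry = get_unlocked_entry();

	dax_lock_entry();
	printf("holding entry %#lx\n", entry);
	dax_unlock_entry();
	return 0;
}

The wake_all case (the true passed to dax_wake_entry() at source line 527) would correspond to pthread_cond_broadcast() here: when a PMD entry is torn down, waiters keyed on any index under it must all recheck.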
301 static unsigned long dax_entry_size(void *entry) in dax_entry_size() argument
303 if (dax_is_zero_entry(entry)) in dax_entry_size()
305 else if (dax_is_empty_entry(entry)) in dax_entry_size()
307 else if (dax_is_pmd_entry(entry)) in dax_entry_size()
313 static unsigned long dax_end_pfn(void *entry) in dax_end_pfn() argument
315 return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE; in dax_end_pfn()
322 #define for_each_mapped_pfn(entry, pfn) \ argument
323 for (pfn = dax_to_pfn(entry); \
324 pfn < dax_end_pfn(entry); pfn++)
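
dax_entry_size() and dax_end_pfn() (source lines 301-324) turn the flags into a page count: zero-page and empty entries map no device pages at all, PMD entries map PMD_SIZE bytes, and everything else maps one page; for_each_mapped_pfn() then walks exactly that many pfns. A sketch, assuming 4 KiB pages and 2 MiB PMDs (both arch-dependent):

/* Sketch: how many pfns back one DAX entry. */
#include <stdio.h>

#define PAGE_SIZE      4096UL
#define PMD_SIZE       (2UL * 1024 * 1024)
#define DAX_SHIFT      4
#define DAX_PMD        (1UL << 1)
#define DAX_ZERO_PAGE  (1UL << 2)
#define DAX_EMPTY      (1UL << 3)

static unsigned long dax_to_pfn(unsigned long entry)
{
	return entry >> DAX_SHIFT;
}

/* Zero-page and empty entries have no device pages behind them. */
static unsigned long dax_entry_size(unsigned long entry)
{
	if (entry & (DAX_ZERO_PAGE | DAX_EMPTY))
		return 0;
	return (entry & DAX_PMD) ? PMD_SIZE : PAGE_SIZE;
}

static unsigned long dax_end_pfn(unsigned long entry)
{
	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_to_pfn(entry); pfn < dax_end_pfn(entry); pfn++)

int main(void)
{
	unsigned long pmd_entry = (0x1000UL << DAX_SHIFT) | DAX_PMD;
	unsigned long pfn, n = 0;

	for_each_mapped_pfn(pmd_entry, pfn)
		n++;
	printf("PMD entry spans %lu pfns\n", n);	/* 512 with these sizes */
	return 0;
}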
331 static void dax_associate_entry(void *entry, struct address_space *mapping, in dax_associate_entry() argument
334 unsigned long size = dax_entry_size(entry), pfn, index; in dax_associate_entry()
341 for_each_mapped_pfn(entry, pfn) { in dax_associate_entry()
350 static void dax_disassociate_entry(void *entry, struct address_space *mapping, in dax_disassociate_entry() argument
358 for_each_mapped_pfn(entry, pfn) { in dax_disassociate_entry()
368 static struct page *dax_busy_page(void *entry) in dax_busy_page() argument
372 for_each_mapped_pfn(entry, pfn) { in dax_busy_page()
392 void *entry; in dax_lock_page() local
399 entry = NULL; in dax_lock_page()
410 entry = (void *)~0UL; in dax_lock_page()
421 entry = xas_load(&xas); in dax_lock_page()
422 if (dax_is_locked(entry)) { in dax_lock_page()
424 wait_entry_unlocked(&xas, entry); in dax_lock_page()
428 dax_lock_entry(&xas, entry); in dax_lock_page()
433 return (dax_entry_t)entry; in dax_lock_page()
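
dax_lock_page() (source lines 392-433) also shows the standard retry idiom: load the entry under xa_lock, and if DAX_LOCKED is set, drop the lock, sleep in wait_entry_unlocked(), and restart from the top, since the entry may have been freed or replaced while sleeping. Its dax_entry_t return value is three-way, which callers must decode; a hedged sketch of that decoding (the ~0UL device-dax sentinel is the store at source line 410):

/* Sketch: interpreting dax_lock_page()'s dax_entry_t return value. */
#include <stdio.h>

typedef unsigned long dax_entry_t;

static const char *describe(dax_entry_t cookie)
{
	if (!cookie)
		return "not a DAX mapping (lock failed)";
	if (cookie == ~0UL)
		return "device-dax page (pagemap pin suffices, no entry lock)";
	return "fsdax entry, now locked";
}

int main(void)
{
	dax_entry_t samples[] = { 0, ~0UL, (0x1234UL << 4) | 1 };

	for (unsigned int i = 0; i < 3; i++)
		printf("%#lx: %s\n", samples[i], describe(samples[i]));
	return 0;
}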
481 void *entry; in grab_mapping_entry() local
485 entry = get_unlocked_entry(xas, order); in grab_mapping_entry()
487 if (entry) { in grab_mapping_entry()
488 if (dax_is_conflict(entry)) in grab_mapping_entry()
490 if (!xa_is_value(entry)) { in grab_mapping_entry()
496 if (dax_is_pmd_entry(entry) && in grab_mapping_entry()
497 (dax_is_zero_entry(entry) || in grab_mapping_entry()
498 dax_is_empty_entry(entry))) { in grab_mapping_entry()
509 dax_lock_entry(xas, entry); in grab_mapping_entry()
516 if (dax_is_zero_entry(entry)) { in grab_mapping_entry()
525 dax_disassociate_entry(entry, mapping, false); in grab_mapping_entry()
527 dax_wake_entry(xas, entry, true); in grab_mapping_entry()
529 entry = NULL; in grab_mapping_entry()
533 if (entry) { in grab_mapping_entry()
534 dax_lock_entry(xas, entry); in grab_mapping_entry()
540 entry = dax_make_entry(pfn_to_pfn_t(0), flags); in grab_mapping_entry()
541 dax_lock_entry(xas, entry); in grab_mapping_entry()
555 return entry; in grab_mapping_entry()
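
grab_mapping_entry() (source lines 481-555) is the fault path's entry factory and has four outcomes: create a fresh locked DAX_EMPTY placeholder, lock and reuse what is there, fall back when get_unlocked_entry() reports a conflict (a PMD fault over existing PTE entries), or downgrade when a PTE fault hits a zero/empty PMD entry, in which case the PMD entry is unmapped and freed and all waiters are woken (source line 527) before retrying. An illustrative classifier for those cases, with the XArray machinery elided and the conflict test flattened to a boolean:

#include <stdbool.h>
#include <stdio.h>

#define DAX_PMD        (1UL << 1)
#define DAX_ZERO_PAGE  (1UL << 2)
#define DAX_EMPTY      (1UL << 3)

enum grab_action {
	GRAB_NEW,	/* no entry: insert a locked DAX_EMPTY placeholder */
	GRAB_REUSE,	/* lock and return the existing entry */
	GRAB_DOWNGRADE,	/* PTE wanted under a zero/empty PMD: unmap, free, retry */
	GRAB_CONFLICT,	/* smaller entry found for a PMD grab: caller falls back */
};

static enum grab_action classify(bool found, bool conflict,
				 unsigned long entry, unsigned int order)
{
	if (!found)
		return GRAB_NEW;
	if (conflict)
		return GRAB_CONFLICT;
	if (order == 0 && (entry & DAX_PMD) &&
	    (entry & (DAX_ZERO_PAGE | DAX_EMPTY)))
		return GRAB_DOWNGRADE;	/* the wake_all path at line 527 */
	return GRAB_REUSE;
}

int main(void)
{
	/* A PTE-order grab hitting an empty PMD entry forces a downgrade. */
	printf("%d\n", classify(true, false, DAX_PMD | DAX_EMPTY, 0));
	return 0;
}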
579 void *entry; in dax_layout_busy_page() local
607 xas_for_each(&xas, entry, ULONG_MAX) { in dax_layout_busy_page()
608 if (WARN_ON_ONCE(!xa_is_value(entry))) in dax_layout_busy_page()
610 if (unlikely(dax_is_locked(entry))) in dax_layout_busy_page()
611 entry = get_unlocked_entry(&xas, 0); in dax_layout_busy_page()
612 if (entry) in dax_layout_busy_page()
613 page = dax_busy_page(entry); in dax_layout_busy_page()
614 put_unlocked_entry(&xas, entry); in dax_layout_busy_page()
635 void *entry; in __dax_invalidate_entry() local
638 entry = get_unlocked_entry(&xas, 0); in __dax_invalidate_entry()
639 if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) in __dax_invalidate_entry()
645 dax_disassociate_entry(entry, mapping, trunc); in __dax_invalidate_entry()
650 put_unlocked_entry(&xas, entry); in __dax_invalidate_entry()
718 void *entry, pfn_t pfn, unsigned long flags, bool dirty) in dax_insert_entry() argument
725 if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) { in dax_insert_entry()
728 if (dax_is_pmd_entry(entry)) in dax_insert_entry()
737 if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { in dax_insert_entry()
740 dax_disassociate_entry(entry, mapping, false); in dax_insert_entry()
751 WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) | in dax_insert_entry()
753 entry = new_entry; in dax_insert_entry()
762 return entry; in dax_insert_entry()
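
dax_insert_entry() (source lines 718-762) only changes which pfn an entry points at when the old entry was a zero-page or empty placeholder (source line 737); otherwise the pfn must already match. Because the caller holds the entry lock, the value displaced by the replacing store must be exactly the old entry with DAX_LOCKED set, which the WARN_ON_ONCE at source line 751 checks. A small model of that invariant:

/* Model of the consistency check at line 751. */
#include <assert.h>
#include <stdio.h>

#define DAX_SHIFT   4
#define DAX_LOCKED  (1UL << 0)
#define DAX_EMPTY   (1UL << 3)

int main(void)
{
	unsigned long entry = DAX_EMPTY;		/* unlocked view of old entry */
	unsigned long slot = entry | DAX_LOCKED;	/* what the XArray holds */
	unsigned long new_entry = (0x1234UL << DAX_SHIFT) | DAX_LOCKED;

	/* dax_lock_entry(xas, new_entry) returns the displaced value. */
	unsigned long old = slot;
	slot = new_entry;

	assert(old == (entry | DAX_LOCKED));	/* the line-751 condition */
	printf("replaced %#lx with %#lx\n", old, slot);
	return 0;
}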
850 struct address_space *mapping, void *entry) in dax_writeback_one() argument
859 if (WARN_ON(!xa_is_value(entry))) in dax_writeback_one()
862 if (unlikely(dax_is_locked(entry))) { in dax_writeback_one()
863 void *old_entry = entry; in dax_writeback_one()
865 entry = get_unlocked_entry(xas, 0); in dax_writeback_one()
868 if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) in dax_writeback_one()
875 if (dax_to_pfn(old_entry) != dax_to_pfn(entry)) in dax_writeback_one()
877 if (WARN_ON_ONCE(dax_is_empty_entry(entry) || in dax_writeback_one()
878 dax_is_zero_entry(entry))) { in dax_writeback_one()
889 dax_lock_entry(xas, entry); in dax_writeback_one()
908 pfn = dax_to_pfn(entry); in dax_writeback_one()
909 count = 1UL << dax_entry_order(entry); in dax_writeback_one()
922 xas_store(xas, entry); in dax_writeback_one()
924 dax_wake_entry(xas, entry, false); in dax_writeback_one()
930 put_unlocked_entry(xas, entry); in dax_writeback_one()
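
dax_writeback_one() (source lines 850-930) revalidates the entry after any sleep (a changed pfn at source line 875 means the entry it was asked to flush is already gone), locks it, clears the towrite mark, and then flushes CPU caches over every page the entry maps before clearing the dirty mark. The flush span comes from the entry order alone; a sketch of that computation, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define DAX_SHIFT  4
#define DAX_PMD    (1UL << 1)
#define PMD_ORDER  9	/* PMD_SHIFT - PAGE_SHIFT on x86-64; arch-dependent */

static unsigned int dax_entry_order(unsigned long entry)
{
	return (entry & DAX_PMD) ? PMD_ORDER : 0;
}

int main(void)
{
	unsigned long entry = (0x1000UL << DAX_SHIFT) | DAX_PMD;
	unsigned long count = 1UL << dax_entry_order(entry);	/* line 909 */

	/* The cache flush then covers count * PAGE_SIZE bytes from the pfn. */
	printf("flush %lu pages (%lu bytes)\n", count, count * PAGE_SIZE);
	return 0;
}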
946 void *entry; in dax_writeback_mapping_range() local
965 xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) { in dax_writeback_mapping_range()
966 ret = dax_writeback_one(&xas, dax_dev, mapping, entry); in dax_writeback_mapping_range()
1031 struct address_space *mapping, void **entry, in dax_load_hole() argument
1039 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_load_hole()
1257 void *entry; in dax_iomap_pte_fault() local
1274 entry = grab_mapping_entry(&xas, mapping, 0); in dax_iomap_pte_fault()
1275 if (xa_is_internal(entry)) { in dax_iomap_pte_fault()
1276 ret = xa_to_internal(entry); in dax_iomap_pte_fault()
1349 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, in dax_iomap_pte_fault()
1367 trace_dax_insert_mapping(inode, vmf, entry); in dax_iomap_pte_fault()
1377 ret = dax_load_hole(&xas, mapping, &entry, vmf); in dax_iomap_pte_fault()
1404 dax_unlock_entry(&xas, entry); in dax_iomap_pte_fault()
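
In dax_iomap_pte_fault() (source lines 1257-1404) the entry is held locked across the whole fault: grabbed at source line 1274, pointed at the new pfn or the zero page, and only unlocked at source line 1404. Note the error convention at source lines 1275-1276: grab_mapping_entry() reports failure by returning an XArray internal entry that wraps a vm_fault_t code. A sketch of that encoding, using the XArray convention that internal entries are (value << 2) | 2:

#include <stdio.h>

static void *xa_mk_internal(unsigned long v)
{
	return (void *)((v << 2) | 2);
}

static int xa_is_internal(const void *p)
{
	return ((unsigned long)p & 3) == 2;
}

static unsigned long xa_to_internal(const void *p)
{
	return (unsigned long)p >> 2;
}

int main(void)
{
	void *entry = xa_mk_internal(0x0800);	/* e.g. VM_FAULT_FALLBACK */

	if (xa_is_internal(entry))
		printf("fault path returns %#lx\n", xa_to_internal(entry));
	return 0;
}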
1412 struct iomap *iomap, void **entry) in dax_pmd_load_hole() argument
1430 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_pmd_load_hole()
1453 trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry); in dax_pmd_load_hole()
1459 trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry); in dax_pmd_load_hole()
1477 void *entry; in dax_iomap_pmd_fault() local
1526 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); in dax_iomap_pmd_fault()
1527 if (xa_is_internal(entry)) { in dax_iomap_pmd_fault()
1528 result = xa_to_internal(entry); in dax_iomap_pmd_fault()
1565 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, in dax_iomap_pmd_fault()
1582 trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry); in dax_iomap_pmd_fault()
1589 result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry); in dax_iomap_pmd_fault()
1612 dax_unlock_entry(&xas, entry); in dax_iomap_pmd_fault()
1671 void *entry; in dax_insert_pfn_mkwrite() local
1675 entry = get_unlocked_entry(&xas, order); in dax_insert_pfn_mkwrite()
1677 if (!entry || dax_is_conflict(entry) || in dax_insert_pfn_mkwrite()
1678 (order == 0 && !dax_is_pte_entry(entry))) { in dax_insert_pfn_mkwrite()
1679 put_unlocked_entry(&xas, entry); in dax_insert_pfn_mkwrite()
1686 dax_lock_entry(&xas, entry); in dax_insert_pfn_mkwrite()
1696 dax_unlock_entry(&xas, entry); in dax_insert_pfn_mkwrite()
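
Finally, dax_insert_pfn_mkwrite() (source lines 1671-1696) re-finds the entry after a write-protect fault and bails out early if the entry vanished, is the conflict sentinel, or no longer matches the requested order (source lines 1677-1678); only then does it lock the entry and re-insert the pfn writable. A sketch of that validity test, with found/conflict flattened to booleans for illustration:

#include <stdbool.h>
#include <stdio.h>

#define DAX_PMD  (1UL << 1)

static bool entry_matches_order(unsigned long entry, bool found,
				bool conflict, unsigned int order)
{
	if (!found || conflict)
		return false;
	if (order == 0 && (entry & DAX_PMD))
		return false;	/* PTE wanted but a PMD entry is installed */
	return true;
}

int main(void)
{
	/* A PTE-order mkwrite under a PMD entry must not proceed. */
	printf("%d\n", entry_matches_order(DAX_PMD, true, false, 0));
	return 0;
}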