Lines Matching full:entry
52 /* The order of a PMD entry */
69 * for pages. We use one bit for locking, one bit for the entry size (PMD)
70 * and two more to tell us if the entry is a zero page or an empty entry that
73 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
74 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
83 static unsigned long dax_to_pfn(void *entry) in dax_to_pfn() argument
85 return xa_to_value(entry) >> DAX_SHIFT; in dax_to_pfn()
93 static bool dax_is_locked(void *entry) in dax_is_locked() argument
95 return xa_to_value(entry) & DAX_LOCKED; in dax_is_locked()
98 static unsigned int dax_entry_order(void *entry) in dax_entry_order() argument
100 if (xa_to_value(entry) & DAX_PMD) in dax_entry_order()
105 static unsigned long dax_is_pmd_entry(void *entry) in dax_is_pmd_entry() argument
107 return xa_to_value(entry) & DAX_PMD; in dax_is_pmd_entry()
110 static bool dax_is_pte_entry(void *entry) in dax_is_pte_entry() argument
112 return !(xa_to_value(entry) & DAX_PMD); in dax_is_pte_entry()
115 static int dax_is_zero_entry(void *entry) in dax_is_zero_entry() argument
117 return xa_to_value(entry) & DAX_ZERO_PAGE; in dax_is_zero_entry()
120 static int dax_is_empty_entry(void *entry) in dax_is_empty_entry() argument
122 return xa_to_value(entry) & DAX_EMPTY; in dax_is_empty_entry()
126 * true if the entry that was found is of a smaller order than the entry
129 static bool dax_is_conflict(void *entry) in dax_is_conflict() argument
131 return entry == XA_RETRY_ENTRY; in dax_is_conflict()
135 * DAX page cache entry locking
158 void *entry, struct exceptional_entry_key *key) in dax_entry_waitqueue() argument
164 * If 'entry' is a PMD, align the 'index' that we use for the wait in dax_entry_waitqueue()
168 if (dax_is_pmd_entry(entry)) in dax_entry_waitqueue()
191 * @entry may no longer be the entry at the index in the mapping.
192 * The important information it's conveying is whether the entry at
193 * this index used to be a PMD entry.
195 static void dax_wake_entry(struct xa_state *xas, void *entry, in dax_wake_entry() argument
201 wq = dax_entry_waitqueue(xas, entry, &key); in dax_wake_entry()
204 * Checking for locked entry and prepare_to_wait_exclusive() happens in dax_wake_entry()
205 * under the i_pages lock, ditto for entry handling in our callers. in dax_wake_entry()
206 * So at this point all tasks that could have seen our entry locked in dax_wake_entry()
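Source lines 158-206 are the entry-lock wait machinery: dax_entry_waitqueue() builds a key from the mapping's tree and the index and, for a PMD entry, aligns the index first (lines 164-168) so that a PTE-level waiter and the PMD-level locker end up on the same wait queue. A small standalone sketch of that idea follows; the struct, the toy hash, and the constants are illustrative, while the kernel hashes into a fixed table of wait queues with hash_long().

#include <stdio.h>

#define SKETCH_PAGE_SHIFT    12
#define SKETCH_PMD_SHIFT     21
#define SKETCH_PMD_ORDER     (SKETCH_PMD_SHIFT - SKETCH_PAGE_SHIFT)
#define SKETCH_NR_WAITQUEUES 256

struct sketch_wait_key {
        const void    *mapping;  /* stands in for the address_space / xarray */
        unsigned long  index;    /* page-cache index, PMD-aligned if needed  */
};

static unsigned int sketch_waitqueue_index(const void *mapping,
                                           unsigned long index, int pmd)
{
        struct sketch_wait_key key = { .mapping = mapping, .index = index };

        if (pmd)        /* align so PTE-level waiters hash with the PMD locker */
                key.index &= ~((1UL << SKETCH_PMD_ORDER) - 1);

        /* toy hash over the key; illustrative only */
        return (unsigned int)(((unsigned long)key.mapping ^ key.index) %
                              SKETCH_NR_WAITQUEUES);
}

int main(void)
{
        static int dummy_mapping;

        /* a PTE waiter at index 0x205 and the PMD locker at 0x200 collide */
        printf("%u %u\n",
               sketch_waitqueue_index(&dummy_mapping, 0x205, 1),
               sketch_waitqueue_index(&dummy_mapping, 0x200, 1));
        return 0;
}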
214 * Look up entry in page cache, wait for it to become unlocked if it
215 * is a DAX entry and return it. The caller must subsequently call
216 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
217 * if it did. The entry returned may have a larger order than @order.
218 * If @order is larger than the order of the entry found in i_pages, this
219 * function returns a dax_is_conflict entry.
225 void *entry; in get_unlocked_entry() local
233 entry = xas_find_conflict(xas); in get_unlocked_entry()
234 if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) in get_unlocked_entry()
235 return entry; in get_unlocked_entry()
236 if (dax_entry_order(entry) < order) in get_unlocked_entry()
238 if (!dax_is_locked(entry)) in get_unlocked_entry()
239 return entry; in get_unlocked_entry()
241 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in get_unlocked_entry()
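The contract spelled out at source lines 214-219 has one subtle point: the entry that comes back may be of a larger order than requested (a PMD entry satisfies an order-0 lookup), but if the stored entry's order is smaller than @order the lookup reports a conflict instead, matching the dax_entry_order(entry) < order test at line 236. A standalone sketch of just that order check, with illustrative names and values:

#include <stdio.h>

#define SKETCH_PMD       (1UL << 1)
#define SKETCH_PMD_ORDER 9              /* 2 MiB PMDs with 4 KiB pages */
#define SKETCH_CONFLICT  ((unsigned long)-1)

static unsigned int sketch_entry_order(unsigned long entry)
{
        return (entry & SKETCH_PMD) ? SKETCH_PMD_ORDER : 0;
}

/* returns the entry if usable, SKETCH_CONFLICT if it is too small for the request */
static unsigned long sketch_get(unsigned long stored, unsigned int order)
{
        if (sketch_entry_order(stored) < order)
                return SKETCH_CONFLICT;
        return stored;
}

int main(void)
{
        unsigned long pte_entry = 0x50UL << 4;
        unsigned long pmd_entry = (0x60UL << 4) | SKETCH_PMD;

        printf("PTE entry, PMD request: %s\n",
               sketch_get(pte_entry, SKETCH_PMD_ORDER) == SKETCH_CONFLICT ? "conflict" : "ok");
        printf("PMD entry, PTE request: %s\n",
               sketch_get(pmd_entry, 0) == SKETCH_CONFLICT ? "conflict" : "ok");
        return 0;
}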
257 static void wait_entry_unlocked(struct xa_state *xas, void *entry) in wait_entry_unlocked() argument
265 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in wait_entry_unlocked()
268 * path ever successfully retrieves an unlocked entry before an in wait_entry_unlocked()
278 static void put_unlocked_entry(struct xa_state *xas, void *entry, in put_unlocked_entry() argument
281 if (entry && !dax_is_conflict(entry)) in put_unlocked_entry()
282 dax_wake_entry(xas, entry, mode); in put_unlocked_entry()
286 * We used the xa_state to get the entry, but then we locked the entry and
290 static void dax_unlock_entry(struct xa_state *xas, void *entry) in dax_unlock_entry() argument
294 BUG_ON(dax_is_locked(entry)); in dax_unlock_entry()
297 old = xas_store(xas, entry); in dax_unlock_entry()
300 dax_wake_entry(xas, entry, WAKE_NEXT); in dax_unlock_entry()
304 * Return: The entry stored at this location before it was locked.
306 static void *dax_lock_entry(struct xa_state *xas, void *entry) in dax_lock_entry() argument
308 unsigned long v = xa_to_value(entry); in dax_lock_entry()
312 static unsigned long dax_entry_size(void *entry) in dax_entry_size() argument
314 if (dax_is_zero_entry(entry)) in dax_entry_size()
316 else if (dax_is_empty_entry(entry)) in dax_entry_size()
318 else if (dax_is_pmd_entry(entry)) in dax_entry_size()
324 static unsigned long dax_end_pfn(void *entry) in dax_end_pfn() argument
326 return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE; in dax_end_pfn()
330 * Iterate through all mapped pfns represented by an entry, i.e. skip
333 #define for_each_mapped_pfn(entry, pfn) \ argument
334 for (pfn = dax_to_pfn(entry); \
335 pfn < dax_end_pfn(entry); pfn++)
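dax_entry_size() (source lines 312-318) maps an entry's flags to how much it really covers: zero-page and empty placeholders map nothing, a PMD entry maps PMD_SIZE, and everything else one PAGE_SIZE; for_each_mapped_pfn() then walks exactly that pfn range. A self-contained sketch with illustrative sizes (4 KiB pages, 2 MiB PMDs) and made-up names:

#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096UL
#define SKETCH_PMD_SIZE  (2UL * 1024 * 1024)

#define SKETCH_PMD        (1UL << 1)
#define SKETCH_ZERO_PAGE  (1UL << 2)
#define SKETCH_EMPTY      (1UL << 3)
#define SKETCH_SHIFT      4

static unsigned long sketch_entry_size(unsigned long entry)
{
        if (entry & (SKETCH_ZERO_PAGE | SKETCH_EMPTY))
                return 0;               /* nothing is actually mapped */
        if (entry & SKETCH_PMD)
                return SKETCH_PMD_SIZE;
        return SKETCH_PAGE_SIZE;
}

static unsigned long sketch_to_pfn(unsigned long entry)
{
        return entry >> SKETCH_SHIFT;
}

static unsigned long sketch_end_pfn(unsigned long entry)
{
        return sketch_to_pfn(entry) + sketch_entry_size(entry) / SKETCH_PAGE_SIZE;
}

/* same shape as the for_each_mapped_pfn() macro in the listing */
#define sketch_for_each_mapped_pfn(entry, pfn) \
        for (pfn = sketch_to_pfn(entry); pfn < sketch_end_pfn(entry); pfn++)

int main(void)
{
        unsigned long entry = (0x1000UL << SKETCH_SHIFT) | SKETCH_PMD;
        unsigned long pfn, n = 0;

        sketch_for_each_mapped_pfn(entry, pfn)
                n++;
        printf("PMD entry covers %lu pfns\n", n);   /* 512 with these sizes */
        return 0;
}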
342 static void dax_associate_entry(void *entry, struct address_space *mapping, in dax_associate_entry() argument
345 unsigned long size = dax_entry_size(entry), pfn, index; in dax_associate_entry()
352 for_each_mapped_pfn(entry, pfn) { in dax_associate_entry()
361 static void dax_disassociate_entry(void *entry, struct address_space *mapping, in dax_disassociate_entry() argument
369 for_each_mapped_pfn(entry, pfn) { in dax_disassociate_entry()
379 static struct page *dax_busy_page(void *entry) in dax_busy_page() argument
383 for_each_mapped_pfn(entry, pfn) { in dax_busy_page()
393 * dax_lock_page - Lock the DAX entry corresponding to a page in dax_lock_page()
394 * @page: The page whose entry we want to lock
397 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
403 void *entry; in dax_lock_page() local
410 entry = NULL; in dax_lock_page()
421 entry = (void *)~0UL; in dax_lock_page()
432 entry = xas_load(&xas); in dax_lock_page()
433 if (dax_is_locked(entry)) { in dax_lock_page()
435 wait_entry_unlocked(&xas, entry); in dax_lock_page()
439 dax_lock_entry(&xas, entry); in dax_lock_page()
444 return (dax_entry_t)entry; in dax_lock_page()
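dax_lock_entry() returns the value that was stored before it set the lock bit (source line 304), and dax_lock_page() hands that pre-lock entry back to the caller as an opaque cookie (line 444) so a later unlock can restore it. A toy, single-slot model of that cookie protocol; there is no XArray and no wake-up here, and the names are made up:

#include <stdio.h>

#define SKETCH_LOCKED (1UL << 0)

static unsigned long slot;              /* stands in for one page-cache slot */

static unsigned long sketch_lock(void)
{
        unsigned long cookie = slot;    /* value before locking */

        slot |= SKETCH_LOCKED;          /* publish the locked form */
        return cookie;
}

static void sketch_unlock(unsigned long cookie)
{
        slot = cookie;                  /* restore the pre-lock value */
}

int main(void)
{
        unsigned long cookie;

        slot = 0x1230;                  /* some unlocked entry */
        cookie = sketch_lock();
        printf("locked slot=%#lx cookie=%#lx\n", slot, cookie);
        sketch_unlock(cookie);
        printf("restored slot=%#lx\n", slot);
        return 0;
}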
459 * Find page cache entry at given index. If it is a DAX entry, return it
460 * with the entry locked. If the page cache doesn't contain an entry at
461 * that index, add a locked empty entry.
463 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
464 * either return that locked entry or will return VM_FAULT_FALLBACK.
469 * evict PTE entries in order to 'upgrade' them to a PMD entry. A PMD
471 * PTE insertion will cause an existing PMD entry to be unmapped and
477 * the tree, and PTE writes will simply dirty the entire PMD entry.
484 * a VM_FAULT code, encoded as an xarray internal entry. The ERR_PTR values
491 bool pmd_downgrade; /* splitting PMD entry into PTE entries? */ in grab_mapping_entry()
492 void *entry; in grab_mapping_entry() local
497 entry = get_unlocked_entry(xas, order); in grab_mapping_entry()
499 if (entry) { in grab_mapping_entry()
500 if (dax_is_conflict(entry)) in grab_mapping_entry()
502 if (!xa_is_value(entry)) { in grab_mapping_entry()
508 if (dax_is_pmd_entry(entry) && in grab_mapping_entry()
509 (dax_is_zero_entry(entry) || in grab_mapping_entry()
510 dax_is_empty_entry(entry))) { in grab_mapping_entry()
518 * Make sure 'entry' remains valid while we drop in grab_mapping_entry()
521 dax_lock_entry(xas, entry); in grab_mapping_entry()
528 if (dax_is_zero_entry(entry)) { in grab_mapping_entry()
537 dax_disassociate_entry(entry, mapping, false); in grab_mapping_entry()
539 dax_wake_entry(xas, entry, WAKE_ALL); in grab_mapping_entry()
541 entry = NULL; in grab_mapping_entry()
545 if (entry) { in grab_mapping_entry()
546 dax_lock_entry(xas, entry); in grab_mapping_entry()
552 entry = dax_make_entry(pfn_to_pfn_t(0), flags); in grab_mapping_entry()
553 dax_lock_entry(xas, entry); in grab_mapping_entry()
567 return entry; in grab_mapping_entry()
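Per the comment at source line 484, grab_mapping_entry() reports failure by returning a VM_FAULT code packed into an XArray internal entry, which the fault handlers unpack with the xa_is_internal()/xa_to_internal() pair visible at lines 1293-1294 and 1546-1547. Below is a standalone sketch of that low-bit tagging convention; the tag values are assumptions chosen only because real pointers are at least 4-byte aligned, and the fault code is a stand-in, not the real VM_FAULT_FALLBACK value.

#include <stdio.h>

#define TAG_MASK      3UL
#define TAG_INTERNAL  2UL       /* assumed tag marking an "internal" entry */

static void *sketch_mk_internal(unsigned long v)
{
        return (void *)((v << 2) | TAG_INTERNAL);
}

static int sketch_is_internal(const void *entry)
{
        return ((unsigned long)entry & TAG_MASK) == TAG_INTERNAL;
}

static unsigned long sketch_to_internal(const void *entry)
{
        return (unsigned long)entry >> 2;
}

int main(void)
{
        unsigned long fault_code = 0x800;       /* stand-in for a VM_FAULT_* code */
        void *entry = sketch_mk_internal(fault_code);

        if (sketch_is_internal(entry))
                printf("fault code %#lx\n", sketch_to_internal(entry));
        return 0;
}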
594 void *entry; in dax_layout_busy_page_range() local
630 xas_for_each(&xas, entry, end_idx) { in dax_layout_busy_page_range()
631 if (WARN_ON_ONCE(!xa_is_value(entry))) in dax_layout_busy_page_range()
633 if (unlikely(dax_is_locked(entry))) in dax_layout_busy_page_range()
634 entry = get_unlocked_entry(&xas, 0); in dax_layout_busy_page_range()
635 if (entry) in dax_layout_busy_page_range()
636 page = dax_busy_page(entry); in dax_layout_busy_page_range()
637 put_unlocked_entry(&xas, entry, WAKE_NEXT); in dax_layout_busy_page_range()
664 void *entry; in __dax_invalidate_entry() local
667 entry = get_unlocked_entry(&xas, 0); in __dax_invalidate_entry()
668 if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) in __dax_invalidate_entry()
674 dax_disassociate_entry(entry, mapping, trunc); in __dax_invalidate_entry()
679 put_unlocked_entry(&xas, entry, WAKE_ALL); in __dax_invalidate_entry()
685 * Delete DAX entry at @index from @mapping. Wait for it
696 * caller has seen a DAX entry for this index, we better find it in dax_delete_mapping_entry()
704 * Invalidate DAX entry if it is clean.
738 * By this point grab_mapping_entry() has ensured that we have a locked entry
746 void *entry, pfn_t pfn, unsigned long flags, bool dirty) in dax_insert_entry() argument
753 if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) { in dax_insert_entry()
756 if (dax_is_pmd_entry(entry)) in dax_insert_entry()
759 else /* pte entry */ in dax_insert_entry()
765 if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { in dax_insert_entry()
768 dax_disassociate_entry(entry, mapping, false); in dax_insert_entry()
771 * Only swap our new entry into the page cache if the current in dax_insert_entry()
772 * entry is a zero page or an empty entry. If a normal PTE or in dax_insert_entry()
773 * PMD entry is already in the cache, we leave it alone. This in dax_insert_entry()
775 * existing entry is a PMD, we will just leave the PMD in the in dax_insert_entry()
779 WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) | in dax_insert_entry()
781 entry = new_entry; in dax_insert_entry()
790 return entry; in dax_insert_entry()
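The comments at source lines 771-779 state the rule dax_insert_entry() follows: the new entry only replaces the cached one when what is cached is a placeholder (zero page or empty); a real PTE or PMD entry stays put, which is how an existing PMD avoids being downgraded by a later PTE insertion. A minimal sketch of that decision, with illustrative flag bits:

#include <stdio.h>

#define SKETCH_PMD        (1UL << 1)
#define SKETCH_ZERO_PAGE  (1UL << 2)
#define SKETCH_EMPTY      (1UL << 3)
#define SKETCH_SHIFT      4

static unsigned long sketch_insert(unsigned long cached, unsigned long new_entry)
{
        if (cached & (SKETCH_ZERO_PAGE | SKETCH_EMPTY))
                return new_entry;       /* placeholder: replace it         */
        return cached;                  /* real mapping: keep what's there */
}

int main(void)
{
        unsigned long empty = SKETCH_EMPTY;
        unsigned long pmd   = (0x400UL << SKETCH_SHIFT) | SKETCH_PMD;
        unsigned long pte   = 0x999UL << SKETCH_SHIFT;

        printf("empty -> %#lx (replaced)\n", sketch_insert(empty, pte));
        printf("pmd   -> %#lx (kept)\n", sketch_insert(pmd, pte));
        return 0;
}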
878 struct address_space *mapping, void *entry) in dax_writeback_one() argument
887 if (WARN_ON(!xa_is_value(entry))) in dax_writeback_one()
890 if (unlikely(dax_is_locked(entry))) { in dax_writeback_one()
891 void *old_entry = entry; in dax_writeback_one()
893 entry = get_unlocked_entry(xas, 0); in dax_writeback_one()
895 /* Entry got punched out / reallocated? */ in dax_writeback_one()
896 if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) in dax_writeback_one()
899 * Entry got reallocated elsewhere? No need to writeback. in dax_writeback_one()
901 * difference in lockbit or entry type. in dax_writeback_one()
903 if (dax_to_pfn(old_entry) != dax_to_pfn(entry)) in dax_writeback_one()
905 if (WARN_ON_ONCE(dax_is_empty_entry(entry) || in dax_writeback_one()
906 dax_is_zero_entry(entry))) { in dax_writeback_one()
911 /* Another fsync thread may have already done this entry */ in dax_writeback_one()
916 /* Lock the entry to serialize with page faults */ in dax_writeback_one()
917 dax_lock_entry(xas, entry); in dax_writeback_one()
923 * at the entry only under the i_pages lock and once they do that in dax_writeback_one()
924 * they will see the entry locked and wait for it to unlock. in dax_writeback_one()
936 pfn = dax_to_pfn(entry); in dax_writeback_one()
937 count = 1UL << dax_entry_order(entry); in dax_writeback_one()
946 * entry lock. in dax_writeback_one()
950 xas_store(xas, entry); in dax_writeback_one()
952 dax_wake_entry(xas, entry, WAKE_NEXT); in dax_writeback_one()
958 put_unlocked_entry(xas, entry, WAKE_NEXT); in dax_writeback_one()
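dax_writeback_one() turns the entry into a physical range to flush: pfn = dax_to_pfn(entry) and count = 1 << dax_entry_order(entry) (source lines 936-937), so a PMD entry is flushed as one PMD-order run of pages before the dirty tag is cleared. A sketch of that size computation with illustrative x86-64-style constants:

#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12
#define SKETCH_PMD_ORDER  9             /* 2 MiB / 4 KiB pages */
#define SKETCH_PMD        (1UL << 1)
#define SKETCH_SHIFT      4

static unsigned int sketch_entry_order(unsigned long entry)
{
        return (entry & SKETCH_PMD) ? SKETCH_PMD_ORDER : 0;
}

int main(void)
{
        unsigned long entry = (0x2000UL << SKETCH_SHIFT) | SKETCH_PMD;
        unsigned long pfn = entry >> SKETCH_SHIFT;
        unsigned long count = 1UL << sketch_entry_order(entry);

        /* the kernel would flush this physical range, then clear the dirty tag */
        printf("flush %lu pages starting at pfn %#lx (%lu KiB)\n",
               count, pfn, count << (SKETCH_PAGE_SHIFT - 10));
        return 0;
}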
973 void *entry; in dax_writeback_mapping_range() local
988 xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) { in dax_writeback_mapping_range()
989 ret = dax_writeback_one(&xas, dax_dev, mapping, entry); in dax_writeback_mapping_range()
1053 struct address_space *mapping, void **entry, in dax_load_hole() argument
1061 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_load_hole()
1275 void *entry; in dax_iomap_pte_fault() local
1292 entry = grab_mapping_entry(&xas, mapping, 0); in dax_iomap_pte_fault()
1293 if (xa_is_internal(entry)) { in dax_iomap_pte_fault()
1294 ret = xa_to_internal(entry); in dax_iomap_pte_fault()
1367 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, in dax_iomap_pte_fault()
1385 trace_dax_insert_mapping(inode, vmf, entry); in dax_iomap_pte_fault()
1395 ret = dax_load_hole(&xas, mapping, &entry, vmf); in dax_iomap_pte_fault()
1422 dax_unlock_entry(&xas, entry); in dax_iomap_pte_fault()
1430 struct iomap *iomap, void **entry) in dax_pmd_load_hole() argument
1448 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_pmd_load_hole()
1471 trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry); in dax_pmd_load_hole()
1477 trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry); in dax_pmd_load_hole()
1496 void *entry; in dax_iomap_pmd_fault() local
1540 * grab_mapping_entry() will make sure we get an empty PMD entry, in dax_iomap_pmd_fault()
1541 * a zero PMD entry or a DAX PMD. If it can't (because a PTE in dax_iomap_pmd_fault()
1542 * entry is already in the array, for instance), it will return in dax_iomap_pmd_fault()
1545 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); in dax_iomap_pmd_fault()
1546 if (xa_is_internal(entry)) { in dax_iomap_pmd_fault()
1547 result = xa_to_internal(entry); in dax_iomap_pmd_fault()
1585 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, in dax_iomap_pmd_fault()
1602 trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry); in dax_iomap_pmd_fault()
1609 result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry); in dax_iomap_pmd_fault()
1632 dax_unlock_entry(&xas, entry); in dax_iomap_pmd_fault()
1678 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1681 * @order: Order of entry to insert.
1683 * This function inserts a writeable PTE or PMD entry into the page tables
1684 * for an mmaped DAX file. It also marks the page cache entry as dirty.
1691 void *entry; in dax_insert_pfn_mkwrite() local
1695 entry = get_unlocked_entry(&xas, order); in dax_insert_pfn_mkwrite()
1696 /* Did we race with someone splitting entry or so? */ in dax_insert_pfn_mkwrite()
1697 if (!entry || dax_is_conflict(entry) || in dax_insert_pfn_mkwrite()
1698 (order == 0 && !dax_is_pte_entry(entry))) { in dax_insert_pfn_mkwrite()
1699 put_unlocked_entry(&xas, entry, WAKE_NEXT); in dax_insert_pfn_mkwrite()
1706 dax_lock_entry(&xas, entry); in dax_insert_pfn_mkwrite()
1716 dax_unlock_entry(&xas, entry); in dax_insert_pfn_mkwrite()
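dax_insert_pfn_mkwrite() re-validates before it re-inserts (source lines 1696-1698): if the entry is gone, was split under us (the conflict marker), or an order-0 request finds something that is not a PTE entry, it drops the attempt rather than install a stale translation. A standalone sketch of that guard; the conflict marker value and flag bit are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_PMD       (1UL << 1)
#define SKETCH_CONFLICT  ((unsigned long)-1)    /* stand-in for the retry/conflict marker */

static bool sketch_can_insert(unsigned long entry, unsigned int order)
{
        if (!entry || entry == SKETCH_CONFLICT)
                return false;           /* entry vanished or was split under us */
        if (order == 0 && (entry & SKETCH_PMD))
                return false;           /* asked for a PTE, found a PMD */
        return true;
}

int main(void)
{
        printf("%d %d %d\n",
               sketch_can_insert(0, 0),                 /* 0: no entry          */
               sketch_can_insert(SKETCH_PMD | 0x10, 0), /* 0: PMD vs order-0    */
               sketch_can_insert(0x10, 0));             /* 1: ok to insert      */
        return 0;
}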
1724 * @pe_size: Size of entry to be inserted
1729 * table entry.