1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * fs/dax.c - Direct Access filesystem code
4  * Copyright (c) 2013-2014 Intel Corporation
5  * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
6  * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
7  */
8 
9 #include <linux/atomic.h>
10 #include <linux/blkdev.h>
11 #include <linux/buffer_head.h>
12 #include <linux/dax.h>
13 #include <linux/fs.h>
14 #include <linux/genhd.h>
15 #include <linux/highmem.h>
16 #include <linux/memcontrol.h>
17 #include <linux/mm.h>
18 #include <linux/mutex.h>
19 #include <linux/pagevec.h>
20 #include <linux/sched.h>
21 #include <linux/sched/signal.h>
22 #include <linux/uio.h>
23 #include <linux/vmstat.h>
24 #include <linux/pfn_t.h>
25 #include <linux/sizes.h>
26 #include <linux/mmu_notifier.h>
27 #include <linux/iomap.h>
28 #include <asm/pgalloc.h>
29 
30 #define CREATE_TRACE_POINTS
31 #include <trace/events/fs_dax.h>
32 
33 static inline unsigned int pe_order(enum page_entry_size pe_size)
34 {
35 	if (pe_size == PE_SIZE_PTE)
36 		return PAGE_SHIFT - PAGE_SHIFT;
37 	if (pe_size == PE_SIZE_PMD)
38 		return PMD_SHIFT - PAGE_SHIFT;
39 	if (pe_size == PE_SIZE_PUD)
40 		return PUD_SHIFT - PAGE_SHIFT;
41 	return ~0;
42 }
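/*
 * Illustrative values (assuming 4KiB base pages on a typical x86-64
 * configuration, i.e. PAGE_SHIFT == 12, PMD_SHIFT == 21, PUD_SHIFT == 30):
 * pe_order() maps PE_SIZE_PTE, PE_SIZE_PMD and PE_SIZE_PUD to orders
 * 0, 9 and 18, i.e. 1, 512 and 262144 base pages per entry.
 */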
43 
44 /* We choose 4096 entries - same as per-zone page wait tables */
45 #define DAX_WAIT_TABLE_BITS 12
46 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
47 
48 /* The 'colour' (ie low bits) within a PMD of a page offset.  */
49 #define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
50 #define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)
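/* Assuming 4KiB pages and 2MiB PMDs, PG_PMD_COLOUR is 0x1ff and PG_PMD_NR is 512. */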
51 
52 /* The order of a PMD entry */
53 #define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)
54 
55 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
56 
57 static int __init init_dax_wait_table(void)
58 {
59 	int i;
60 
61 	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
62 		init_waitqueue_head(wait_table + i);
63 	return 0;
64 }
65 fs_initcall(init_dax_wait_table);
66 
67 /*
68  * DAX pagecache entries use XArray value entries so they can't be mistaken
69  * for pages.  We use one bit for locking, one bit for the entry size (PMD)
70  * and two more to tell us if the entry is a zero page or an empty entry that
71  * is just used for locking.  In total four special bits.
72  *
73  * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
74  * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
75  * block allocation.
76  */
77 #define DAX_SHIFT	(4)
78 #define DAX_LOCKED	(1UL << 0)
79 #define DAX_PMD		(1UL << 1)
80 #define DAX_ZERO_PAGE	(1UL << 2)
81 #define DAX_EMPTY	(1UL << 3)
82 
83 static unsigned long dax_to_pfn(void *entry)
84 {
85 	return xa_to_value(entry) >> DAX_SHIFT;
86 }
87 
88 static void *dax_make_entry(pfn_t pfn, unsigned long flags)
89 {
90 	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
91 }
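/*
 * Worked example of the encoding described above, assuming a pfn of 0x1000:
 * a block-backed PMD entry is dax_make_entry(pfn, DAX_PMD), i.e.
 * xa_mk_value((0x1000 << DAX_SHIFT) | DAX_PMD) == xa_mk_value(0x10002),
 * and dax_to_pfn() recovers 0x1000 by shifting the flag bits back out.
 */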
92 
93 static bool dax_is_locked(void *entry)
94 {
95 	return xa_to_value(entry) & DAX_LOCKED;
96 }
97 
98 static unsigned int dax_entry_order(void *entry)
99 {
100 	if (xa_to_value(entry) & DAX_PMD)
101 		return PMD_ORDER;
102 	return 0;
103 }
104 
105 static unsigned long dax_is_pmd_entry(void *entry)
106 {
107 	return xa_to_value(entry) & DAX_PMD;
108 }
109 
110 static bool dax_is_pte_entry(void *entry)
111 {
112 	return !(xa_to_value(entry) & DAX_PMD);
113 }
114 
115 static int dax_is_zero_entry(void *entry)
116 {
117 	return xa_to_value(entry) & DAX_ZERO_PAGE;
118 }
119 
120 static int dax_is_empty_entry(void *entry)
121 {
122 	return xa_to_value(entry) & DAX_EMPTY;
123 }
124 
125 /*
126  * true if the entry that was found is of a smaller order than the entry
127  * we were looking for
128  */
129 static bool dax_is_conflict(void *entry)
130 {
131 	return entry == XA_RETRY_ENTRY;
132 }
133 
134 /*
135  * DAX page cache entry locking
136  */
137 struct exceptional_entry_key {
138 	struct xarray *xa;
139 	pgoff_t entry_start;
140 };
141 
142 struct wait_exceptional_entry_queue {
143 	wait_queue_entry_t wait;
144 	struct exceptional_entry_key key;
145 };
146 
147 static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
148 		void *entry, struct exceptional_entry_key *key)
149 {
150 	unsigned long hash;
151 	unsigned long index = xas->xa_index;
152 
153 	/*
154 	 * If 'entry' is a PMD, align the 'index' that we use for the wait
155 	 * queue to the start of that PMD.  This ensures that all offsets in
156 	 * the range covered by the PMD map to the same bit lock.
157 	 */
158 	if (dax_is_pmd_entry(entry))
159 		index &= ~PG_PMD_COLOUR;
160 	key->xa = xas->xa;
161 	key->entry_start = index;
162 
163 	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
164 	return wait_table + hash;
165 }
166 
167 static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
168 		unsigned int mode, int sync, void *keyp)
169 {
170 	struct exceptional_entry_key *key = keyp;
171 	struct wait_exceptional_entry_queue *ewait =
172 		container_of(wait, struct wait_exceptional_entry_queue, wait);
173 
174 	if (key->xa != ewait->key.xa ||
175 	    key->entry_start != ewait->key.entry_start)
176 		return 0;
177 	return autoremove_wake_function(wait, mode, sync, NULL);
178 }
179 
180 /*
181  * @entry may no longer be the entry at the index in the mapping.
182  * The important information it's conveying is whether the entry at
183  * this index used to be a PMD entry.
184  */
185 static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
186 {
187 	struct exceptional_entry_key key;
188 	wait_queue_head_t *wq;
189 
190 	wq = dax_entry_waitqueue(xas, entry, &key);
191 
192 	/*
193 	 * Checking for locked entry and prepare_to_wait_exclusive() happens
194 	 * under the i_pages lock, ditto for entry handling in our callers.
195 	 * So at this point all tasks that could have seen our entry locked
196 	 * must be in the waitqueue and the following check will see them.
197 	 */
198 	if (waitqueue_active(wq))
199 		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
200 }
201 
202 /*
203  * Look up entry in page cache, wait for it to become unlocked if it
204  * is a DAX entry and return it.  The caller must subsequently call
205  * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
206  * if it did.  The entry returned may have a larger order than @order.
207  * If @order is larger than the order of the entry found in i_pages, this
208  * function returns a dax_is_conflict entry.
209  *
210  * Must be called with the i_pages lock held.
211  */
212 static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
213 {
214 	void *entry;
215 	struct wait_exceptional_entry_queue ewait;
216 	wait_queue_head_t *wq;
217 
218 	init_wait(&ewait.wait);
219 	ewait.wait.func = wake_exceptional_entry_func;
220 
221 	for (;;) {
222 		entry = xas_find_conflict(xas);
223 		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
224 			return entry;
225 		if (dax_entry_order(entry) < order)
226 			return XA_RETRY_ENTRY;
227 		if (!dax_is_locked(entry))
228 			return entry;
229 
230 		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
231 		prepare_to_wait_exclusive(wq, &ewait.wait,
232 					  TASK_UNINTERRUPTIBLE);
233 		xas_unlock_irq(xas);
234 		xas_reset(xas);
235 		schedule();
236 		finish_wait(wq, &ewait.wait);
237 		xas_lock_irq(xas);
238 	}
239 }
240 
241 /*
242  * The only thing keeping the address space around is the i_pages lock
243  * (it's cycled in clear_inode() after removing the entries from i_pages)
244  * After we call xas_unlock_irq(), we cannot touch xas->xa.
245  */
246 static void wait_entry_unlocked(struct xa_state *xas, void *entry)
247 {
248 	struct wait_exceptional_entry_queue ewait;
249 	wait_queue_head_t *wq;
250 
251 	init_wait(&ewait.wait);
252 	ewait.wait.func = wake_exceptional_entry_func;
253 
254 	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
255 	/*
256 	 * Unlike get_unlocked_entry() there is no guarantee that this
257 	 * path ever successfully retrieves an unlocked entry before an
258 	 * inode dies. Perform a non-exclusive wait in case this path
259 	 * never successfully performs its own wake up.
260 	 */
261 	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
262 	xas_unlock_irq(xas);
263 	schedule();
264 	finish_wait(wq, &ewait.wait);
265 }
266 
267 static void put_unlocked_entry(struct xa_state *xas, void *entry)
268 {
269 	/* If we were the only waiter woken, wake the next one */
270 	if (entry && !dax_is_conflict(entry))
271 		dax_wake_entry(xas, entry, false);
272 }
273 
274 /*
275  * We used the xa_state to get the entry, but then we locked the entry and
276  * dropped the xa_lock, so we know the xa_state is stale and must be reset
277  * before use.
278  */
279 static void dax_unlock_entry(struct xa_state *xas, void *entry)
280 {
281 	void *old;
282 
283 	BUG_ON(dax_is_locked(entry));
284 	xas_reset(xas);
285 	xas_lock_irq(xas);
286 	old = xas_store(xas, entry);
287 	xas_unlock_irq(xas);
288 	BUG_ON(!dax_is_locked(old));
289 	dax_wake_entry(xas, entry, false);
290 }
291 
292 /*
293  * Return: The entry stored at this location before it was locked.
294  */
295 static void *dax_lock_entry(struct xa_state *xas, void *entry)
296 {
297 	unsigned long v = xa_to_value(entry);
298 	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
299 }
300 
301 static unsigned long dax_entry_size(void *entry)
302 {
303 	if (dax_is_zero_entry(entry))
304 		return 0;
305 	else if (dax_is_empty_entry(entry))
306 		return 0;
307 	else if (dax_is_pmd_entry(entry))
308 		return PMD_SIZE;
309 	else
310 		return PAGE_SIZE;
311 }
312 
313 static unsigned long dax_end_pfn(void *entry)
314 {
315 	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
316 }
317 
318 /*
319  * Iterate through all mapped pfns represented by an entry, i.e. skip
320  * 'empty' and 'zero' entries.
321  */
322 #define for_each_mapped_pfn(entry, pfn) \
323 	for (pfn = dax_to_pfn(entry); \
324 			pfn < dax_end_pfn(entry); pfn++)
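/*
 * For a PMD entry this visits PMD_SIZE / PAGE_SIZE consecutive pfns
 * (512 with 4KiB pages and 2MiB PMDs); for zero and empty entries
 * dax_entry_size() is 0, so the loop body never runs.
 */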
325 
326 /*
327  * TODO: for reflink+dax we need a way to associate a single page with
328  * multiple address_space instances at different linear_page_index()
329  * offsets.
330  */
331 static void dax_associate_entry(void *entry, struct address_space *mapping,
332 		struct vm_area_struct *vma, unsigned long address)
333 {
334 	unsigned long size = dax_entry_size(entry), pfn, index;
335 	int i = 0;
336 
337 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
338 		return;
339 
340 	index = linear_page_index(vma, address & ~(size - 1));
341 	for_each_mapped_pfn(entry, pfn) {
342 		struct page *page = pfn_to_page(pfn);
343 
344 		WARN_ON_ONCE(page->mapping);
345 		page->mapping = mapping;
346 		page->index = index + i++;
347 	}
348 }
349 
350 static void dax_disassociate_entry(void *entry, struct address_space *mapping,
351 		bool trunc)
352 {
353 	unsigned long pfn;
354 
355 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
356 		return;
357 
358 	for_each_mapped_pfn(entry, pfn) {
359 		struct page *page = pfn_to_page(pfn);
360 
361 		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
362 		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
363 		page->mapping = NULL;
364 		page->index = 0;
365 	}
366 }
367 
368 static struct page *dax_busy_page(void *entry)
369 {
370 	unsigned long pfn;
371 
372 	for_each_mapped_pfn(entry, pfn) {
373 		struct page *page = pfn_to_page(pfn);
374 
375 		if (page_ref_count(page) > 1)
376 			return page;
377 	}
378 	return NULL;
379 }
380 
381 /*
382  * dax_lock_page - Lock the DAX entry corresponding to a page
383  * @page: The page whose entry we want to lock
384  *
385  * Context: Process context.
386  * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
387  * not be locked.
388  */
389 dax_entry_t dax_lock_page(struct page *page)
390 {
391 	XA_STATE(xas, NULL, 0);
392 	void *entry;
393 
394 	/* Ensure page->mapping isn't freed while we look at it */
395 	rcu_read_lock();
396 	for (;;) {
397 		struct address_space *mapping = READ_ONCE(page->mapping);
398 
399 		entry = NULL;
400 		if (!mapping || !dax_mapping(mapping))
401 			break;
402 
403 		/*
404 		 * In the device-dax case there's no need to lock, a
405 		 * struct dev_pagemap pin is sufficient to keep the
406 		 * inode alive, and we assume we have dev_pagemap pin
407 		 * otherwise we would not have a valid pfn_to_page()
408 		 * translation.
409 		 */
410 		entry = (void *)~0UL;
411 		if (S_ISCHR(mapping->host->i_mode))
412 			break;
413 
414 		xas.xa = &mapping->i_pages;
415 		xas_lock_irq(&xas);
416 		if (mapping != page->mapping) {
417 			xas_unlock_irq(&xas);
418 			continue;
419 		}
420 		xas_set(&xas, page->index);
421 		entry = xas_load(&xas);
422 		if (dax_is_locked(entry)) {
423 			rcu_read_unlock();
424 			wait_entry_unlocked(&xas, entry);
425 			rcu_read_lock();
426 			continue;
427 		}
428 		dax_lock_entry(&xas, entry);
429 		xas_unlock_irq(&xas);
430 		break;
431 	}
432 	rcu_read_unlock();
433 	return (dax_entry_t)entry;
434 }
435 
436 void dax_unlock_page(struct page *page, dax_entry_t cookie)
437 {
438 	struct address_space *mapping = page->mapping;
439 	XA_STATE(xas, &mapping->i_pages, page->index);
440 
441 	if (S_ISCHR(mapping->host->i_mode))
442 		return;
443 
444 	dax_unlock_entry(&xas, (void *)cookie);
445 }
446 
447 /*
448  * Find page cache entry at given index. If it is a DAX entry, return it
449  * with the entry locked. If the page cache doesn't contain an entry at
450  * that index, add a locked empty entry.
451  *
452  * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
453  * either return that locked entry or will return VM_FAULT_FALLBACK.
454  * This will happen if there are any PTE entries within the PMD range
455  * that we are requesting.
456  *
457  * We always favor PTE entries over PMD entries. There isn't a flow where we
458  * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
459  * insertion will fail if it finds any PTE entries already in the tree, and a
460  * PTE insertion will cause an existing PMD entry to be unmapped and
461  * downgraded to PTE entries.  This happens for both PMD zero pages as
462  * well as PMD empty entries.
463  *
464  * The exception to this downgrade path is for PMD entries that have
465  * real storage backing them.  We will leave these real PMD entries in
466  * the tree, and PTE writes will simply dirty the entire PMD entry.
467  *
468  * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
469  * persistent memory the benefit is doubtful. We can add that later if we can
470  * show it helps.
471  *
472  * On error, this function does not return an ERR_PTR.  Instead it returns
473  * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
474  * overlap with xarray value entries.
475  */
476 static void *grab_mapping_entry(struct xa_state *xas,
477 		struct address_space *mapping, unsigned int order)
478 {
479 	unsigned long index = xas->xa_index;
480 	bool pmd_downgrade;	/* splitting PMD entry into PTE entries? */
481 	void *entry;
482 
483 retry:
484 	pmd_downgrade = false;
485 	xas_lock_irq(xas);
486 	entry = get_unlocked_entry(xas, order);
487 
488 	if (entry) {
489 		if (dax_is_conflict(entry))
490 			goto fallback;
491 		if (!xa_is_value(entry)) {
492 			xas_set_err(xas, EIO);
493 			goto out_unlock;
494 		}
495 
496 		if (order == 0) {
497 			if (dax_is_pmd_entry(entry) &&
498 			    (dax_is_zero_entry(entry) ||
499 			     dax_is_empty_entry(entry))) {
500 				pmd_downgrade = true;
501 			}
502 		}
503 	}
504 
505 	if (pmd_downgrade) {
506 		/*
507 		 * Make sure 'entry' remains valid while we drop
508 		 * the i_pages lock.
509 		 */
510 		dax_lock_entry(xas, entry);
511 
512 		/*
513 		 * Besides huge zero pages the only other thing that gets
514 		 * downgraded are empty entries which don't need to be
515 		 * unmapped.
516 		 */
517 		if (dax_is_zero_entry(entry)) {
518 			xas_unlock_irq(xas);
519 			unmap_mapping_pages(mapping,
520 					xas->xa_index & ~PG_PMD_COLOUR,
521 					PG_PMD_NR, false);
522 			xas_reset(xas);
523 			xas_lock_irq(xas);
524 		}
525 
526 		dax_disassociate_entry(entry, mapping, false);
527 		xas_store(xas, NULL);	/* undo the PMD join */
528 		dax_wake_entry(xas, entry, true);
529 		mapping->nrexceptional--;
530 		entry = NULL;
531 		xas_set(xas, index);
532 	}
533 
534 	if (entry) {
535 		dax_lock_entry(xas, entry);
536 	} else {
537 		unsigned long flags = DAX_EMPTY;
538 
539 		if (order > 0)
540 			flags |= DAX_PMD;
541 		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
542 		dax_lock_entry(xas, entry);
543 		if (xas_error(xas))
544 			goto out_unlock;
545 		mapping->nrexceptional++;
546 	}
547 
548 out_unlock:
549 	xas_unlock_irq(xas);
550 	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
551 		goto retry;
552 	if (xas->xa_node == XA_ERROR(-ENOMEM))
553 		return xa_mk_internal(VM_FAULT_OOM);
554 	if (xas_error(xas))
555 		return xa_mk_internal(VM_FAULT_SIGBUS);
556 	return entry;
557 fallback:
558 	xas_unlock_irq(xas);
559 	return xa_mk_internal(VM_FAULT_FALLBACK);
560 }
561 
562 /**
563  * dax_layout_busy_page - find first pinned page in @mapping
564  * @mapping: address space to scan for a page with ref count > 1
565  *
566  * DAX requires ZONE_DEVICE mapped pages. These pages are never
567  * 'onlined' to the page allocator so they are considered idle when
568  * page->count == 1. A filesystem uses this interface to determine if
569  * any page in the mapping is busy, i.e. for DMA, or other
570  * get_user_pages() usages.
571  *
572  * It is expected that the filesystem is holding locks to block the
573  * establishment of new mappings in this address_space. I.e. it expects
574  * to be able to run unmap_mapping_range() and subsequently not race
575  * mapping_mapped() becoming true.
576  */
577 struct page *dax_layout_busy_page(struct address_space *mapping)
578 {
579 	XA_STATE(xas, &mapping->i_pages, 0);
580 	void *entry;
581 	unsigned int scanned = 0;
582 	struct page *page = NULL;
583 
584 	/*
585 	 * In the 'limited' case get_user_pages() for dax is disabled.
586 	 */
587 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
588 		return NULL;
589 
590 	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
591 		return NULL;
592 
593 	/*
594 	 * If we race get_user_pages_fast() here either we'll see the
595 	 * elevated page count in the iteration and wait, or
596 	 * get_user_pages_fast() will see that the page it took a reference
597 	 * against is no longer mapped in the page tables and bail to the
598 	 * get_user_pages() slow path.  The slow path is protected by
599 	 * pte_lock() and pmd_lock(). New references are not taken without
600 	 * holding those locks, and unmap_mapping_range() will not zero the
601 	 * pte or pmd without holding the respective lock, so we are
602 	 * guaranteed to either see new references or prevent new
603 	 * references from being established.
604 	 */
605 	unmap_mapping_range(mapping, 0, 0, 0);
606 
607 	xas_lock_irq(&xas);
608 	xas_for_each(&xas, entry, ULONG_MAX) {
609 		if (WARN_ON_ONCE(!xa_is_value(entry)))
610 			continue;
611 		if (unlikely(dax_is_locked(entry)))
612 			entry = get_unlocked_entry(&xas, 0);
613 		if (entry)
614 			page = dax_busy_page(entry);
615 		put_unlocked_entry(&xas, entry);
616 		if (page)
617 			break;
618 		if (++scanned % XA_CHECK_SCHED)
619 			continue;
620 
621 		xas_pause(&xas);
622 		xas_unlock_irq(&xas);
623 		cond_resched();
624 		xas_lock_irq(&xas);
625 	}
626 	xas_unlock_irq(&xas);
627 	return page;
628 }
629 EXPORT_SYMBOL_GPL(dax_layout_busy_page);
630 
631 static int __dax_invalidate_entry(struct address_space *mapping,
632 					  pgoff_t index, bool trunc)
633 {
634 	XA_STATE(xas, &mapping->i_pages, index);
635 	int ret = 0;
636 	void *entry;
637 
638 	xas_lock_irq(&xas);
639 	entry = get_unlocked_entry(&xas, 0);
640 	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
641 		goto out;
642 	if (!trunc &&
643 	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
644 	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
645 		goto out;
646 	dax_disassociate_entry(entry, mapping, trunc);
647 	xas_store(&xas, NULL);
648 	mapping->nrexceptional--;
649 	ret = 1;
650 out:
651 	put_unlocked_entry(&xas, entry);
652 	xas_unlock_irq(&xas);
653 	return ret;
654 }
655 
656 /*
657  * Delete DAX entry at @index from @mapping.  Wait for it
658  * to be unlocked before deleting it.
659  */
660 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
661 {
662 	int ret = __dax_invalidate_entry(mapping, index, true);
663 
664 	/*
665 	 * This gets called from truncate / punch_hole path. As such, the caller
666 	 * must hold locks protecting against concurrent modifications of the
667 	 * page cache (usually fs-private i_mmap_sem for writing). Since the
668 	 * caller has seen a DAX entry for this index, we better find it
669 	 * at that index as well...
670 	 */
671 	WARN_ON_ONCE(!ret);
672 	return ret;
673 }
674 
675 /*
676  * Invalidate DAX entry if it is clean.
677  */
678 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
679 				      pgoff_t index)
680 {
681 	return __dax_invalidate_entry(mapping, index, false);
682 }
683 
684 static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
685 		sector_t sector, size_t size, struct page *to,
686 		unsigned long vaddr)
687 {
688 	void *vto, *kaddr;
689 	pgoff_t pgoff;
690 	long rc;
691 	int id;
692 
693 	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
694 	if (rc)
695 		return rc;
696 
697 	id = dax_read_lock();
698 	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
699 	if (rc < 0) {
700 		dax_read_unlock(id);
701 		return rc;
702 	}
703 	vto = kmap_atomic(to);
704 	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
705 	kunmap_atomic(vto);
706 	dax_read_unlock(id);
707 	return 0;
708 }
709 
710 /*
711  * By this point grab_mapping_entry() has ensured that we have a locked entry
712  * of the appropriate size so we don't have to worry about downgrading PMDs to
713  * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
714  * already in the tree, we will skip the insertion and just dirty the PMD as
715  * appropriate.
716  */
717 static void *dax_insert_entry(struct xa_state *xas,
718 		struct address_space *mapping, struct vm_fault *vmf,
719 		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
720 {
721 	void *new_entry = dax_make_entry(pfn, flags);
722 
723 	if (dirty)
724 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
725 
726 	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
727 		unsigned long index = xas->xa_index;
728 		/* we are replacing a zero page with block mapping */
729 		if (dax_is_pmd_entry(entry))
730 			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
731 					PG_PMD_NR, false);
732 		else /* pte entry */
733 			unmap_mapping_pages(mapping, index, 1, false);
734 	}
735 
736 	xas_reset(xas);
737 	xas_lock_irq(xas);
738 	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
739 		void *old;
740 
741 		dax_disassociate_entry(entry, mapping, false);
742 		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
743 		/*
744 		 * Only swap our new entry into the page cache if the current
745 		 * entry is a zero page or an empty entry.  If a normal PTE or
746 		 * PMD entry is already in the cache, we leave it alone.  This
747 		 * means that if we are trying to insert a PTE and the
748 		 * existing entry is a PMD, we will just leave the PMD in the
749 		 * tree and dirty it if necessary.
750 		 */
751 		old = dax_lock_entry(xas, new_entry);
752 		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
753 					DAX_LOCKED));
754 		entry = new_entry;
755 	} else {
756 		xas_load(xas);	/* Walk the xa_state */
757 	}
758 
759 	if (dirty)
760 		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
761 
762 	xas_unlock_irq(xas);
763 	return entry;
764 }
765 
766 static inline
767 unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
768 {
769 	unsigned long address;
770 
771 	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
772 	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
773 	return address;
774 }
775 
776 /* Walk all mappings of a given index of a file and writeprotect them */
777 static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
778 		unsigned long pfn)
779 {
780 	struct vm_area_struct *vma;
781 	pte_t pte, *ptep = NULL;
782 	pmd_t *pmdp = NULL;
783 	spinlock_t *ptl;
784 
785 	i_mmap_lock_read(mapping);
786 	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
787 		struct mmu_notifier_range range;
788 		unsigned long address;
789 
790 		cond_resched();
791 
792 		if (!(vma->vm_flags & VM_SHARED))
793 			continue;
794 
795 		address = pgoff_address(index, vma);
796 
797 		/*
798 		 * follow_invalidate_pte() will use the range to call
799 		 * mmu_notifier_invalidate_range_start() on our behalf before
800 		 * taking any lock.
801 		 */
802 		if (follow_invalidate_pte(vma->vm_mm, address, &range, &ptep,
803 					  &pmdp, &ptl))
804 			continue;
805 
806 		/*
807 		 * No need to call mmu_notifier_invalidate_range() as we are
808 		 * downgrading page table protection not changing it to point
809 		 * to a new page.
810 		 *
811 		 * See Documentation/vm/mmu_notifier.rst
812 		 */
813 		if (pmdp) {
814 #ifdef CONFIG_FS_DAX_PMD
815 			pmd_t pmd;
816 
817 			if (pfn != pmd_pfn(*pmdp))
818 				goto unlock_pmd;
819 			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
820 				goto unlock_pmd;
821 
822 			flush_cache_range(vma, address,
823 					  address + HPAGE_PMD_SIZE);
824 			pmd = pmdp_invalidate(vma, address, pmdp);
825 			pmd = pmd_wrprotect(pmd);
826 			pmd = pmd_mkclean(pmd);
827 			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
828 unlock_pmd:
829 #endif
830 			spin_unlock(ptl);
831 		} else {
832 			if (pfn != pte_pfn(*ptep))
833 				goto unlock_pte;
834 			if (!pte_dirty(*ptep) && !pte_write(*ptep))
835 				goto unlock_pte;
836 
837 			flush_cache_page(vma, address, pfn);
838 			pte = ptep_clear_flush(vma, address, ptep);
839 			pte = pte_wrprotect(pte);
840 			pte = pte_mkclean(pte);
841 			set_pte_at(vma->vm_mm, address, ptep, pte);
842 unlock_pte:
843 			pte_unmap_unlock(ptep, ptl);
844 		}
845 
846 		mmu_notifier_invalidate_range_end(&range);
847 	}
848 	i_mmap_unlock_read(mapping);
849 }
850 
851 static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
852 		struct address_space *mapping, void *entry)
853 {
854 	unsigned long pfn, index, count;
855 	long ret = 0;
856 
857 	/*
858 	 * A page got tagged dirty in DAX mapping? Something is seriously
859 	 * wrong.
860 	 */
861 	if (WARN_ON(!xa_is_value(entry)))
862 		return -EIO;
863 
864 	if (unlikely(dax_is_locked(entry))) {
865 		void *old_entry = entry;
866 
867 		entry = get_unlocked_entry(xas, 0);
868 
869 		/* Entry got punched out / reallocated? */
870 		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
871 			goto put_unlocked;
872 		/*
873 		 * Entry got reallocated elsewhere? No need to writeback.
874 		 * We have to compare pfns as we must not bail out due to
875 		 * difference in lockbit or entry type.
876 		 */
877 		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
878 			goto put_unlocked;
879 		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
880 					dax_is_zero_entry(entry))) {
881 			ret = -EIO;
882 			goto put_unlocked;
883 		}
884 
885 		/* Another fsync thread may have already done this entry */
886 		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
887 			goto put_unlocked;
888 	}
889 
890 	/* Lock the entry to serialize with page faults */
891 	dax_lock_entry(xas, entry);
892 
893 	/*
894 	 * We can clear the tag now but we have to be careful so that concurrent
895 	 * dax_writeback_one() calls for the same index cannot finish before we
896 	 * actually flush the caches. This is achieved as the calls will look
897 	 * at the entry only under the i_pages lock and once they do that
898 	 * they will see the entry locked and wait for it to unlock.
899 	 */
900 	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
901 	xas_unlock_irq(xas);
902 
903 	/*
904 	 * If dax_writeback_mapping_range() was given a wbc->range_start
905 	 * in the middle of a PMD, the 'index' we use needs to be
906 	 * aligned to the start of the PMD.
907 	 * This allows us to flush for PMD_SIZE and not have to worry about
908 	 * partial PMD writebacks.
909 	 */
910 	pfn = dax_to_pfn(entry);
911 	count = 1UL << dax_entry_order(entry);
912 	index = xas->xa_index & ~(count - 1);
913 
914 	dax_entry_mkclean(mapping, index, pfn);
915 	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
916 	/*
917 	 * After we have flushed the cache, we can clear the dirty tag. There
918 	 * cannot be new dirty data in the pfn after the flush has completed as
919 	 * the pfn mappings are writeprotected and fault waits for mapping
920 	 * entry lock.
921 	 */
922 	xas_reset(xas);
923 	xas_lock_irq(xas);
924 	xas_store(xas, entry);
925 	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
926 	dax_wake_entry(xas, entry, false);
927 
928 	trace_dax_writeback_one(mapping->host, index, count);
929 	return ret;
930 
931  put_unlocked:
932 	put_unlocked_entry(xas, entry);
933 	return ret;
934 }
935 
936 /*
937  * Flush the mapping to the persistent domain within the byte range of [start,
938  * end]. This is required by data integrity operations to ensure file data is
939  * on persistent storage prior to completion of the operation.
940  */
941 int dax_writeback_mapping_range(struct address_space *mapping,
942 		struct block_device *bdev, struct writeback_control *wbc)
943 {
944 	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
945 	struct inode *inode = mapping->host;
946 	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
947 	struct dax_device *dax_dev;
948 	void *entry;
949 	int ret = 0;
950 	unsigned int scanned = 0;
951 
952 	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
953 		return -EIO;
954 
955 	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
956 		return 0;
957 
958 	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
959 	if (!dax_dev)
960 		return -EIO;
961 
962 	trace_dax_writeback_range(inode, xas.xa_index, end_index);
963 
964 	tag_pages_for_writeback(mapping, xas.xa_index, end_index);
965 
966 	xas_lock_irq(&xas);
967 	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
968 		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
969 		if (ret < 0) {
970 			mapping_set_error(mapping, ret);
971 			break;
972 		}
973 		if (++scanned % XA_CHECK_SCHED)
974 			continue;
975 
976 		xas_pause(&xas);
977 		xas_unlock_irq(&xas);
978 		cond_resched();
979 		xas_lock_irq(&xas);
980 	}
981 	xas_unlock_irq(&xas);
982 	put_dax(dax_dev);
983 	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
984 	return ret;
985 }
986 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
987 
988 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
989 {
990 	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
991 }
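/*
 * Example with made-up values: iomap->addr == 0x10000, iomap->offset == 0x8000
 * and pos == 0x9234 give a page-aligned file position of 0x9000, which maps to
 * disk byte 0x11000, i.e. 512-byte sector 0x88.
 */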
992 
993 static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
994 			 pfn_t *pfnp)
995 {
996 	const sector_t sector = dax_iomap_sector(iomap, pos);
997 	pgoff_t pgoff;
998 	int id, rc;
999 	long length;
1000 
1001 	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
1002 	if (rc)
1003 		return rc;
1004 	id = dax_read_lock();
1005 	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
1006 				   NULL, pfnp);
1007 	if (length < 0) {
1008 		rc = length;
1009 		goto out;
1010 	}
1011 	rc = -EINVAL;
1012 	if (PFN_PHYS(length) < size)
1013 		goto out;
1014 	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
1015 		goto out;
1016 	/* For larger pages we need devmap */
1017 	if (length > 1 && !pfn_t_devmap(*pfnp))
1018 		goto out;
1019 	rc = 0;
1020 out:
1021 	dax_read_unlock(id);
1022 	return rc;
1023 }
1024 
1025 /*
1026  * The user has performed a load from a hole in the file.  Allocating a new
1027  * page in the file would cause excessive storage usage for workloads with
1028  * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
1029  * If this page is ever written to we will re-fault and change the mapping to
1030  * point to real DAX storage instead.
1031  */
1032 static vm_fault_t dax_load_hole(struct xa_state *xas,
1033 		struct address_space *mapping, void **entry,
1034 		struct vm_fault *vmf)
1035 {
1036 	struct inode *inode = mapping->host;
1037 	unsigned long vaddr = vmf->address;
1038 	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
1039 	vm_fault_t ret;
1040 
1041 	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
1042 			DAX_ZERO_PAGE, false);
1043 
1044 	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
1045 	trace_dax_load_hole(inode, vmf, ret);
1046 	return ret;
1047 }
1048 
1049 static bool dax_range_is_aligned(struct block_device *bdev,
1050 				 unsigned int offset, unsigned int length)
1051 {
1052 	unsigned short sector_size = bdev_logical_block_size(bdev);
1053 
1054 	if (!IS_ALIGNED(offset, sector_size))
1055 		return false;
1056 	if (!IS_ALIGNED(length, sector_size))
1057 		return false;
1058 
1059 	return true;
1060 }
1061 
1062 int __dax_zero_page_range(struct block_device *bdev,
1063 		struct dax_device *dax_dev, sector_t sector,
1064 		unsigned int offset, unsigned int size)
1065 {
1066 	if (dax_range_is_aligned(bdev, offset, size)) {
1067 		sector_t start_sector = sector + (offset >> 9);
1068 
1069 		return blkdev_issue_zeroout(bdev, start_sector,
1070 				size >> 9, GFP_NOFS, 0);
1071 	} else {
1072 		pgoff_t pgoff;
1073 		long rc, id;
1074 		void *kaddr;
1075 
1076 		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
1077 		if (rc)
1078 			return rc;
1079 
1080 		id = dax_read_lock();
1081 		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
1082 		if (rc < 0) {
1083 			dax_read_unlock(id);
1084 			return rc;
1085 		}
1086 		memset(kaddr + offset, 0, size);
1087 		dax_flush(dax_dev, kaddr + offset, size);
1088 		dax_read_unlock(id);
1089 	}
1090 	return 0;
1091 }
1092 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
1093 
1094 static loff_t
1095 dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1096 		struct iomap *iomap)
1097 {
1098 	struct block_device *bdev = iomap->bdev;
1099 	struct dax_device *dax_dev = iomap->dax_dev;
1100 	struct iov_iter *iter = data;
1101 	loff_t end = pos + length, done = 0;
1102 	ssize_t ret = 0;
1103 	size_t xfer;
1104 	int id;
1105 
1106 	if (iov_iter_rw(iter) == READ) {
1107 		end = min(end, i_size_read(inode));
1108 		if (pos >= end)
1109 			return 0;
1110 
1111 		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1112 			return iov_iter_zero(min(length, end - pos), iter);
1113 	}
1114 
1115 	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1116 		return -EIO;
1117 
1118 	/*
1119 	 * Write can allocate block for an area which has a hole page mapped
1120 	 * into page tables. We have to tear down these mappings so that data
1121 	 * written by write(2) is visible in mmap.
1122 	 */
1123 	if (iomap->flags & IOMAP_F_NEW) {
1124 		invalidate_inode_pages2_range(inode->i_mapping,
1125 					      pos >> PAGE_SHIFT,
1126 					      (end - 1) >> PAGE_SHIFT);
1127 	}
1128 
1129 	id = dax_read_lock();
1130 	while (pos < end) {
1131 		unsigned offset = pos & (PAGE_SIZE - 1);
1132 		const size_t size = ALIGN(length + offset, PAGE_SIZE);
1133 		const sector_t sector = dax_iomap_sector(iomap, pos);
1134 		ssize_t map_len;
1135 		pgoff_t pgoff;
1136 		void *kaddr;
1137 
1138 		if (fatal_signal_pending(current)) {
1139 			ret = -EINTR;
1140 			break;
1141 		}
1142 
1143 		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
1144 		if (ret)
1145 			break;
1146 
1147 		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1148 				&kaddr, NULL);
1149 		if (map_len < 0) {
1150 			ret = map_len;
1151 			break;
1152 		}
1153 
1154 		map_len = PFN_PHYS(map_len);
1155 		kaddr += offset;
1156 		map_len -= offset;
1157 		if (map_len > end - pos)
1158 			map_len = end - pos;
1159 
1160 		/*
1161 		 * The userspace address for the memory copy has already been
1162 		 * validated via access_ok() in either vfs_read() or
1163 		 * vfs_write(), depending on which operation we are doing.
1164 		 */
1165 		if (iov_iter_rw(iter) == WRITE)
1166 			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1167 					map_len, iter);
1168 		else
1169 			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
1170 					map_len, iter);
1171 
1172 		pos += xfer;
1173 		length -= xfer;
1174 		done += xfer;
1175 
1176 		if (xfer == 0)
1177 			ret = -EFAULT;
1178 		if (xfer < map_len)
1179 			break;
1180 	}
1181 	dax_read_unlock(id);
1182 
1183 	return done ? done : ret;
1184 }
1185 
1186 /**
1187  * dax_iomap_rw - Perform I/O to a DAX file
1188  * @iocb:	The control block for this I/O
1189  * @iter:	The addresses to do I/O from or to
1190  * @ops:	iomap ops passed from the file system
1191  *
1192  * This function performs read and write operations to directly mapped
1193  * persistent memory.  The caller needs to take care of read/write exclusion
1194  * and evicting any page cache pages in the region under I/O.
1195  */
1196 ssize_t
1197 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1198 		const struct iomap_ops *ops)
1199 {
1200 	struct address_space *mapping = iocb->ki_filp->f_mapping;
1201 	struct inode *inode = mapping->host;
1202 	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1203 	unsigned flags = 0;
1204 
1205 	if (iov_iter_rw(iter) == WRITE) {
1206 		lockdep_assert_held_write(&inode->i_rwsem);
1207 		flags |= IOMAP_WRITE;
1208 	} else {
1209 		lockdep_assert_held(&inode->i_rwsem);
1210 	}
1211 
1212 	if (iocb->ki_flags & IOCB_NOWAIT)
1213 		flags |= IOMAP_NOWAIT;
1214 
1215 	while (iov_iter_count(iter)) {
1216 		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1217 				iter, dax_iomap_actor);
1218 		if (ret <= 0)
1219 			break;
1220 		pos += ret;
1221 		done += ret;
1222 	}
1223 
1224 	iocb->ki_pos += done;
1225 	return done ? done : ret;
1226 }
1227 EXPORT_SYMBOL_GPL(dax_iomap_rw);
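/*
 * Typical usage (sketch): a filesystem's DAX ->read_iter()/->write_iter()
 * takes the inode lock shared or exclusive as asserted above, and then calls
 * dax_iomap_rw(iocb, iter, ops) with its own struct iomap_ops (ext4 and xfs
 * both follow this pattern for DAX inodes).
 */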
1228 
1229 static vm_fault_t dax_fault_return(int error)
1230 {
1231 	if (error == 0)
1232 		return VM_FAULT_NOPAGE;
1233 	return vmf_error(error);
1234 }
1235 
1236 /*
1237  * MAP_SYNC on a dax mapping guarantees dirty metadata is
1238  * flushed on write-faults (non-cow), but not read-faults.
1239  */
1240 static bool dax_fault_is_synchronous(unsigned long flags,
1241 		struct vm_area_struct *vma, struct iomap *iomap)
1242 {
1243 	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
1244 		&& (iomap->flags & IOMAP_F_DIRTY);
1245 }
1246 
1247 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1248 			       int *iomap_errp, const struct iomap_ops *ops)
1249 {
1250 	struct vm_area_struct *vma = vmf->vma;
1251 	struct address_space *mapping = vma->vm_file->f_mapping;
1252 	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
1253 	struct inode *inode = mapping->host;
1254 	unsigned long vaddr = vmf->address;
1255 	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1256 	struct iomap iomap = { 0 };
1257 	unsigned flags = IOMAP_FAULT;
1258 	int error, major = 0;
1259 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1260 	bool sync;
1261 	vm_fault_t ret = 0;
1262 	void *entry;
1263 	pfn_t pfn;
1264 
1265 	trace_dax_pte_fault(inode, vmf, ret);
1266 	/*
1267 	 * Check whether offset isn't beyond end of file now. Caller is supposed
1268 	 * to hold locks serializing us with truncate / punch hole so this is
1269 	 * a reliable test.
1270 	 */
1271 	if (pos >= i_size_read(inode)) {
1272 		ret = VM_FAULT_SIGBUS;
1273 		goto out;
1274 	}
1275 
1276 	if (write && !vmf->cow_page)
1277 		flags |= IOMAP_WRITE;
1278 
1279 	entry = grab_mapping_entry(&xas, mapping, 0);
1280 	if (xa_is_internal(entry)) {
1281 		ret = xa_to_internal(entry);
1282 		goto out;
1283 	}
1284 
1285 	/*
1286 	 * It is possible, particularly with mixed reads & writes to private
1287 	 * mappings, that we have raced with a PMD fault that overlaps with
1288 	 * the PTE we need to set up.  If so just return and the fault will be
1289 	 * retried.
1290 	 */
1291 	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1292 		ret = VM_FAULT_NOPAGE;
1293 		goto unlock_entry;
1294 	}
1295 
1296 	/*
1297 	 * Note that we don't bother to use iomap_apply here: DAX requires
1298 	 * the file system block size to be equal to the page size, which means
1299 	 * that we never have to deal with more than a single extent here.
1300 	 */
1301 	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1302 	if (iomap_errp)
1303 		*iomap_errp = error;
1304 	if (error) {
1305 		ret = dax_fault_return(error);
1306 		goto unlock_entry;
1307 	}
1308 	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1309 		error = -EIO;	/* fs corruption? */
1310 		goto error_finish_iomap;
1311 	}
1312 
1313 	if (vmf->cow_page) {
1314 		sector_t sector = dax_iomap_sector(&iomap, pos);
1315 
1316 		switch (iomap.type) {
1317 		case IOMAP_HOLE:
1318 		case IOMAP_UNWRITTEN:
1319 			clear_user_highpage(vmf->cow_page, vaddr);
1320 			break;
1321 		case IOMAP_MAPPED:
1322 			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
1323 					sector, PAGE_SIZE, vmf->cow_page, vaddr);
1324 			break;
1325 		default:
1326 			WARN_ON_ONCE(1);
1327 			error = -EIO;
1328 			break;
1329 		}
1330 
1331 		if (error)
1332 			goto error_finish_iomap;
1333 
1334 		__SetPageUptodate(vmf->cow_page);
1335 		ret = finish_fault(vmf);
1336 		if (!ret)
1337 			ret = VM_FAULT_DONE_COW;
1338 		goto finish_iomap;
1339 	}
1340 
1341 	sync = dax_fault_is_synchronous(flags, vma, &iomap);
1342 
1343 	switch (iomap.type) {
1344 	case IOMAP_MAPPED:
1345 		if (iomap.flags & IOMAP_F_NEW) {
1346 			count_vm_event(PGMAJFAULT);
1347 			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
1348 			major = VM_FAULT_MAJOR;
1349 		}
1350 		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
1351 		if (error < 0)
1352 			goto error_finish_iomap;
1353 
1354 		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
1355 						 0, write && !sync);
1356 
1357 		/*
1358 		 * If we are doing synchronous page fault and inode needs fsync,
1359 		 * we can insert PTE into page tables only after that happens.
1360 		 * Skip insertion for now and return the pfn so that caller can
1361 		 * insert it after fsync is done.
1362 		 */
1363 		if (sync) {
1364 			if (WARN_ON_ONCE(!pfnp)) {
1365 				error = -EIO;
1366 				goto error_finish_iomap;
1367 			}
1368 			*pfnp = pfn;
1369 			ret = VM_FAULT_NEEDDSYNC | major;
1370 			goto finish_iomap;
1371 		}
1372 		trace_dax_insert_mapping(inode, vmf, entry);
1373 		if (write)
1374 			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
1375 		else
1376 			ret = vmf_insert_mixed(vma, vaddr, pfn);
1377 
1378 		goto finish_iomap;
1379 	case IOMAP_UNWRITTEN:
1380 	case IOMAP_HOLE:
1381 		if (!write) {
1382 			ret = dax_load_hole(&xas, mapping, &entry, vmf);
1383 			goto finish_iomap;
1384 		}
1385 		/*FALLTHRU*/
1386 	default:
1387 		WARN_ON_ONCE(1);
1388 		error = -EIO;
1389 		break;
1390 	}
1391 
1392  error_finish_iomap:
1393 	ret = dax_fault_return(error);
1394  finish_iomap:
1395 	if (ops->iomap_end) {
1396 		int copied = PAGE_SIZE;
1397 
1398 		if (ret & VM_FAULT_ERROR)
1399 			copied = 0;
1400 		/*
1401 		 * The fault is done by now and there's no way back (other
1402 		 * thread may be already happily using PTE we have installed).
1403 		 * Just ignore error from ->iomap_end since we cannot do much
1404 		 * with it.
1405 		 */
1406 		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1407 	}
1408  unlock_entry:
1409 	dax_unlock_entry(&xas, entry);
1410  out:
1411 	trace_dax_pte_fault_done(inode, vmf, ret);
1412 	return ret | major;
1413 }
1414 
1415 #ifdef CONFIG_FS_DAX_PMD
1416 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1417 		struct iomap *iomap, void **entry)
1418 {
1419 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1420 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1421 	struct vm_area_struct *vma = vmf->vma;
1422 	struct inode *inode = mapping->host;
1423 	pgtable_t pgtable = NULL;
1424 	struct page *zero_page;
1425 	spinlock_t *ptl;
1426 	pmd_t pmd_entry;
1427 	pfn_t pfn;
1428 
1429 	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1430 
1431 	if (unlikely(!zero_page))
1432 		goto fallback;
1433 
1434 	pfn = page_to_pfn_t(zero_page);
1435 	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
1436 			DAX_PMD | DAX_ZERO_PAGE, false);
1437 
1438 	if (arch_needs_pgtable_deposit()) {
1439 		pgtable = pte_alloc_one(vma->vm_mm);
1440 		if (!pgtable)
1441 			return VM_FAULT_OOM;
1442 	}
1443 
1444 	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1445 	if (!pmd_none(*(vmf->pmd))) {
1446 		spin_unlock(ptl);
1447 		goto fallback;
1448 	}
1449 
1450 	if (pgtable) {
1451 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1452 		mm_inc_nr_ptes(vma->vm_mm);
1453 	}
1454 	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1455 	pmd_entry = pmd_mkhuge(pmd_entry);
1456 	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1457 	spin_unlock(ptl);
1458 	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
1459 	return VM_FAULT_NOPAGE;
1460 
1461 fallback:
1462 	if (pgtable)
1463 		pte_free(vma->vm_mm, pgtable);
1464 	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
1465 	return VM_FAULT_FALLBACK;
1466 }
1467 
1468 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1469 			       const struct iomap_ops *ops)
1470 {
1471 	struct vm_area_struct *vma = vmf->vma;
1472 	struct address_space *mapping = vma->vm_file->f_mapping;
1473 	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
1474 	unsigned long pmd_addr = vmf->address & PMD_MASK;
1475 	bool write = vmf->flags & FAULT_FLAG_WRITE;
1476 	bool sync;
1477 	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1478 	struct inode *inode = mapping->host;
1479 	vm_fault_t result = VM_FAULT_FALLBACK;
1480 	struct iomap iomap = { 0 };
1481 	pgoff_t max_pgoff;
1482 	void *entry;
1483 	loff_t pos;
1484 	int error;
1485 	pfn_t pfn;
1486 
1487 	/*
1488 	 * Check whether offset isn't beyond end of file now. Caller is
1489 	 * supposed to hold locks serializing us with truncate / punch hole so
1490 	 * this is a reliable test.
1491 	 */
1492 	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1493 
1494 	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1495 
1496 	/*
1497 	 * Make sure that the faulting address's PMD offset (color) matches
1498 	 * the PMD offset from the start of the file.  This is necessary so
1499 	 * that a PMD range in the page table overlaps exactly with a PMD
1500 	 * range in the page cache.
1501 	 */
1502 	if ((vmf->pgoff & PG_PMD_COLOUR) !=
1503 	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1504 		goto fallback;
1505 
1506 	/* Fall back to PTEs if we're going to COW */
1507 	if (write && !(vma->vm_flags & VM_SHARED))
1508 		goto fallback;
1509 
1510 	/* If the PMD would extend outside the VMA */
1511 	if (pmd_addr < vma->vm_start)
1512 		goto fallback;
1513 	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1514 		goto fallback;
1515 
1516 	if (xas.xa_index >= max_pgoff) {
1517 		result = VM_FAULT_SIGBUS;
1518 		goto out;
1519 	}
1520 
1521 	/* If the PMD would extend beyond the file size */
1522 	if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
1523 		goto fallback;
1524 
1525 	/*
1526 	 * grab_mapping_entry() will make sure we get an empty PMD entry,
1527 	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
1528 	 * entry is already in the array, for instance), it will return
1529 	 * VM_FAULT_FALLBACK.
1530 	 */
1531 	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
1532 	if (xa_is_internal(entry)) {
1533 		result = xa_to_internal(entry);
1534 		goto fallback;
1535 	}
1536 
1537 	/*
1538 	 * It is possible, particularly with mixed reads & writes to private
1539 	 * mappings, that we have raced with a PTE fault that overlaps with
1540 	 * the PMD we need to set up.  If so just return and the fault will be
1541 	 * retried.
1542 	 */
1543 	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1544 			!pmd_devmap(*vmf->pmd)) {
1545 		result = 0;
1546 		goto unlock_entry;
1547 	}
1548 
1549 	/*
1550 	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1551 	 * setting up a mapping, so really we're using iomap_begin() as a way
1552 	 * to look up our filesystem block.
1553 	 */
1554 	pos = (loff_t)xas.xa_index << PAGE_SHIFT;
1555 	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1556 	if (error)
1557 		goto unlock_entry;
1558 
1559 	if (iomap.offset + iomap.length < pos + PMD_SIZE)
1560 		goto finish_iomap;
1561 
1562 	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
1563 
1564 	switch (iomap.type) {
1565 	case IOMAP_MAPPED:
1566 		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
1567 		if (error < 0)
1568 			goto finish_iomap;
1569 
1570 		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
1571 						DAX_PMD, write && !sync);
1572 
1573 		/*
1574 		 * If we are doing synchronous page fault and inode needs fsync,
1575 		 * we can insert PMD into page tables only after that happens.
1576 		 * Skip insertion for now and return the pfn so that caller can
1577 		 * insert it after fsync is done.
1578 		 */
1579 		if (sync) {
1580 			if (WARN_ON_ONCE(!pfnp))
1581 				goto finish_iomap;
1582 			*pfnp = pfn;
1583 			result = VM_FAULT_NEEDDSYNC;
1584 			goto finish_iomap;
1585 		}
1586 
1587 		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
1588 		result = vmf_insert_pfn_pmd(vmf, pfn, write);
1589 		break;
1590 	case IOMAP_UNWRITTEN:
1591 	case IOMAP_HOLE:
1592 		if (WARN_ON_ONCE(write))
1593 			break;
1594 		result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
1595 		break;
1596 	default:
1597 		WARN_ON_ONCE(1);
1598 		break;
1599 	}
1600 
1601  finish_iomap:
1602 	if (ops->iomap_end) {
1603 		int copied = PMD_SIZE;
1604 
1605 		if (result == VM_FAULT_FALLBACK)
1606 			copied = 0;
1607 		/*
1608 		 * The fault is done by now and there's no way back (other
1609 		 * thread may be already happily using PMD we have installed).
1610 		 * Just ignore error from ->iomap_end since we cannot do much
1611 		 * with it.
1612 		 */
1613 		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1614 				&iomap);
1615 	}
1616  unlock_entry:
1617 	dax_unlock_entry(&xas, entry);
1618  fallback:
1619 	if (result == VM_FAULT_FALLBACK) {
1620 		split_huge_pmd(vma, vmf->pmd, vmf->address);
1621 		count_vm_event(THP_FAULT_FALLBACK);
1622 	}
1623 out:
1624 	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1625 	return result;
1626 }
1627 #else
1628 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1629 			       const struct iomap_ops *ops)
1630 {
1631 	return VM_FAULT_FALLBACK;
1632 }
1633 #endif /* CONFIG_FS_DAX_PMD */
1634 
1635 /**
1636  * dax_iomap_fault - handle a page fault on a DAX file
1637  * @vmf: The description of the fault
1638  * @pe_size: Size of the page to fault in
1639  * @pfnp: PFN to insert for synchronous faults if fsync is required
1640  * @iomap_errp: Storage for detailed error code in case of error
1641  * @ops: Iomap ops passed from the file system
1642  *
1643  * When a page fault occurs, filesystems may call this helper in
1644  * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1645  * has done all the necessary locking for page fault to proceed
1646  * successfully.
1647  */
1648 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1649 		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1650 {
1651 	switch (pe_size) {
1652 	case PE_SIZE_PTE:
1653 		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1654 	case PE_SIZE_PMD:
1655 		return dax_iomap_pmd_fault(vmf, pfnp, ops);
1656 	default:
1657 		return VM_FAULT_FALLBACK;
1658 	}
1659 }
1660 EXPORT_SYMBOL_GPL(dax_iomap_fault);
1661 
1662 /*
1663  * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1664  * @vmf: The description of the fault
1665  * @pfn: PFN to insert
1666  * @order: Order of entry to insert.
1667  *
1668  * This function inserts a writeable PTE or PMD entry into the page tables
1669  * for an mmaped DAX file.  It also marks the page cache entry as dirty.
1670  */
1671 static vm_fault_t
1672 dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
1673 {
1674 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1675 	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
1676 	void *entry;
1677 	vm_fault_t ret;
1678 
1679 	xas_lock_irq(&xas);
1680 	entry = get_unlocked_entry(&xas, order);
1681 	/* Did we race with someone splitting entry or so? */
1682 	if (!entry || dax_is_conflict(entry) ||
1683 	    (order == 0 && !dax_is_pte_entry(entry))) {
1684 		put_unlocked_entry(&xas, entry);
1685 		xas_unlock_irq(&xas);
1686 		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1687 						      VM_FAULT_NOPAGE);
1688 		return VM_FAULT_NOPAGE;
1689 	}
1690 	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
1691 	dax_lock_entry(&xas, entry);
1692 	xas_unlock_irq(&xas);
1693 	if (order == 0)
1694 		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1695 #ifdef CONFIG_FS_DAX_PMD
1696 	else if (order == PMD_ORDER)
1697 		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
1698 #endif
1699 	else
1700 		ret = VM_FAULT_FALLBACK;
1701 	dax_unlock_entry(&xas, entry);
1702 	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
1703 	return ret;
1704 }
1705 
1706 /**
1707  * dax_finish_sync_fault - finish synchronous page fault
1708  * @vmf: The description of the fault
1709  * @pe_size: Size of entry to be inserted
1710  * @pfn: PFN to insert
1711  *
1712  * This function ensures that the file range touched by the page fault is
1713  * stored persistently on the media and handles inserting of appropriate page
1714  * table entry.
1715  */
1716 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
1717 		enum page_entry_size pe_size, pfn_t pfn)
1718 {
1719 	int err;
1720 	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1721 	unsigned int order = pe_order(pe_size);
1722 	size_t len = PAGE_SIZE << order;
1723 
1724 	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1725 	if (err)
1726 		return VM_FAULT_SIGBUS;
1727 	return dax_insert_pfn_mkwrite(vmf, pfn, order);
1728 }
1729 EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
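/*
 * Typical usage (sketch): a MAP_SYNC-aware filesystem fault handler first
 * calls dax_iomap_fault(); when that returns VM_FAULT_NEEDDSYNC it completes
 * the fault with dax_finish_sync_fault(), which performs the fsync and then
 * installs the page table entry using the pfn that dax_iomap_fault() stored
 * via its pfnp argument.
 */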
1730