// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/shmem_fs.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

#undef CREATE_TRACE_POINTS
#include <trace/hooks/vmscan.h>

/*
 * Regular page slots are stabilized by the page lock even without the tree
 * itself locked.  These unlocked entries need verification under the tree
 * lock.
 */
static inline void __clear_shadow_entry(struct address_space *mapping,
				pgoff_t index, void *entry)
{
	XA_STATE(xas, &mapping->i_pages, index);

	xas_set_update(&xas, workingset_update_node);
	if (xas_load(&xas) != entry)
		return;
	xas_store(&xas, NULL);
}

static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
			       void *entry)
{
	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	__clear_shadow_entry(mapping, index, entry);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);
}

/*
 * Unconditionally remove exceptional entries. Usually called from truncate
 * path. Note that the folio_batch may be altered by this function by removing
 * exceptional entries similar to what folio_batch_remove_exceptionals() does.
 */
static void truncate_folio_batch_exceptionals(struct address_space *mapping,
				struct folio_batch *fbatch, pgoff_t *indices)
{
	int i, j;
	bool dax;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	for (j = 0; j < folio_batch_count(fbatch); j++)
		if (xa_is_value(fbatch->folios[j]))
			break;

	if (j == folio_batch_count(fbatch))
		return;

	dax = dax_mapping(mapping);
	if (!dax) {
		spin_lock(&mapping->host->i_lock);
		xa_lock_irq(&mapping->i_pages);
	}

	for (i = j; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];
		pgoff_t index = indices[i];

		if (!xa_is_value(folio)) {
			fbatch->folios[j++] = folio;
			continue;
		}

		if (unlikely(dax)) {
			dax_delete_mapping_entry(mapping, index);
			continue;
		}

		__clear_shadow_entry(mapping, index, folio);
	}

	if (!dax) {
		xa_unlock_irq(&mapping->i_pages);
		if (mapping_shrinkable(mapping))
			inode_add_lru(mapping->host);
		spin_unlock(&mapping->host->i_lock);
	}
	fbatch->nr = j;
}
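
/*
 * Worked example of the compaction above (purely illustrative, not taken
 * from any real caller): if the batch arrives as { folio A, shadow X,
 * folio B, shadow Y } with nr == 4, the first loop stops at j == 1, the slot
 * of the first exceptional entry.  The second loop then removes X and Y from
 * the xarray via __clear_shadow_entry(), slides folio B down into slot 1,
 * and the function returns with the batch holding { folio A, folio B } and
 * fbatch->nr == 2, as if the exceptional entries had never been present.
 */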

/*
 * Invalidate exceptional entry if easily possible. This handles exceptional
 * entries for invalidate_inode_pages().
 */
static int invalidate_exceptional_entry(struct address_space *mapping,
					pgoff_t index, void *entry)
{
	/* Handled by shmem itself, or for DAX we do nothing. */
	if (shmem_mapping(mapping) || dax_mapping(mapping))
		return 1;
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/*
 * Invalidate exceptional entry if clean. This handles exceptional entries for
 * invalidate_inode_pages2() so for DAX it evicts only clean entries.
 */
static int invalidate_exceptional_entry2(struct address_space *mapping,
					 pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return 1;
	if (dax_mapping(mapping))
		return dax_invalidate_mapping_entry_sync(mapping, index);
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/**
 * folio_invalidate - Invalidate part or all of a folio.
 * @folio: The folio which is affected.
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * folio_invalidate() is called when all or part of the folio has become
 * invalidated by a truncate operation.
 *
 * folio_invalidate() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void folio_invalidate(struct folio *folio, size_t offset, size_t length)
{
	const struct address_space_operations *aops = folio->mapping->a_ops;

	if (aops->invalidate_folio)
		aops->invalidate_folio(folio, offset, length);
}
EXPORT_SYMBOL_GPL(folio_invalidate);
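
/*
 * Illustrative sketch (an assumption, not part of this file): a minimal
 * ->invalidate_folio method honouring the contract documented above, for a
 * hypothetical filesystem that hangs private state off each folio.  The
 * "example_" names are invented; block-based filesystems would normally just
 * wire ->invalidate_folio to block_invalidate_folio() instead.
 *
 *	static void example_invalidate_folio(struct folio *folio,
 *					     size_t offset, size_t length)
 *	{
 *		if (offset == 0 && length == folio_size(folio))
 *			example_detach_private(folio);
 *	}
 *
 * folio_invalidate() above is only the dispatcher that invokes such a method
 * when one is registered in the address_space_operations.
 */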

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void truncate_cleanup_folio(struct folio *folio)
{
	if (folio_mapped(folio))
		unmap_mapping_folio(folio);

	if (folio_has_private(folio))
		folio_invalidate(folio, 0, folio_size(folio));

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence dirty accounting check is placed after invalidation.
	 */
	folio_cancel_dirty(folio);
	folio_clear_mappedtodisk(folio);
}

int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
{
	if (folio->mapping != mapping)
		return -EIO;

	truncate_cleanup_folio(folio);
	filemap_remove_folio(folio);
	return 0;
}

/*
 * Handle partial folios.  The folio may be entirely within the
 * range if a split has raced with us.  If not, we zero the part of the
 * folio that's within the [start, end] range, and then split the folio if
 * it's large.  split_page_range() will discard pages which now lie beyond
 * i_size, and we rely on the caller to discard pages which lie within a
 * newly created hole.
 *
 * Returns false if splitting failed so the caller can avoid
 * discarding the entire folio which is stubbornly unsplit.
 */
bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
{
	int err;
	loff_t pos = folio_pos(folio);
	unsigned int offset, length;

	if (pos < start)
		offset = start - pos;
	else
		offset = 0;
	length = folio_size(folio);
	if (pos + length <= (u64)end)
		length = length - offset;
	else
		length = end + 1 - pos - offset;

	folio_wait_writeback(folio);
	if (length == folio_size(folio)) {
		truncate_inode_folio(folio->mapping, folio);
		return true;
	}

	/*
	 * We may be zeroing pages we're about to discard, but it avoids
	 * doing a complex calculation here, and then doing the zeroing
	 * anyway if the page split fails.
	 */
	folio_zero_range(folio, offset, length);

	cleancache_invalidate_page(folio->mapping, &folio->page);
	if (folio_has_private(folio))
		folio_invalidate(folio, offset, length);
	if (!folio_test_large(folio))
		return true;
	err = split_folio(folio);
	if (!err)
		return true;
	if (err > 0)
		return false;
	if (folio_test_dirty(folio))
		return false;
	truncate_inode_folio(folio->mapping, folio);
	return true;
}
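
/*
 * Worked example of the offset/length arithmetic above (illustrative numbers
 * only): for a 64KiB folio at pos == 128KiB, with start == 160KiB and
 * end == -1 (a truncation down to 160KiB), offset becomes 32KiB and length
 * becomes 32KiB, so only the second half of the folio is zeroed before the
 * split is attempted.  Had [start, end] covered the whole folio, length would
 * have equalled folio_size() and the folio would simply have been truncated.
 */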

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);

	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_folio(mapping, page_folio(page));
}
EXPORT_SYMBOL(generic_error_remove_page);

static long mapping_evict_folio(struct address_space *mapping,
		struct folio *folio)
{
	if (folio_test_dirty(folio) || folio_test_writeback(folio))
		return 0;
	/* The refcount will be elevated if any page in the folio is mapped */
	if (folio_ref_count(folio) >
			folio_nr_pages(folio) + folio_has_private(folio) + 1)
		return 0;
	if (!filemap_release_folio(folio, 0))
		return 0;

	return remove_mapping(mapping, folio);
}

/**
 * invalidate_inode_page() - Remove an unused page from the pagecache.
 * @page: The page to remove.
 *
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages.
 *
 * Context: Page must be locked.
 * Return: The number of pages successfully removed.
 */
long invalidate_inode_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	struct address_space *mapping = folio_mapping(folio);

	/* The page may have been truncated before it was locked */
	if (!mapping)
		return 0;
	return mapping_evict_folio(mapping, folio);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidate_folio() accepts a range to invalidate,
 * truncate_inode_pages_range() is able to handle cases where lend + 1 is not
 * page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	struct folio_batch fbatch;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;
	struct folio	*folio;
	bool		same_folio;

	if (mapping_empty(mapping))
		goto out;

	/*
	 * 'start' and 'end' always cover the range of pages to be fully
	 * truncated. Partial folios at either end of the range are dealt
	 * with separately below. Note that 'end' is exclusive while 'lend'
	 * is inclusive.
	 */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	folio_batch_init(&fbatch);
	index = start;
	while (index < end && find_lock_entries(mapping, &index, end - 1,
			&fbatch, indices)) {
		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			truncate_cleanup_folio(fbatch.folios[i]);
		delete_from_page_cache_batch(mapping, &fbatch);
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			folio_unlock(fbatch.folios[i]);
		folio_batch_release(&fbatch);
		cond_resched();
	}

	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
	folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
	if (!IS_ERR(folio)) {
		same_folio = lend < folio_pos(folio) + folio_size(folio);
		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
			start = folio_next_index(folio);
			if (same_folio)
				end = folio->index;
		}
		folio_unlock(folio);
		folio_put(folio);
		folio = NULL;
	}

	if (!same_folio) {
		folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,
						FGP_LOCK, 0);
		if (!IS_ERR(folio)) {
			if (!truncate_inode_partial_folio(folio, lstart, lend))
				end = folio->index;
			folio_unlock(folio);
			folio_put(folio);
		}
	}

	index = start;
	while (index < end) {
		cond_resched();
		if (!find_get_entries(mapping, &index, end - 1, &fbatch,
				indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}

		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			/* We rely upon deletion not changing page->index */

			if (xa_is_value(folio))
				continue;

			folio_lock(folio);
			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
			folio_wait_writeback(folio);
			truncate_inode_folio(mapping, folio);
			folio_unlock(folio);
		}
		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
		folio_batch_release(&fbatch);
	}

out:
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
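
/*
 * Worked example of the index arithmetic above (illustrative, 4KiB pages):
 * for lstart == 3000 and lend == 16383, start == (3000 + 4095) >> 12 == 1
 * and end == 16384 >> 12 == 4, so pages 1-3 are removed whole by the batched
 * passes, while page 0 only has bytes 3000-4095 zeroed via
 * truncate_inode_partial_folio().  Because lend + 1 is page aligned, no
 * partial folio is left over at the end of the range.
 */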

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_rwsem and
 * mapping->invalidate_lock.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __filemap_remove_folio()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_rwsem.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	/*
	 * Page reclaim can not participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	if (!mapping_empty(mapping)) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		xa_lock_irq(&mapping->i_pages);
		xa_unlock_irq(&mapping->i_pages);
	}

	/*
	 * Cleancache needs notification even if there are no pages or shadow
	 * entries.
	 */
	truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(truncate_inode_pages_final);
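
/*
 * Illustrative sketch (an assumption, not part of this file): the usual
 * shape of a simple ->evict_inode method, showing where the final truncate
 * fits.  "example_evict_inode" is an invented name; a real filesystem would
 * also release its own private inode state here.
 *
 *	static void example_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		clear_inode(inode);
 *	}
 */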

/**
 * mapping_try_invalidate - Invalidate all the evictable folios of one inode
 * @mapping: the address_space which holds the folios to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 * @nr_failed: How many folio invalidations failed
 *
 * This function is similar to invalidate_mapping_pages(), except that it
 * returns the number of folios which could not be evicted in @nr_failed.
 */
unsigned long mapping_try_invalidate(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_failed)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct folio_batch fbatch;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;
	bool skip = false;

	trace_android_vh_invalidate_mapping_pagevec(mapping, &skip);
	if (skip)
		return count;

	folio_batch_init(&fbatch);
	while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			/* We rely upon deletion not changing folio->index */

			if (xa_is_value(folio)) {
				count += invalidate_exceptional_entry(mapping,
							     indices[i], folio);
				continue;
			}

			ret = mapping_evict_folio(mapping, folio);
			folio_unlock(folio);
			/*
			 * Invalidation is a hint that the folio is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret) {
				deactivate_file_folio(folio);
				/* Likely in the lru cache of a remote CPU */
				if (nr_failed)
					(*nr_failed)++;
			}
			count += ret;
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
	return count;
}

/**
 * invalidate_mapping_pages - Invalidate all clean, unlocked cache of one inode
 * @mapping: the address_space which holds the cache to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function removes pages that are clean, unmapped and unlocked,
 * as well as shadow entries. It will not block on IO activity.
 *
 * If you want to remove all the pages of one inode, regardless of
 * their use and writeback state, use truncate_inode_pages().
 *
 * Return: The number of indices that had their contents invalidated
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	return mapping_try_invalidate(mapping, start, end, NULL);
}
EXPORT_SYMBOL(invalidate_mapping_pages);
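
/*
 * Illustrative sketch (an assumption, not part of this file): roughly how a
 * drop-behind caller such as the POSIX_FADV_DONTNEED path converts a byte
 * range into page indices before invalidating it; the writeback handling of
 * the real fadvise code is omitted here.
 *
 *	start_index = offset >> PAGE_SHIFT;
 *	end_index = (offset + len - 1) >> PAGE_SHIFT;
 *	invalidate_mapping_pages(mapping, start_index, end_index);
 *
 * Dirty, mapped or otherwise busy folios in the range simply stay put; the
 * return value only counts the indices whose contents were invalidated.
 */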

/*
 * This is like invalidate_inode_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the folio_add_lru() caches.
 */
static int invalidate_complete_folio2(struct address_space *mapping,
					struct folio *folio)
{
	if (folio->mapping != mapping)
		return 0;

	if (!filemap_release_folio(folio, GFP_KERNEL))
		return 0;

	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	if (folio_test_dirty(folio))
		goto failed;

	BUG_ON(folio_has_private(folio));
	__filemap_remove_folio(folio, NULL);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

	filemap_free_folio(mapping, folio);
	return 1;
failed:
	xa_unlock_irq(&mapping->i_pages);
	spin_unlock(&mapping->host->i_lock);
	return 0;
}

static int folio_launder(struct address_space *mapping, struct folio *folio)
{
	if (!folio_test_dirty(folio))
		return 0;
	if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
		return 0;
	return mapping->a_ops->launder_folio(folio);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct folio_batch fbatch;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	if (mapping_empty(mapping))
		goto out;

	folio_batch_init(&fbatch);
	index = start;
	while (find_get_entries(mapping, &index, end, &fbatch, indices)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			/* We rely upon deletion not changing folio->index */

			if (xa_is_value(folio)) {
				if (!invalidate_exceptional_entry2(mapping,
						indices[i], folio))
					ret = -EBUSY;
				continue;
			}

			if (!did_range_unmap && folio_mapped(folio)) {
				/*
				 * If folio is mapped, before taking its lock,
				 * zap the rest of the file in one hit.
				 */
				unmap_mapping_pages(mapping, indices[i],
						(1 + end - indices[i]), false);
				did_range_unmap = 1;
			}

			folio_lock(folio);
			if (unlikely(folio->mapping != mapping)) {
				folio_unlock(folio);
				continue;
			}
			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
			folio_wait_writeback(folio);

			if (folio_mapped(folio))
				unmap_mapping_folio(folio);
			BUG_ON(folio_mapped(folio));

			ret2 = folio_launder(mapping, folio);
			if (ret2 == 0) {
				if (!invalidate_complete_folio2(mapping, folio))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			folio_unlock(folio);
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}
	/*
	 * For DAX we invalidate page tables after invalidating page cache.  We
	 * could invalidate page tables while invalidating each entry however
	 * that would be expensive. And doing range unmapping before doesn't
	 * work as we have no cheap way to find whether page cache entry didn't
	 * get remapped later.
	 */
	if (dax_mapping(mapping)) {
		unmap_mapping_pages(mapping, start, end - start + 1, false);
	}
out:
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
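
/*
 * Illustrative sketch (an assumption, not part of this file): the classic
 * user of this interface is a direct I/O write path, which must shoot down
 * any cached folios covering the bytes it is about to write so that later
 * buffered reads cannot see stale data.  Schematically:
 *
 *	ret = filemap_write_and_wait_range(mapping, pos, end);
 *	if (!ret)
 *		ret = invalidate_inode_pages2_range(mapping,
 *						    pos >> PAGE_SHIFT,
 *						    end >> PAGE_SHIFT);
 *
 * If -EBUSY comes back, such callers typically fall back to buffered I/O
 * rather than risk exposing stale page cache contents.
 */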

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Return: -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_rwsem but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
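
/*
 * Illustrative sketch (an assumption, not part of this file): the ATTR_SIZE
 * fragment of a hypothetical filesystem's ->setattr, run under i_rwsem, as
 * the kerneldoc above describes.  "example_truncate_blocks" is an invented
 * stand-in for the filesystem-specific on-disk block truncation.
 *
 *	if ((attr->ia_valid & ATTR_SIZE) &&
 *	    attr->ia_size != i_size_read(inode)) {
 *		truncate_setsize(inode, attr->ia_size);
 *		example_truncate_blocks(inode, attr->ia_size);
 *	}
 *
 * Note the ordering: truncate_setsize() updates i_size and drops the page
 * cache first, and only then are the on-disk blocks released.
 */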

/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode:	inode for which i_size was extended
 * @from:	original inode size
 * @to:		new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size. We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page.  This way filesystem can be sure that page_mkwrite() is called on
 * the page before user writes to the page via mmap after the i_size has been
 * changed.
 *
 * The function must be called after i_size is updated so that page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_rwsem - this not only
 * makes sure i_size is stable but also that userspace cannot observe new
 * i_size value before we are prepared to store mmap writes at new inode size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = i_blocksize(inode);
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_SIZE)
		return;
	/* Page straddling @from will not have any hole block created? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	index = from >> PAGE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);
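
/*
 * Worked example (illustrative numbers): with 1KiB blocks and 4KiB pages,
 * extending a file from i_size == 5120 (@from) to 9000 (@to) gives
 * rounded_from == 5120, which is not page aligned, so the page at index 1,
 * the one straddling the old i_size, is looked up and write-protected via
 * page_mkclean().  The next mmap write to it then goes through
 * page_mkwrite(), giving the filesystem a chance to allocate or zero the
 * blocks between the old and new size.  With bsize == PAGE_SIZE the function
 * returns early, since the straddling page cannot contain a newly exposed
 * partial block.
 */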

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
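
/*
 * Illustrative sketch (an assumption, not part of this file): a typical
 * FALLOC_FL_PUNCH_HOLE implementation drops the affected page cache with
 * truncate_pagecache_range() before freeing the underlying blocks, holding
 * i_rwsem and the mapping's invalidate_lock so that page faults cannot
 * repopulate the hole halfway through.  Schematically, for a hole of len
 * bytes at offset:
 *
 *	filemap_invalidate_lock(inode->i_mapping);
 *	truncate_pagecache_range(inode, offset, offset + len - 1);
 *	example_free_blocks(inode, offset, len);
 *	filemap_invalidate_unlock(inode->i_mapping);
 *
 * where example_free_blocks() is an invented stand-in for the
 * filesystem-specific block deallocation.
 */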
881