/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct folio_batch;

unsigned long invalidate_mapping_pages(struct address_space *mapping,
					pgoff_t start, pgoff_t end);

static inline void invalidate_remote_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode))
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
}
int invalidate_inode_pages2(struct address_space *mapping);
int invalidate_inode_pages2_range(struct address_space *mapping,
		pgoff_t start, pgoff_t end);
int kiocb_invalidate_pages(struct kiocb *iocb, size_t count);
void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count);
int filemap_invalidate_pages(struct address_space *mapping,
			     loff_t pos, loff_t end, bool nowait);

int write_inode_now(struct inode *, int sync);
int filemap_fdatawrite(struct address_space *);
int filemap_flush(struct address_space *);
int filemap_fdatawait_keep_errors(struct address_space *mapping);
int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
		loff_t start_byte, loff_t end_byte);
int filemap_invalidate_inode(struct inode *inode, bool flush,
			     loff_t start, loff_t end);

static inline int filemap_fdatawait(struct address_space *mapping)
{
	return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
}

bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
int filemap_write_and_wait_range(struct address_space *mapping,
		loff_t lstart, loff_t lend);
int __filemap_fdatawrite_range(struct address_space *mapping,
		loff_t start, loff_t end, int sync_mode);
int filemap_fdatawrite_range(struct address_space *mapping,
		loff_t start, loff_t end);
int filemap_check_errors(struct address_space *mapping);
void __filemap_set_wb_err(struct address_space *mapping, int err);
int filemap_fdatawrite_wbc(struct address_space *mapping,
			   struct writeback_control *wbc);
int kiocb_write_and_wait(struct kiocb *iocb, size_t count);

static inline int filemap_write_and_wait(struct address_space *mapping)
{
	return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
}

/**
 * filemap_set_wb_err - set a writeback error on an address_space
 * @mapping: mapping in which to set writeback error
 * @err: error to be set in mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * filemap_set_wb_err to record the error in the mapping so that it will be
 * automatically reported whenever fsync is called on the file.
 */
static inline void filemap_set_wb_err(struct address_space *mapping, int err)
{
	/* Fastpath for common case of no error */
	if (unlikely(err))
		__filemap_set_wb_err(mapping, err);
}

/**
 * filemap_check_wb_err - has an error occurred since the mark was sampled?
 * @mapping: mapping to check for writeback errors
 * @since: previously-sampled errseq_t
 *
 * Grab the errseq_t value from the mapping, and see if it has changed "since"
 * the given value was sampled.
 *
 * If it has then report the latest error set, otherwise return 0.
 */
static inline int filemap_check_wb_err(struct address_space *mapping,
					errseq_t since)
{
	return errseq_check(&mapping->wb_err, since);
}

/**
 * filemap_sample_wb_err - sample the current errseq_t to test for later errors
 * @mapping: mapping to be sampled
 *
 * Writeback errors are always reported relative to a particular sample point
 * in the past. This function provides those sample points.
 */
static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
{
	return errseq_sample(&mapping->wb_err);
}

/**
 * file_sample_sb_err - sample the current errseq_t to test for later errors
 * @file: file pointer to be sampled
 *
 * Grab the most current superblock-level errseq_t value for the given
 * struct file.
 */
static inline errseq_t file_sample_sb_err(struct file *file)
{
	return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
}
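
/*
 * Illustrative sketch (not part of this header): the sample/check pair
 * above is typically used around a flush, so that errors recorded by
 * other writers in the window are not lost.  The function name
 * example_fsync is hypothetical:
 *
 *	static int example_fsync(struct file *file, loff_t start, loff_t end,
 *				 int datasync)
 *	{
 *		struct address_space *mapping = file->f_mapping;
 *		errseq_t since = filemap_sample_wb_err(mapping);
 *		int err;
 *
 *		err = filemap_write_and_wait_range(mapping, start, end);
 *		if (err)
 *			return err;
 *		return filemap_check_wb_err(mapping, since);
 *	}
 */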

/*
 * Flush file data before changing attributes.  Caller must hold any locks
 * required to prevent further writes to this file until we're done setting
 * flags.
 */
static inline int inode_drain_writes(struct inode *inode)
{
	inode_dio_wait(inode);
	return filemap_write_and_wait(inode->i_mapping);
}

static inline bool mapping_empty(struct address_space *mapping)
{
	return xa_empty(&mapping->i_pages);
}

extern void _trace_android_rvh_mapping_shrinkable(bool *shrinkable);

/*
 * mapping_shrinkable - test if page cache state allows inode reclaim
 * @mapping: the page cache mapping
 *
 * This checks the mapping's cache state for the purpose of inode
 * reclaim and LRU management.
 *
 * The caller is expected to hold the i_lock, but is not required to
 * hold the i_pages lock, which usually protects cache state. That's
 * because the i_lock and the list_lru lock that protect the inode and
 * its LRU state don't nest inside the irq-safe i_pages lock.
 *
 * Cache deletions are performed under the i_lock, which ensures that
 * when an inode goes empty, it will reliably get queued on the LRU.
 *
 * Cache additions do not acquire the i_lock and may race with this
 * check, in which case we'll report the inode as shrinkable when it
 * has cache pages. This is okay: the shrinker also checks the
 * refcount and the referenced bit, which will be elevated or set in
 * the process of adding new cache pages to an inode.
 */
static inline bool mapping_shrinkable(struct address_space *mapping)
{
	void *head;
	bool shrinkable = false;

	_trace_android_rvh_mapping_shrinkable(&shrinkable);
	if (shrinkable)
		return true;
	/*
	 * On highmem systems, there could be lowmem pressure from the
	 * inodes before there is highmem pressure from the page
	 * cache. Make inodes shrinkable regardless of cache state.
	 */
	if (IS_ENABLED(CONFIG_HIGHMEM))
		return true;

	/* Cache completely empty? Shrink away. */
	head = rcu_access_pointer(mapping->i_pages.xa_head);
	if (!head)
		return true;

	/*
	 * The xarray stores single offset-0 entries directly in the
	 * head pointer, which allows non-resident page cache entries
	 * to escape the shadow shrinker's list of xarray nodes. The
	 * inode shrinker needs to pick them up under memory pressure.
	 */
	if (!xa_is_node(head) && xa_is_value(head))
		return true;

	return false;
}

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
	AS_RELEASE_ALWAYS = 6,	/* Call ->release_folio(), even if no private data */
	AS_STABLE_WRITES = 7,	/* must wait for writeback before modifying
				   folio contents */
	AS_INACCESSIBLE = 8,	/* Do not attempt direct R/W access to the mapping */
	/* Bits 16-25 are used for FOLIO_ORDER */
	AS_FOLIO_ORDER_BITS = 5,
	AS_FOLIO_ORDER_MIN = 16,
	AS_FOLIO_ORDER_MAX = AS_FOLIO_ORDER_MIN + AS_FOLIO_ORDER_BITS,
};

#define AS_FOLIO_ORDER_BITS_MASK ((1u << AS_FOLIO_ORDER_BITS) - 1)
#define AS_FOLIO_ORDER_MIN_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MIN)
#define AS_FOLIO_ORDER_MAX_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MAX)
#define AS_FOLIO_ORDER_MASK (AS_FOLIO_ORDER_MIN_MASK | AS_FOLIO_ORDER_MAX_MASK)

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	__filemap_set_wb_err(mapping, error);

	/* Record it in superblock */
	if (mapping->host)
		errseq_set(&mapping->host->i_sb->s_wb_err, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
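
/*
 * Illustrative sketch (hypothetical function, not part of this header):
 * a filesystem's writeback completion path would record a failure like
 * this so that a later fsync() on the file reports it:
 *
 *	static void example_write_end_io(struct folio *folio, int err)
 *	{
 *		if (err)
 *			mapping_set_error(folio->mapping, err);
 *		folio_end_writeback(folio);
 *	}
 */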

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline bool mapping_release_always(const struct address_space *mapping)
{
	return test_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline void mapping_set_release_always(struct address_space *mapping)
{
	set_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline void mapping_clear_release_always(struct address_space *mapping)
{
	clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline bool mapping_stable_writes(const struct address_space *mapping)
{
	return test_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_set_stable_writes(struct address_space *mapping)
{
	set_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_clear_stable_writes(struct address_space *mapping)
{
	clear_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_set_inaccessible(struct address_space *mapping)
{
	/*
	 * It's expected inaccessible mappings are also unevictable. Compaction
	 * migrate scanner (isolate_migratepages_block()) relies on this to
	 * reduce page locking.
	 */
	set_bit(AS_UNEVICTABLE, &mapping->flags);
	set_bit(AS_INACCESSIBLE, &mapping->flags);
}

static inline bool mapping_inaccessible(struct address_space *mapping)
{
	return test_bit(AS_INACCESSIBLE, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

/*
 * There are some parts of the kernel which assume that PMD entries
 * are exactly HPAGE_PMD_ORDER.  Those should be fixed, but until then,
 * limit the maximum allocation order to PMD size.  I'm not aware of any
 * assumptions about maximum order if THP are disabled, but 8 seems like
 * a good order (that's 1MB if you're using 4kB pages)
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define PREFERRED_MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
#else
#define PREFERRED_MAX_PAGECACHE_ORDER	8
#endif

/*
 * xas_split_alloc() does not support arbitrary orders. This implies no
 * 512MB THP on ARM64 with 64KB base page size.
 */
#define MAX_XAS_ORDER		(XA_CHUNK_SHIFT * 2 - 1)
#define MAX_PAGECACHE_ORDER	min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER)

/*
 * mapping_max_folio_size_supported() - Check the max folio size supported
 *
 * The filesystem should call this function at mount time if there is a
 * requirement on the folio mapping size in the page cache.
 */
static inline size_t mapping_max_folio_size_supported(void)
{
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return 1U << (PAGE_SHIFT + MAX_PAGECACHE_ORDER);
	return PAGE_SIZE;
}

/*
 * mapping_set_folio_order_range() - Set the orders supported by a file.
 * @mapping: The address space of the file.
 * @min: Minimum folio order (between 0-MAX_PAGECACHE_ORDER inclusive).
 * @max: Maximum folio order (between @min-MAX_PAGECACHE_ORDER inclusive).
 *
 * The filesystem should call this function in its inode constructor to
 * indicate which base size (min) and maximum size (max) of folio the VFS
 * can use to cache the contents of the file.  This should only be used
 * if the filesystem needs special handling of folio sizes (ie there is
 * something the core cannot know).
 * Do not tune it based on, eg, i_size.
 *
 * Context: This should not be called while the inode is active as it
 * is non-atomic.
 */
static inline void mapping_set_folio_order_range(struct address_space *mapping,
						 unsigned int min,
						 unsigned int max)
{
	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return;

	if (min > MAX_PAGECACHE_ORDER)
		min = MAX_PAGECACHE_ORDER;

	if (max > MAX_PAGECACHE_ORDER)
		max = MAX_PAGECACHE_ORDER;

	if (max < min)
		max = min;

	mapping->flags = (mapping->flags & ~AS_FOLIO_ORDER_MASK) |
		(min << AS_FOLIO_ORDER_MIN) | (max << AS_FOLIO_ORDER_MAX);
}

static inline void mapping_set_folio_min_order(struct address_space *mapping,
					       unsigned int min)
{
	mapping_set_folio_order_range(mapping, min, MAX_PAGECACHE_ORDER);
}

/**
 * mapping_set_large_folios() - Indicate the file supports large folios.
 * @mapping: The address space of the file.
 *
 * The filesystem should call this function in its inode constructor to
 * indicate that the VFS can use large folios to cache the contents of
 * the file.
 *
 * Context: This should not be called while the inode is active as it
 * is non-atomic.
 */
static inline void mapping_set_large_folios(struct address_space *mapping)
{
	mapping_set_folio_order_range(mapping, 0, MAX_PAGECACHE_ORDER);
}
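
/*
 * Illustrative sketch (not part of this header): a hypothetical
 * filesystem with a 16KiB block size on a 4KiB-page machine could pin
 * the minimum folio order in its inode constructor.  Order 2 is four
 * pages, i.e. 16KiB:
 *
 *	mapping_set_folio_order_range(inode->i_mapping, 2,
 *				      MAX_PAGECACHE_ORDER);
 *
 * which is equivalent to mapping_set_folio_min_order(inode->i_mapping, 2).
 */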

static inline unsigned int
mapping_max_folio_order(const struct address_space *mapping)
{
	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return 0;
	return (mapping->flags & AS_FOLIO_ORDER_MAX_MASK) >> AS_FOLIO_ORDER_MAX;
}

static inline unsigned int
mapping_min_folio_order(const struct address_space *mapping)
{
	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return 0;
	return (mapping->flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN;
}

static inline unsigned long
mapping_min_folio_nrpages(struct address_space *mapping)
{
	return 1UL << mapping_min_folio_order(mapping);
}

/**
 * mapping_align_index() - Align index for this mapping.
 * @mapping: The address_space.
 * @index: The page index.
 *
 * The index of a folio must be naturally aligned.  If you are adding a
 * new folio to the page cache and need to know what index to give it,
 * call this function.
 */
static inline pgoff_t mapping_align_index(struct address_space *mapping,
					  pgoff_t index)
{
	return round_down(index, mapping_min_folio_nrpages(mapping));
}

/*
 * Large folio support currently depends on THP.  These dependencies are
 * being worked on but are not yet fixed.
 */
static inline bool mapping_large_folio_support(struct address_space *mapping)
{
	/* AS_FOLIO_ORDER is only reasonable for pagecache folios */
	VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON,
			"Anonymous mapping always supports large folio");

	return mapping_max_folio_order(mapping) > 0;
}

/* Return the maximum folio size for this pagecache mapping, in bytes. */
static inline size_t mapping_max_folio_size(const struct address_space *mapping)
{
	return PAGE_SIZE << mapping_max_folio_order(mapping);
}

static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	return atomic_read(&mapping->nr_thps);
#else
	return 0;
#endif
}

static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_large_folio_support(mapping))
		atomic_inc(&mapping->nr_thps);
#else
	WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_large_folio_support(mapping))
		atomic_dec(&mapping->nr_thps);
#else
	WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

struct address_space *folio_mapping(struct folio *);
struct address_space *swapcache_mapping(struct folio *);

/**
 * folio_file_mapping - Find the mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the mapping of the
 * swap file or swap device where the data is stored.  This is different
 * from the mapping returned by folio_mapping().  The only reason to
 * use it is if, like NFS, you return 0 from ->activate_swapfile.
 *
 * Do not call this for folios which aren't in the page cache or swap cache.
 */
static inline struct address_space *folio_file_mapping(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return swapcache_mapping(folio);

	return folio->mapping;
}

/**
 * folio_flush_mapping - Find the file mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Anonymous folios return NULL, even if they're in
 * the swap cache.  Other kinds of folio also return NULL.
 *
 * This is ONLY used by architecture cache flushing code.  If you aren't
 * writing cache flushing code, you want either folio_mapping() or
 * folio_file_mapping().
 */
static inline struct address_space *folio_flush_mapping(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return NULL;

	return folio_mapping(folio);
}

static inline struct address_space *page_file_mapping(struct page *page)
{
	return folio_file_mapping(page_folio(page));
}

/**
 * folio_inode - Get the host inode for this folio.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the inode that this folio
 * belongs to.
 *
 * Do not call this for folios which aren't in the page cache.
 */
static inline struct inode *folio_inode(struct folio *folio)
{
	return folio->mapping->host;
}

/**
 * folio_attach_private - Attach private data to a folio.
 * @folio: Folio to attach data to.
 * @data: Data to attach to folio.
 *
 * Attaching private data to a folio increments the page's reference count.
 * The data must be detached before the folio will be freed.
 */
static inline void folio_attach_private(struct folio *folio, void *data)
{
	folio_get(folio);
	folio->private = data;
	folio_set_private(folio);
}

/**
 * folio_change_private - Change private data on a folio.
 * @folio: Folio to change the data on.
 * @data: Data to set on the folio.
 *
 * Change the private data attached to a folio and return the old
 * data.  The page must previously have had data attached and the data
 * must be detached before the folio will be freed.
 *
 * Return: Data that was previously attached to the folio.
 */
static inline void *folio_change_private(struct folio *folio, void *data)
{
	void *old = folio_get_private(folio);

	folio->private = data;
	return old;
}

/**
 * folio_detach_private - Detach private data from a folio.
 * @folio: Folio to detach data from.
 *
 * Removes the data that was previously attached to the folio and decrements
 * the refcount on the page.
 *
 * Return: Data that was attached to the folio.
 */
static inline void *folio_detach_private(struct folio *folio)
{
	void *data = folio_get_private(folio);

	if (!folio_test_private(folio))
		return NULL;
	folio_clear_private(folio);
	folio->private = NULL;
	folio_put(folio);

	return data;
}
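
/*
 * Illustrative sketch (not part of this header): a filesystem that hangs
 * per-folio state off ->private pairs the attach/detach calls like this
 * (struct example_state is hypothetical):
 *
 *	struct example_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
 *
 *	folio_attach_private(folio, state);
 *	...
 *	state = folio_detach_private(folio);
 *	kfree(state);
 *
 * The attach takes a folio reference and the detach drops it, so the
 * folio cannot be freed while the private data is still attached.
 */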

static inline void attach_page_private(struct page *page, void *data)
{
	folio_attach_private(page_folio(page), data);
}

static inline void *detach_page_private(struct page *page)
{
	return folio_detach_private(page_folio(page));
}

#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
#else
static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
{
	return folio_alloc_noprof(gfp, order);
}
#endif

#define filemap_alloc_folio(...)				\
	alloc_hooks(filemap_alloc_folio_noprof(__VA_ARGS__))

static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return &filemap_alloc_folio(gfp, 0)->page;
}

static inline gfp_t __readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

gfp_t readahead_gfp_mask(struct address_space *x);

typedef int filler_t(struct file *, struct folio *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

/**
 * typedef fgf_t - Flags for getting folios from the page cache.
 *
 * Most users of the page cache will not need to use these flags;
 * there are convenience functions such as filemap_get_folio() and
 * filemap_lock_folio().  For users which need more control over exactly
 * what is done with the folios, these flags to __filemap_get_folio()
 * are available.
 *
 * * %FGP_ACCESSED - The folio will be marked accessed.
 * * %FGP_LOCK - The folio is returned locked.
 * * %FGP_CREAT - If no folio is present then a new folio is allocated,
 *   added to the page cache and the VM's LRU list.  The folio is
 *   returned locked.
 * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
 *   folio is already in cache.  If the folio was allocated, unlock it
 *   before returning so the caller can do the same dance.
 * * %FGP_WRITE - The folio will be written to by the caller.
 * * %FGP_NOFS - __GFP_FS will get cleared in gfp.
 * * %FGP_NOWAIT - Don't block on the folio lock.
 * * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
 * * %FGP_DONTCACHE - Uncached buffered IO
 * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin()
 *   implementation.
 */
typedef unsigned int __bitwise fgf_t;

#define FGP_ACCESSED		((__force fgf_t)0x00000001)
#define FGP_LOCK		((__force fgf_t)0x00000002)
#define FGP_CREAT		((__force fgf_t)0x00000004)
#define FGP_WRITE		((__force fgf_t)0x00000008)
#define FGP_NOFS		((__force fgf_t)0x00000010)
#define FGP_NOWAIT		((__force fgf_t)0x00000020)
#define FGP_FOR_MMAP		((__force fgf_t)0x00000040)
#define FGP_STABLE		((__force fgf_t)0x00000080)
#define FGP_DONTCACHE		((__force fgf_t)0x00000100)
#define FGF_GET_ORDER(fgf)	(((__force unsigned)fgf) >> 26)	/* top 6 bits */

#define FGP_WRITEBEGIN		(FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)

/**
 * fgf_set_order - Encode a length in the fgf_t flags.
 * @size: The suggested size of the folio to create.
 *
 * The caller of __filemap_get_folio() can use this to suggest a preferred
 * size for the folio that is created.  If there is already a folio at
 * the index, it will be returned, no matter what its size.  If a folio
 * is freshly created, it may be of a different size than requested
 * due to alignment constraints, memory pressure, or the presence of
 * other folios at nearby indices.
 */
static inline fgf_t fgf_set_order(size_t size)
{
	unsigned int shift = ilog2(size);

	if (shift <= PAGE_SHIFT)
		return 0;
	return (__force fgf_t)((shift - PAGE_SHIFT) << 26);
}

void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
		fgf_t fgp_flags, gfp_t gfp);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
		fgf_t fgp_flags, gfp_t gfp);
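
/*
 * Illustrative sketch (not part of this header): a buffered write path
 * can suggest a folio sized to the write with fgf_set_order(); the cache
 * is free to return a smaller folio, or whatever folio already exists at
 * that index (pos and len are the write's position and length):
 *
 *	struct folio *folio;
 *
 *	folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
 *			FGP_WRITEBEGIN | fgf_set_order(len),
 *			mapping_gfp_mask(mapping));
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 */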

/**
 * filemap_get_folio - Find and get a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If a folio is
 * present, it is returned with an increased refcount.
 *
 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
 * this index.  Will not return a shadow, swap or DAX entry.
 */
static inline struct folio *filemap_get_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index, 0, 0);
}

/**
 * filemap_lock_folio - Find and lock a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If a folio is
 * present, it is returned locked with an increased refcount.
 *
 * Context: May sleep.
 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
 * this index.  Will not return a shadow, swap or DAX entry.
 */
static inline struct folio *filemap_lock_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
}

/**
 * filemap_grab_folio - grab a folio from the page cache
 * @mapping: The address space to search
 * @index: The page index
 *
 * Looks up the page cache entry at @mapping & @index. If no folio is found,
 * a new folio is created. The folio is locked, marked as accessed, and
 * returned.
 *
 * Return: A found or created folio. ERR_PTR(-ENOMEM) if no folio is found
 * and failed to create a folio.
 */
static inline struct folio *filemap_grab_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_mask(mapping));
}
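
/*
 * Illustrative sketch (not part of this header): the folio lookup
 * helpers above return an ERR_PTR() on failure, so the usual pattern is
 * to check for that, operate on the locked folio, then unlock and drop
 * the reference:
 *
 *	struct folio *folio = filemap_grab_folio(mapping, index);
 *
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *	... operate on the locked folio ...
 *	folio_unlock(folio);
 *	folio_put(folio);
 */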

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, fgf_t fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

extern pgoff_t __folio_swap_cache_index(struct folio *folio);

/**
 * folio_index - File index of a folio.
 * @folio: The folio.
 *
 * For a folio which is either in the page cache or the swap cache,
 * return its index within the address_space it belongs to.  If you know
 * the page is definitely in the page cache, you can look at the folio's
 * index directly.
 *
 * Return: The index (offset in units of pages) of a folio in its file.
 */
static inline pgoff_t folio_index(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return __folio_swap_cache_index(folio);
	return folio->index;
}

/**
 * folio_next_index - Get the index of the next folio.
 * @folio: The current folio.
 *
 * Return: The index of the folio which follows this folio in the file.
 */
static inline pgoff_t folio_next_index(struct folio *folio)
{
	return folio->index + folio_nr_pages(folio);
}

/**
 * folio_file_page - The page for a particular index.
 * @folio: The folio which contains this index.
 * @index: The index we want to look up.
 *
 * Sometimes after looking up a folio in the page cache, we need to
 * obtain the specific page for an index (eg a page fault).
 *
 * Return: The page containing the file data for this index.
 */
static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
	return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}

/**
 * folio_contains - Does this folio contain this index?
 * @folio: The folio.
 * @index: The page index within the file.
 *
 * Context: The caller should have the page locked in order to prevent
 * (eg) shmem from moving the page between the page cache and swap cache
 * and changing its index in the middle of the operation.
 * Return: true or false.
 */
static inline bool folio_contains(struct folio *folio, pgoff_t index)
{
	return index - folio_index(folio) < folio_nr_pages(folio);
}

/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
	/* HugeTLBfs wants the head page regardless */
	if (PageHuge(head))
		return head;

	return head + (index & (thp_nr_pages(head) - 1));
}

unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_contig(struct address_space *mapping,
		pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

struct folio *read_cache_folio(struct address_space *, pgoff_t index,
		filler_t *filler, struct file *file);
struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
		gfp_t flags);
struct page *read_cache_page(struct address_space *, pgoff_t index,
		filler_t *filler, struct file *file);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, struct file *file)
{
	return read_cache_page(mapping, index, NULL, file);
}

static inline struct folio *read_mapping_folio(struct address_space *mapping,
				pgoff_t index, struct file *file)
{
	return read_cache_folio(mapping, index, NULL, file);
}

/*
 * Get the offset in PAGE_SIZE (even for hugetlb pages).
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	struct page *head;

	if (likely(!PageTransTail(page)))
		return page->index;

	head = compound_head(page);
	/*
	 *  We don't initialize ->index for tail pages: calculate based on
	 *  head page
	 */
	return head->index + page - head;
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

/**
 * folio_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 */
static inline loff_t folio_pos(struct folio *folio)
{
	return page_offset(&folio->page);
}

/*
 * Get the offset in PAGE_SIZE (even for hugetlb folios).
 */
static inline pgoff_t folio_pgoff(struct folio *folio)
{
	return folio->index;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

struct wait_page_key {
	struct folio *folio;
	int bit_nr;
	int page_match;
};

struct wait_page_queue {
	struct folio *folio;
	int bit_nr;
	wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
				  struct wait_page_key *key)
{
	if (wait_page->folio != key->folio)
		return false;
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return false;

	return true;
}

void __folio_lock(struct folio *folio);
int __folio_lock_killable(struct folio *folio);
vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
void unlock_page(struct page *page);
void folio_unlock(struct folio *folio);

/**
 * folio_trylock() - Attempt to lock a folio.
 * @folio: The folio to attempt to lock.
 *
 * Sometimes it is undesirable to wait for a folio to be unlocked (eg
 * when the locks are being taken in the wrong order, or if making
 * progress through a batch of folios is more important than processing
 * them in order).  Usually folio_lock() is the correct function to call.
 *
 * Context: Any context.
 * Return: Whether the lock was successfully acquired.
 */
static inline bool folio_trylock(struct folio *folio)
{
	return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
}

/*
 * Return true if the page was successfully locked
 */
static inline bool trylock_page(struct page *page)
{
	return folio_trylock(page_folio(page));
}

/**
 * folio_lock() - Lock this folio.
 * @folio: The folio to lock.
 *
 * The folio lock protects against many things, probably more than it
 * should.  It is primarily held while a folio is being brought uptodate,
 * either from its backing file or from swap.  It is also held while a
 * folio is being truncated from its address_space, so holding the lock
 * is sufficient to keep folio->mapping stable.
 *
 * The folio lock is also held while write() is modifying the page to
 * provide POSIX atomicity guarantees (as long as the write does not
 * cross a page boundary).  Other modifications to the data in the folio
 * do not hold the folio lock and can race with writes, eg DMA and stores
 * to mapped pages.
 *
 * Context: May sleep.  If you need to acquire the locks of two or
 * more folios, they must be in order of ascending index, if they are
 * in the same address_space.  If they are in different address_spaces,
 * acquire the lock of the folio which belongs to the address_space which
 * has the lowest address in memory first.
 */
static inline void folio_lock(struct folio *folio)
{
	might_sleep();
	if (!folio_trylock(folio))
		__folio_lock(folio);
}
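
/*
 * Illustrative sketch (not part of this header): honouring the ordering
 * rule above when two folios of the same file must both be locked:
 *
 *	if (folio1->index > folio2->index)
 *		swap(folio1, folio2);
 *	folio_lock(folio1);
 *	if (folio1 != folio2)
 *		folio_lock(folio2);
 */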

/**
 * lock_page() - Lock the folio containing this page.
 * @page: The page to lock.
 *
 * See folio_lock() for a description of what the lock protects.
 * This is a legacy function and new code should probably use folio_lock()
 * instead.
 *
 * Context: May sleep.  Pages in the same folio share a lock, so do not
 * attempt to lock two pages which share a folio.
 */
static inline void lock_page(struct page *page)
{
	struct folio *folio;
	might_sleep();

	folio = page_folio(page);
	if (!folio_trylock(folio))
		__folio_lock(folio);
}

/**
 * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
 * @folio: The folio to lock.
 *
 * Attempts to lock the folio, like folio_lock(), except that the sleep
 * to acquire the lock is interruptible by a fatal signal.
 *
 * Context: May sleep; see folio_lock().
 * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
 */
static inline int folio_lock_killable(struct folio *folio)
{
	might_sleep();
	if (!folio_trylock(folio))
		return __folio_lock_killable(folio);
	return 0;
}

/*
 * folio_lock_or_retry - Lock the folio, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __folio_lock_or_retry().
 */
static inline vm_fault_t folio_lock_or_retry(struct folio *folio,
					     struct vm_fault *vmf)
{
	might_sleep();
	if (!folio_trylock(folio))
		return __folio_lock_or_retry(folio, vmf);
	return 0;
}

/*
 * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
 * and should not be used directly.
 */
void folio_wait_bit(struct folio *folio, int bit_nr);
int folio_wait_bit_killable(struct folio *folio, int bit_nr);

/*
 * Wait for a folio to be unlocked.
 *
 * This must be called with the caller "holding" the folio,
 * ie with increased folio reference count so that the folio won't
 * go away during the wait.
 */
static inline void folio_wait_locked(struct folio *folio)
{
	if (folio_test_locked(folio))
		folio_wait_bit(folio, PG_locked);
}

static inline int folio_wait_locked_killable(struct folio *folio)
{
	if (!folio_test_locked(folio))
		return 0;
	return folio_wait_bit_killable(folio, PG_locked);
}

static inline void wait_on_page_locked(struct page *page)
{
	folio_wait_locked(page_folio(page));
}

void folio_end_read(struct folio *folio, bool success);
void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
void end_page_writeback(struct page *page);
void folio_end_writeback(struct folio *folio);
void wait_for_stable_page(struct page *page);
void folio_wait_stable(struct folio *folio);
void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
void __folio_cancel_dirty(struct folio *folio);
static inline void folio_cancel_dirty(struct folio *folio)
{
	/* Avoid atomic ops, locking, etc. when not actually needed. */
	if (folio_test_dirty(folio))
		__folio_cancel_dirty(folio);
}
bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page);
void folio_invalidate(struct folio *folio, size_t offset, size_t length);
bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_MIGRATION
int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode);
#else
#define filemap_migrate_folio NULL
#endif
void folio_end_private_2(struct folio *folio);
void folio_wait_private_2(struct folio *folio);
int folio_wait_private_2_killable(struct folio *folio);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);

/*
 * Fault in userspace address range.
 */
size_t fault_in_writeable(char __user *uaddr, size_t size);
size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
size_t fault_in_readable(const char __user *uaddr, size_t size);
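
/*
 * Illustrative sketch (not part of this header): a buffered write loop
 * typically calls these after a failed atomic usercopy, with no folio
 * lock held, then retries.  A return value equal to the size passed in
 * means nothing could be faulted in (buf, bytes, copied and the retry
 * label are hypothetical surrounding context):
 *
 *	if (unlikely(copied == 0)) {
 *		folio_unlock(folio);
 *		if (fault_in_readable(buf, bytes) == bytes)
 *			return -EFAULT;
 *		goto retry;
 *	}
 */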

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp);
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
		pgoff_t index, gfp_t gfp);
void filemap_remove_folio(struct folio *folio);
void __filemap_remove_folio(struct folio *folio, void *shadow);
void replace_page_cache_folio(struct folio *old, struct folio *new);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct folio_batch *fbatch);
bool filemap_release_folio(struct folio *folio, gfp_t gfp);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
		int whence);

/* Must be non-static for BPF error injection */
int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
		pgoff_t index, gfp_t gfp, void **shadowp);

bool filemap_range_has_writeback(struct address_space *mapping,
				 loff_t start_byte, loff_t end_byte);

/**
 * filemap_range_needs_writeback - check if range potentially needs writeback
 * @mapping:           address space within which to check
 * @start_byte:        offset in bytes where the range starts
 * @end_byte:          offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback. Used by O_DIRECT
 * read/write with IOCB_NOWAIT, to see if the caller needs to do
 * filemap_write_and_wait_range() before proceeding.
 *
 * Return: %true if the caller should do filemap_write_and_wait_range() before
 * doing O_DIRECT to a page in this range, %false otherwise.
 */
static inline bool filemap_range_needs_writeback(struct address_space *mapping,
						 loff_t start_byte,
						 loff_t end_byte)
{
	if (!mapping->nrpages)
		return false;
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
	    !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
		return false;
	return filemap_range_has_writeback(mapping, start_byte, end_byte);
}
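
/*
 * Illustrative sketch (not part of this header): an O_DIRECT read with
 * IOCB_NOWAIT would rather return -EAGAIN than block on writeback:
 *
 *	if (iocb->ki_flags & IOCB_NOWAIT) {
 *		if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
 *					iocb->ki_pos + count - 1))
 *			return -EAGAIN;
 *	}
 */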
1338 
1339 /**
1340  * struct readahead_control - Describes a readahead request.
1341  *
1342  * A readahead request is for consecutive pages.  Filesystems which
1343  * implement the ->readahead method should call readahead_page() or
1344  * readahead_page_batch() in a loop and attempt to start I/O against
1345  * each page in the request.
1346  *
1347  * Most of the fields in this struct are private and should be accessed
1348  * by the functions below.
1349  *
1350  * @file: The file, used primarily by network filesystems for authentication.
1351  *	  May be NULL if invoked internally by the filesystem.
1352  * @mapping: Readahead this filesystem object.
1353  * @ra: File readahead state.  May be NULL.
1354  */
1355 struct readahead_control {
1356 	struct file *file;
1357 	struct address_space *mapping;
1358 	struct file_ra_state *ra;
1359 /* private: use the readahead_* accessors instead */
1360 	pgoff_t _index;
1361 	unsigned int _nr_pages;
1362 	unsigned int _batch_count;
1363 	bool _workingset;
1364 	ANDROID_KABI_IGNORE(1, bool dropbehind);
1365 	unsigned long _pflags;
1366 	ANDROID_OEM_DATA(1);
1367 };
1368 
1369 #define DEFINE_READAHEAD(ractl, f, r, m, i)				\
1370 	struct readahead_control ractl = {				\
1371 		.file = f,						\
1372 		.mapping = m,						\
1373 		.ra = r,						\
1374 		._index = i,						\
1375 	}
1376 
1377 #define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)
1378 
1379 void page_cache_ra_unbounded(struct readahead_control *,
1380 		unsigned long nr_to_read, unsigned long lookahead_count);
1381 void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
1382 void page_cache_async_ra(struct readahead_control *, struct folio *,
1383 		unsigned long req_count);
1384 void readahead_expand(struct readahead_control *ractl,
1385 		      loff_t new_start, size_t new_len);
1386 
1387 /**
1388  * page_cache_sync_readahead - generic file readahead
1389  * @mapping: address_space which holds the pagecache and I/O vectors
1390  * @ra: file_ra_state which holds the readahead state
1391  * @file: Used by the filesystem for authentication.
1392  * @index: Index of first page to be read.
1393  * @req_count: Total number of pages being read by the caller.
1394  *
1395  * page_cache_sync_readahead() should be called when a cache miss happened:
1396  * it will submit the read.  The readahead logic may decide to piggyback more
1397  * pages onto the read request if access patterns suggest it will improve
1398  * performance.
1399  */
1400 static inline
page_cache_sync_readahead(struct address_space * mapping,struct file_ra_state * ra,struct file * file,pgoff_t index,unsigned long req_count)1401 void page_cache_sync_readahead(struct address_space *mapping,
1402 		struct file_ra_state *ra, struct file *file, pgoff_t index,
1403 		unsigned long req_count)
1404 {
1405 	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
1406 	page_cache_sync_ra(&ractl, req_count);
1407 }

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @folio: The folio which triggered the readahead call.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a folio marked as
 * PageReadahead is used; the mark suggests that the application has used
 * up enough of the readahead window that we should start pulling in
 * more pages.
 */
static inline
void page_cache_async_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file,
		struct folio *folio, unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, folio->index);
	page_cache_async_ra(&ractl, folio, req_count);
}
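
/*
 * Usage sketch (illustrative only): after finding a folio in the
 * cache, check the readahead marker and, if set, kick off more
 * asynchronous readahead.  The function name is hypothetical.
 */
static inline void example_note_readahead(struct file *file,
		struct folio *folio, unsigned long req_count)
{
	if (folio_test_readahead(folio))
		page_cache_async_readahead(file->f_mapping, &file->f_ra,
					   file, folio, req_count);
}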

static inline struct folio *__readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio;

	BUG_ON(ractl->_batch_count > ractl->_nr_pages);
	ractl->_nr_pages -= ractl->_batch_count;
	ractl->_index += ractl->_batch_count;

	if (!ractl->_nr_pages) {
		ractl->_batch_count = 0;
		return NULL;
	}

	folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	ractl->_batch_count = folio_nr_pages(folio);

	return folio;
}

/**
 * readahead_page - Get the next page to read.
 * @ractl: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	/*
	 * struct page is the first member of struct folio, so a NULL
	 * folio yields a NULL page here.
	 */
	return &folio->page;
}

/**
 * readahead_folio - Get the next folio to read.
 * @ractl: The current readahead request.
 *
 * Context: The folio is locked.  The caller should unlock the folio once
 * all I/O to that folio has completed.
 * Return: A pointer to the next folio, or %NULL if we are done.
 */
static inline struct folio *readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	if (folio)
		folio_put(folio);
	return folio;
}
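
/*
 * Usage sketch (illustrative only): the typical shape of an
 * address_space ->readahead() implementation.  example_submit_read()
 * stands in for the filesystem's own I/O submission and is assumed to
 * unlock the folio from its completion handler on success.
 */
static int example_submit_read(struct folio *folio);

static void example_readahead(struct readahead_control *ractl)
{
	struct folio *folio;

	while ((folio = readahead_folio(ractl)) != NULL) {
		if (example_submit_read(folio) < 0) {
			/*
			 * No I/O was submitted, so nothing else will
			 * unlock this folio; do it here.
			 */
			folio_unlock(folio);
		}
	}
}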

static inline unsigned int __readahead_batch(struct readahead_control *rac,
		struct page **array, unsigned int array_sz)
{
	unsigned int i = 0;
	XA_STATE(xas, &rac->mapping->i_pages, 0);
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;
	rac->_batch_count = 0;

	xas_set(&xas, rac->_index);
	rcu_read_lock();
	xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
		if (xas_retry(&xas, page))
			continue;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageTail(page), page);
		array[i++] = page;
		rac->_batch_count += thp_nr_pages(page);
		if (i == array_sz)
			break;
	}
	rcu_read_unlock();

	return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount.  The caller
 * should decrease the refcount once each page has been submitted for I/O
 * and unlock each page once all I/O to it has completed.
 * Return: The number of pages placed in the array.  0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)				\
	__readahead_batch(rac, array, ARRAY_SIZE(array))
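
/*
 * Usage sketch (illustrative only): drain a readahead request in
 * fixed-size batches.  The page-based interface is the older style;
 * new code would normally use readahead_folio() instead.
 * example_submit_page_read() is a stand-in which is assumed to drop
 * the page reference and unlock the page when I/O completes.
 */
static void example_submit_page_read(struct page *page);

static void example_readahead_batched(struct readahead_control *rac)
{
	struct page *pages[16];	/* arbitrary batch size for the example */
	unsigned int i, nr;

	while ((nr = readahead_page_batch(rac, pages)) != 0)
		for (i = 0; i < nr; i++)
			example_submit_page_read(pages[i]);
}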

/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
	return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline size_t readahead_length(struct readahead_control *rac)
{
	return rac->_nr_pages * PAGE_SIZE;
}
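
/*
 * Usage sketch (illustrative only): a filesystem whose I/O unit (for
 * example a compression cluster) spans several pages can widen the
 * current request to unit boundaries with readahead_expand().  @unit
 * is assumed to be a power of two; the function name is hypothetical.
 */
static inline void example_expand_to_unit(struct readahead_control *ractl,
					  size_t unit)
{
	loff_t start = round_down(readahead_pos(ractl), unit);
	loff_t end = round_up(readahead_pos(ractl) + readahead_length(ractl),
			      unit);

	readahead_expand(ractl, start, end - start);
}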

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
	return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
	return rac->_nr_pages;
}

/**
 * readahead_batch_length - The number of bytes in the current batch.
 * @rac: The readahead request.
 */
static inline size_t readahead_batch_length(struct readahead_control *rac)
{
	return rac->_batch_count * PAGE_SIZE;
}
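
/*
 * Usage sketch (illustrative only): the accessors above make a
 * request easy to describe in debug output without touching the
 * private fields.  The function name is hypothetical.
 */
static inline void example_describe_request(struct readahead_control *rac)
{
	pr_debug("ra: index=%lu pages=%u pos=%lld len=%zu batch=%zu\n",
		 (unsigned long)readahead_index(rac), readahead_count(rac),
		 readahead_pos(rac), readahead_length(rac),
		 readahead_batch_length(rac));
}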

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}
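
/*
 * Usage sketch (illustrative only): simple block-based filesystems
 * walk a directory page by page when scanning entries; dir_pages()
 * gives the loop bound.  example_scan_dir_page() is a stand-in.
 */
static int example_scan_dir_page(struct inode *dir, unsigned long n);

static int example_scan_dir(struct inode *dir)
{
	unsigned long n, npages = dir_pages(dir);
	int err = 0;

	for (n = 0; n < npages && !err; n++)
		err = example_scan_dir_page(dir, n);
	return err;
}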

/**
 * folio_mkwrite_check_truncate - check if folio was truncated
 * @folio: the folio to check
 * @inode: the inode to check the folio against
 *
 * Return: the number of bytes in the folio up to EOF,
 * or -EFAULT if the folio was truncated.
 */
static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	size_t offset = offset_in_folio(folio, size);

	if (!folio->mapping)
		return -EFAULT;

	/* folio is wholly inside EOF */
	if (folio_next_index(folio) - 1 < index)
		return folio_size(folio);
	/* folio is wholly past EOF */
	if (folio->index > index || !offset)
		return -EFAULT;
	/* folio is partially inside EOF */
	return offset;
}
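
/*
 * Usage sketch (illustrative only): the common shape of a
 * ->page_mkwrite() handler.  The folio must be locked before the
 * truncate check; the "prepare for writing" step is left as a comment
 * because it is filesystem-specific.
 */
static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct inode *inode = file_inode(vmf->vma->vm_file);
	ssize_t len;

	folio_lock(folio);
	len = folio_mkwrite_check_truncate(folio, inode);
	if (len < 0) {
		/* Raced with truncate; tell the fault code to retry. */
		folio_unlock(folio);
		return VM_FAULT_NOPAGE;
	}

	/* ... prepare the first len bytes of the folio for writing ... */

	/* Returning VM_FAULT_LOCKED hands the locked folio back. */
	return VM_FAULT_LOCKED;
}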

/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Return: the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	int offset = offset_in_page(size);

	if (page->mapping != inode->i_mapping)
		return -EFAULT;

	/* page is wholly inside EOF */
	if (page->index < index)
		return PAGE_SIZE;
	/* page is wholly past EOF */
	if (page->index > index || !offset)
		return -EFAULT;
	/* page is partially inside EOF */
	return offset;
}

/**
 * i_blocks_per_folio - How many blocks fit in this folio.
 * @inode: The inode which contains the blocks.
 * @folio: The folio.
 *
 * If the block size is larger than the size of this folio, return zero.
 *
 * Context: The caller should hold a refcount on the folio to prevent it
 * from being split.
 * Return: The number of filesystem blocks covered by this folio.
 */
static inline
unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
{
	return folio_size(folio) >> inode->i_blkbits;
}
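
/*
 * Usage sketch (illustrative only): iterate over the filesystem
 * blocks backing a folio, as a filesystem might when tracking
 * per-block state.  example_init_block() is a stand-in.
 */
static void example_init_block(struct folio *folio, unsigned int block);

static void example_init_blocks(struct inode *inode, struct folio *folio)
{
	unsigned int i, blocks = i_blocks_per_folio(inode, folio);

	for (i = 0; i < blocks; i++)
		example_init_block(folio, i);
}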
#endif /* _LINUX_PAGEMAP_H */