// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>
#include <linux/sched/mm.h>
#include <trace/hooks/mm.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0, PAGE_SIZE);
		page->mapping = NULL;
		unlock_page(page);
	}
	put_page(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = lru_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 *
 * Returns: %0 on success, error return by @filler otherwise
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = lru_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping, page->index,
				readahead_gfp_mask(mapping))) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		put_page(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_SIZE);
	}
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);
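
/*
 * Illustrative sketch (not part of the original file): a legacy caller such
 * as an old network filesystem could feed its ->readpages() list through
 * read_cache_pages() with a per-page filler.  The function names below are
 * hypothetical; only the read_cache_pages()/readpage calls are real APIs.
 */
static int example_fill_page(void *data, struct page *page)
{
	struct file *file = data;

	/* The page is already in the page cache and locked; readpage unlocks it. */
	return file->f_mapping->a_ops->readpage(file, page);
}

static int example_readpages(struct file *file, struct address_space *mapping,
			     struct list_head *pages, unsigned nr_pages)
{
	return read_cache_pages(mapping, pages, example_fill_page, file);
}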

gfp_t readahead_gfp_mask(struct address_space *x)
{
	gfp_t mask = mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;

	trace_android_rvh_set_readahead_gfp_mask(&mask);
	return mask;
}
EXPORT_SYMBOL_GPL(readahead_gfp_mask);

static void read_pages(struct readahead_control *rac, struct list_head *pages,
		bool skip_page)
{
	const struct address_space_operations *aops = rac->mapping->a_ops;
	struct page *page;
	struct blk_plug plug;

	if (!readahead_count(rac))
		goto out;

	blk_start_plug(&plug);

	if (aops->readahead) {
		aops->readahead(rac);
		/* Clean up the remaining pages */
		while ((page = readahead_page(rac))) {
			unlock_page(page);
			put_page(page);
		}
	} else if (aops->readpages) {
		aops->readpages(rac->file, rac->mapping, pages,
				readahead_count(rac));
		/* Clean up the remaining pages */
		put_pages_list(pages);
		rac->_index += rac->_nr_pages;
		rac->_nr_pages = 0;
	} else {
		while ((page = readahead_page(rac))) {
			aops->readpage(rac->file, page);
			put_page(page);
		}
	}

	blk_finish_plug(&plug);

	BUG_ON(!list_empty(pages));
	BUG_ON(readahead_count(rac));

out:
	if (skip_page)
		rac->_index++;
}

/**
 * page_cache_ra_unbounded - Start unchecked readahead.
 * @ractl: Readahead control.
 * @nr_to_read: The number of pages to read.
 * @lookahead_size: Where to start the next readahead.
 *
 * This function is for filesystems to call when they want to start
 * readahead beyond a file's stated i_size.  This is almost certainly
 * not the function you want to call.  Use page_cache_async_readahead()
 * or page_cache_sync_readahead() instead.
 *
 * Context: File is referenced by caller.  Mutexes may be held by caller.
 * May sleep, but will not reenter filesystem to reclaim memory.
 */
void page_cache_ra_unbounded(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct address_space *mapping = ractl->mapping;
	unsigned long index = readahead_index(ractl);
	LIST_HEAD(page_pool);
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	unsigned long i;

	/*
	 * Partway through the readahead operation, we will have added
	 * locked pages to the page cache, but will not yet have submitted
	 * them for I/O.  Adding another page may need to allocate memory,
	 * which can trigger memory reclaim.  Telling the VM we're in
	 * the middle of a filesystem operation will cause it to not
	 * touch file-backed pages, preventing a deadlock.  Most (all?)
	 * filesystems already specify __GFP_NOFS in their mapping's
	 * gfp_mask, but let's be explicit here.
	 */
	unsigned int nofs = memalloc_nofs_save();

	filemap_invalidate_lock_shared(mapping);
	/*
	 * Preallocate as many pages as we will need.
	 */
	for (i = 0; i < nr_to_read; i++) {
		struct page *page = xa_load(&mapping->i_pages, index + i);

		if (page && !xa_is_value(page)) {
			/*
			 * Page already present?  Kick off the current batch
			 * of contiguous pages before continuing with the
			 * next batch.  This page may be the one we would
			 * have intended to mark as Readahead, but we don't
			 * have a stable reference to this page, and it's
			 * not worth getting one just for that.
			 */
			read_pages(ractl, &page_pool, true);
			i = ractl->_index + ractl->_nr_pages - index - 1;
			continue;
		}

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			break;
		if (mapping->a_ops->readpages) {
			page->index = index + i;
			list_add(&page->lru, &page_pool);
		} else if (add_to_page_cache_lru(page, mapping, index + i,
					gfp_mask) < 0) {
			put_page(page);
			read_pages(ractl, &page_pool, true);
			i = ractl->_index + ractl->_nr_pages - index - 1;
			continue;
		}
		if (i == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		ractl->_nr_pages++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	read_pages(ractl, &page_pool, false);
	filemap_invalidate_unlock_shared(mapping);
	memalloc_nofs_restore(nofs);
}
EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);
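
/*
 * Illustrative sketch (not part of the original file): a filesystem that
 * keeps metadata past i_size (e.g. a Merkle tree appended after EOF) might
 * warm the cache for it via the unbounded entry point above.  The function
 * name is hypothetical; DEFINE_READAHEAD() and page_cache_ra_unbounded()
 * are the real APIs.
 */
static void example_readahead_past_eof(struct file *file, pgoff_t first_index,
				       unsigned long nr_pages)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, file->f_mapping,
			 first_index);

	/* No lookahead marker is needed; just pull the pages in. */
	page_cache_ra_unbounded(&ractl, nr_pages, 0);
}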

/*
 * do_page_cache_ra() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O. This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 */
void do_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct inode *inode = ractl->mapping->host;
	unsigned long index = readahead_index(ractl);
	loff_t isize = i_size_read(inode);
	pgoff_t end_index;	/* The last page we want to read */

	if (isize == 0)
		return;

	end_index = (isize - 1) >> PAGE_SHIFT;
	if (index > end_index)
		return;
	/* Don't read past the page containing the last byte of the file */
	if (nr_to_read > end_index - index)
		nr_to_read = end_index - index + 1;

	page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
void force_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	unsigned long max_pages, index;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages &&
			!mapping->a_ops->readahead))
		return;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	index = readahead_index(ractl);
	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
	while (nr_to_read) {
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		ractl->_index = index;
		do_page_cache_ra(ractl, this_chunk, 0);

		index += this_chunk;
		nr_to_read -= this_chunk;
	}
}
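
/*
 * Illustrative arithmetic (not part of the original file): with 4k pages,
 * each chunk above is 2MB / 4k = 512 pages, so a forced readahead of, say,
 * 1200 pages (already clamped to max_pages) is issued as three calls to
 * do_page_cache_ra() of 512, 512 and 176 pages respectively.
 */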

/*
 * Set the initial window size, round to next power of 2 and square
 * for small size, x 4 for medium, and x 2 for large
 * for 128k (32 page) max ra
 * 1-8 page = 32k initial, > 8 page = 128k initial
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}
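
/*
 * Illustrative worked values (not part of the original file), assuming a
 * default window of max = 32 pages (128k with 4k pages), get_init_ra_size()
 * above yields:
 *
 *   size = 1  -> roundup = 1,  1 <= 32/32 -> initial window = 4 pages (16k)
 *   size = 4  -> roundup = 4,  4 <= 32/4  -> initial window = 8 pages (32k)
 *   size = 16 -> roundup = 16, 16 > 32/4  -> initial window = 32 pages (128k)
 */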

/*
 *  Get the previous window size, ramp it up, and
 *  return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;

	if (cur < max / 16)
		return 4 * cur;
	if (cur <= max / 2)
		return 2 * cur;
	return max;
}
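
/*
 * Illustrative worked values (not part of the original file): with max = 32
 * pages, successive calls to get_next_ra_size() above ramp a sequential
 * stream up as
 *
 *   cur = 4  -> 4 < 32/16 is false, 4 <= 32/2 -> next window = 8
 *   cur = 8  -> 8 <= 16                       -> next window = 16
 *   cur = 16 -> 16 <= 16                      -> next window = 32
 *   cur = 32 -> capped at max                 -> next window = 32
 */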

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as readahead
 * indicator. The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */
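
/*
 * Illustrative walk-through (not part of the original file), assuming
 * ra_pages = 32 and an application reading 4 pages at a time from offset 0:
 *
 *   read of page 0      -> sync readahead: start = 0, size = 8, async_size = 4,
 *                          page 4 is marked PG_readahead
 *   read reaches page 4 -> async readahead: start = 8, size = 16,
 *                          async_size = 16, page 8 marked PG_readahead
 *   read reaches page 8 -> async readahead: start = 24, size = 32, ...
 *
 * so I/O for the next window is already in flight while the application is
 * still consuming the current one.
 */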

/*
 * Count contiguously cached pages from @index-1 to @index-@max,
 * this count is a conservative estimation of
 * 	- length of the sequential read sequence, or
 * 	- thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
				   pgoff_t index, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = page_cache_prev_miss(mapping, index - 1, max);
	rcu_read_unlock();

	return index - 1 - head;
}

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t index,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size;

	size = count_history_pages(mapping, index, max);

	/*
	 * not enough history pages:
	 * it could be a random read
	 */
	if (size <= req_size)
		return 0;

	/*
	 * starts from beginning of file:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (size >= index)
		size *= 2;

	ra->start = index;
	ra->size = min(size + req_size, max);
	ra->async_size = 1;

	return 1;
}
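
/*
 * Illustrative example (not part of the original file): with max = 32, a
 * read of req_size = 16 pages at index = 1000, and 20 contiguously cached
 * pages ending at index 999, try_context_readahead() above finds
 * size = 20 > 16, treats the read as part of a sequential stream, and sets
 * up a window of min(20 + 16, 32) = 32 pages starting at index 1000, with
 * async_size = 1.
 */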

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static void ondemand_readahead(struct readahead_control *ractl,
		bool hit_readahead_marker, unsigned long req_size)
{
	struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
	struct file_ra_state *ra = ractl->ra;
	unsigned long max_pages = ra->ra_pages;
	unsigned long add_pages;
	unsigned long index = readahead_index(ractl);
	pgoff_t prev_index;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	if (req_size > max_pages && bdi->io_pages > max_pages)
		max_pages = min(req_size, bdi->io_pages);

	/*
	 * start of file
	 */
	if (!index)
		goto initial_readahead;

	/*
	 * It's the expected callback index, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if ((index == (ra->start + ra->size - ra->async_size) ||
	     index == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size. Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		rcu_read_lock();
		start = page_cache_next_miss(ractl->mapping, index + 1,
				max_pages);
		rcu_read_unlock();

		if (!start || start - index > max_pages)
			return;

		ra->start = start;
		ra->size = start - index;	/* old async_size */
		ra->size += req_size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * oversize read
	 */
	if (req_size > max_pages)
		goto initial_readahead;

	/*
	 * sequential cache miss
	 * trivial case: (index - prev_index) == 1
	 * unaligned reads: (index - prev_index) == 0
	 */
	prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
	if (index - prev_index <= 1UL)
		goto initial_readahead;

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	if (try_context_readahead(ractl->mapping, ra, index, req_size,
			max_pages))
		goto readit;

	/*
	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
	do_page_cache_ra(ractl, req_size, 0);
	return;

initial_readahead:
	ra->start = index;
	ra->size = get_init_ra_size(req_size, max_pages);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	/*
	 * Will this read hit the readahead marker made by itself?
	 * If so, trigger the readahead marker hit now, and merge
	 * the resulting next readahead window into the current one.
	 * Take care of maximum IO pages as above.
	 */
	if (index == ra->start && ra->size == ra->async_size) {
		add_pages = get_next_ra_size(ra, max_pages);
		if (ra->size + add_pages <= max_pages) {
			ra->async_size = add_pages;
			ra->size += add_pages;
		} else {
			ra->size = max_pages;
			ra->async_size = max_pages >> 1;
		}
	}

	ractl->_index = ra->start;
	do_page_cache_ra(ractl, ra->size, ra->async_size);
}

void page_cache_sync_ra(struct readahead_control *ractl,
		unsigned long req_count)
{
	bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);

	/*
	 * Even if read-ahead is disabled, issue this request as read-ahead
	 * as we'll need it to satisfy the requested range. The forced
	 * read-ahead will do the right thing and limit the read to just the
	 * requested range, which we'll set to 1 page for this case.
	 */
	if (!ractl->ra->ra_pages || blk_cgroup_congested()) {
		if (!ractl->file)
			return;
		req_count = 1;
		do_forced_ra = true;
	}

	/* be dumb */
	if (do_forced_ra) {
		force_page_cache_ra(ractl, req_count);
		return;
	}

	/* do read-ahead */
	ondemand_readahead(ractl, false, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_sync_ra);

void page_cache_async_ra(struct readahead_control *ractl,
		struct page *page, unsigned long req_count)
{
	/* no read-ahead */
	if (!ractl->ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (PageWriteback(page))
		return;

	ClearPageReadahead(page);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (inode_read_congested(ractl->mapping->host))
		return;

	if (blk_cgroup_congested())
		return;

	/* do read-ahead */
	ondemand_readahead(ractl, true, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_async_ra);
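
/*
 * Illustrative sketch (not part of the original file) of how a read path
 * typically drives the two entry points above, in the spirit of the filemap
 * read code: sync readahead on a cache miss, async readahead when the
 * PG_readahead marker is hit.  The function name below is hypothetical; the
 * called helpers are the real wrappers from <linux/pagemap.h>.
 */
static void example_readahead_hook(struct file *file,
				   struct address_space *mapping,
				   struct file_ra_state *ra,
				   pgoff_t index, unsigned long nr_to_read)
{
	struct page *page = find_get_page(mapping, index);

	if (!page) {
		/* Cache miss: kick off synchronous readahead for the gap. */
		page_cache_sync_readahead(mapping, ra, file, index, nr_to_read);
	} else {
		/* Cache hit on the PG_readahead marker: extend the window. */
		if (PageReadahead(page))
			page_cache_async_readahead(mapping, ra, file, page,
						   index, nr_to_read);
		put_page(page);
	}
}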

ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct fd f;

	ret = -EBADF;
	f = fdget(fd);
	if (!f.file || !(f.file->f_mode & FMODE_READ))
		goto out;

	/*
	 * The readahead() syscall is intended to run only on files
	 * that can execute readahead. If readahead is not possible
	 * on this file, then we must return -EINVAL.
	 */
	ret = -EINVAL;
	if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
	    (!S_ISREG(file_inode(f.file)->i_mode) &&
	    !S_ISBLK(file_inode(f.file)->i_mode)))
		goto out;

	ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
out:
	fdput(f);
	return ret;
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	return ksys_readahead(fd, offset, count);
}
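
/*
 * Illustrative userspace sketch (not part of the original file, compiled
 * out here): the syscall above is reached via the glibc readahead(2)
 * wrapper, e.g. to warm the cache for the first 8MB of a file before
 * reading it.  The path is hypothetical.
 */
#if 0	/* standalone userspace program, not kernel code */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/var/lib/example.db", O_RDONLY);

	if (fd < 0)
		return 1;
	if (readahead(fd, 0, 8 * 1024 * 1024) < 0)
		perror("readahead");	/* e.g. EINVAL for a pipe or socket */
	close(fd);
	return 0;
}
#endif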

/**
 * readahead_expand - Expand a readahead request
 * @ractl: The request to be expanded
 * @new_start: The revised start
 * @new_len: The revised size of the request
 *
 * Attempt to expand a readahead request outwards from the current size to the
 * specified size by inserting locked pages before and after the current window
 * to increase the size to the new window.  This may involve the insertion of
 * THPs, in which case the window may get expanded even beyond what was
 * requested.
 *
 * The algorithm will stop if it encounters a conflicting page already in the
 * pagecache and leave a smaller expansion than requested.
 *
 * The caller must check for this by examining the revised @ractl object for a
 * different expansion than was requested.
 */
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	pgoff_t new_index, new_nr_pages;
	gfp_t gfp_mask = readahead_gfp_mask(mapping);

	new_index = new_start / PAGE_SIZE;

	/* Expand the leading edge downwards */
	while (ractl->_index > new_index) {
		unsigned long index = ractl->_index - 1;
		struct page *page = xa_load(&mapping->i_pages, index);

		if (page && !xa_is_value(page))
			return; /* Page apparently present */

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return;
		if (add_to_page_cache_lru(page, mapping, index, gfp_mask) < 0) {
			put_page(page);
			return;
		}

		ractl->_nr_pages++;
		ractl->_index = page->index;
	}

	new_len += new_start - readahead_pos(ractl);
	new_nr_pages = DIV_ROUND_UP(new_len, PAGE_SIZE);

	/* Expand the trailing edge upwards */
	while (ractl->_nr_pages < new_nr_pages) {
		unsigned long index = ractl->_index + ractl->_nr_pages;
		struct page *page = xa_load(&mapping->i_pages, index);

		if (page && !xa_is_value(page))
			return; /* Page apparently present */

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return;
		if (add_to_page_cache_lru(page, mapping, index, gfp_mask) < 0) {
			put_page(page);
			return;
		}
		ractl->_nr_pages++;
		if (ra) {
			ra->size++;
			ra->async_size++;
		}
	}
}
EXPORT_SYMBOL(readahead_expand);
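
/*
 * Illustrative sketch (not part of the original file): a caching filesystem
 * helper (netfs-style) can call readahead_expand() from its ->readahead()
 * to round the window out to its block/granule size before issuing I/O.
 * The function name is hypothetical and granule_size is assumed to be a
 * power of two; the readahead_*() helpers are the real APIs.
 */
static void example_expand_to_granule(struct readahead_control *ractl,
				      unsigned int granule_size)
{
	loff_t start = readahead_pos(ractl);
	size_t len = readahead_length(ractl);
	loff_t new_start = round_down(start, granule_size);
	size_t new_len = round_up(start + len, granule_size) - new_start;

	readahead_expand(ractl, new_start, new_len);

	/* The expansion may fall short; re-read ractl for the actual window. */
}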