// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>
#include <linux/sched/mm.h>
#include <trace/hooks/mm.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0, PAGE_SIZE);
		page->mapping = NULL;
		unlock_page(page);
	}
	put_page(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = lru_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 *
 * Returns: %0 on success, error return by @filler otherwise
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = lru_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping, page->index,
				readahead_gfp_mask(mapping))) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		put_page(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_SIZE);
	}
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);

gfp_t readahead_gfp_mask(struct address_space *x)
{
	gfp_t mask = mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;

	trace_android_rvh_set_readahead_gfp_mask(&mask);
	trace_android_rvh_update_readahead_gfp_mask(x, &mask);
	return mask;
}
EXPORT_SYMBOL_GPL(readahead_gfp_mask);

static void read_pages(struct readahead_control *rac, struct list_head *pages,
		bool skip_page)
{
	const struct address_space_operations *aops = rac->mapping->a_ops;
	struct page *page;
	struct blk_plug plug;

	if (!readahead_count(rac))
		goto out;

	blk_start_plug(&plug);

	if (aops->readahead) {
		aops->readahead(rac);
		/* Clean up the remaining pages */
		while ((page = readahead_page(rac))) {
			unlock_page(page);
			put_page(page);
		}
	} else if (aops->readpages) {
		aops->readpages(rac->file, rac->mapping, pages,
				readahead_count(rac));
		/* Clean up the remaining pages */
		put_pages_list(pages);
		rac->_index += rac->_nr_pages;
		rac->_nr_pages = 0;
	} else {
		while ((page = readahead_page(rac))) {
			aops->readpage(rac->file, page);
			put_page(page);
		}
	}

	blk_finish_plug(&plug);

	BUG_ON(!list_empty(pages));
	BUG_ON(readahead_count(rac));

out:
	if (skip_page)
		rac->_index++;
}
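
/*
 * For context, a minimal sketch of the ->readahead side of the contract
 * driven above (a hypothetical filesystem, not taken from any real
 * implementation; example_submit_read() is an assumed helper that starts
 * the read and unlocks the page on I/O completion): the method pulls the
 * locked pages out of @rac with readahead_page(), starts I/O on each and
 * drops its reference; any pages it declines to consume are unlocked and
 * released by read_pages().
 *
 *	static void example_readahead(struct readahead_control *rac)
 *	{
 *		struct page *page;
 *
 *		while ((page = readahead_page(rac))) {
 *			example_submit_read(rac->file, page);
 *			put_page(page);
 *		}
 *	}
 */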

/**
 * page_cache_ra_unbounded - Start unchecked readahead.
 * @ractl: Readahead control.
 * @nr_to_read: The number of pages to read.
 * @lookahead_size: Where to start the next readahead.
 *
 * This function is for filesystems to call when they want to start
 * readahead beyond a file's stated i_size.  This is almost certainly
 * not the function you want to call.  Use page_cache_async_readahead()
 * or page_cache_sync_readahead() instead.
 *
 * Context: File is referenced by caller.  Mutexes may be held by caller.
 * May sleep, but will not reenter filesystem to reclaim memory.
 */
void page_cache_ra_unbounded(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct address_space *mapping = ractl->mapping;
	unsigned long index = readahead_index(ractl);
	LIST_HEAD(page_pool);
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	unsigned long i;

	/*
	 * Partway through the readahead operation, we will have added
	 * locked pages to the page cache, but will not yet have submitted
	 * them for I/O.  Adding another page may need to allocate memory,
	 * which can trigger memory reclaim.  Telling the VM we're in
	 * the middle of a filesystem operation will cause it to not
	 * touch file-backed pages, preventing a deadlock.  Most (all?)
	 * filesystems already specify __GFP_NOFS in their mapping's
	 * gfp_mask, but let's be explicit here.
	 */
	unsigned int nofs = memalloc_nofs_save();

	filemap_invalidate_lock_shared(mapping);
	/*
	 * Preallocate as many pages as we will need.
	 */
	for (i = 0; i < nr_to_read; i++) {
		struct page *page = xa_load(&mapping->i_pages, index + i);

		if (page && !xa_is_value(page)) {
			/*
			 * Page already present?  Kick off the current batch
			 * of contiguous pages before continuing with the
			 * next batch.  This page may be the one we would
			 * have intended to mark as Readahead, but we don't
			 * have a stable reference to this page, and it's
			 * not worth getting one just for that.
			 */
			read_pages(ractl, &page_pool, true);
			i = ractl->_index + ractl->_nr_pages - index - 1;
			continue;
		}

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			break;
		if (mapping->a_ops->readpages) {
			page->index = index + i;
			list_add(&page->lru, &page_pool);
		} else if (add_to_page_cache_lru(page, mapping, index + i,
					gfp_mask) < 0) {
			put_page(page);
			read_pages(ractl, &page_pool, true);
			i = ractl->_index + ractl->_nr_pages - index - 1;
			continue;
		}
		if (i == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		ractl->_nr_pages++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	read_pages(ractl, &page_pool, false);
	filemap_invalidate_unlock_shared(mapping);
	memalloc_nofs_restore(nofs);
}
EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);
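
/*
 * Rough usage sketch (assuming the five-argument DEFINE_READAHEAD() from
 * this kernel's pagemap.h): a filesystem that really does want to read
 * past the file's stated i_size would drive the unbounded variant
 * directly, for example:
 *
 *	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
 *	page_cache_ra_unbounded(&ractl, nr_pages, 0);
 *
 * The normal entry points, page_cache_sync_readahead() and
 * page_cache_async_readahead(), clamp the request to i_size through
 * do_page_cache_ra() below.
 */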

/*
 * do_page_cache_ra() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O. This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 */
void do_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct inode *inode = ractl->mapping->host;
	unsigned long index = readahead_index(ractl);
	loff_t isize = i_size_read(inode);
	pgoff_t end_index;	/* The last page we want to read */

	if (isize == 0)
		return;

	end_index = (isize - 1) >> PAGE_SHIFT;
	if (index > end_index)
		return;
	/* Don't read past the page containing the last byte of the file */
	if (nr_to_read > end_index - index)
		nr_to_read = end_index - index + 1;

	page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
void force_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	unsigned long max_pages, index;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages &&
			!mapping->a_ops->readahead))
		return;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	index = readahead_index(ractl);
	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
	while (nr_to_read) {
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		ractl->_index = index;
		do_page_cache_ra(ractl, this_chunk, 0);

		index += this_chunk;
		nr_to_read -= this_chunk;
	}
}
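
/*
 * With 4 KiB pages the chunking above caps each do_page_cache_ra() call at
 * (2 * 1024 * 1024) / 4096 = 512 pages, so even a huge forced request only
 * holds up to 2 MiB of not-yet-submitted pages at any one time.
 */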

/*
 * Set the initial window size: round the request up to the next power of 2,
 * then quadruple it while it is small relative to the maximum window,
 * double it for medium sizes, and clamp it to the maximum for large ones.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}

/*
 *  Get the previous window size, ramp it up, and
 *  return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;

	if (cur < max / 16)
		return 4 * cur;
	if (cur <= max / 2)
		return 2 * cur;
	return max;
}
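
/*
 * Worked example of the two ramp-up helpers above, assuming the common
 * 128 KiB (32 page) default window: a 1 page initial request is rounded up
 * and quadrupled by get_init_ra_size() to a 4 page window (1 <= 32 / 32);
 * each subsequent sequential hit then grows it through get_next_ra_size()
 * to 8, 16 and finally the 32 page maximum, where it stays.
 */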

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as readahead
 * indicator. The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial size
 * based on I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */
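
/*
 * For instance, once a stream has ramped up to a full window - say
 * size = async_size = 32 pages starting at index S - the PG_readahead
 * marker lands on page S + size - async_size = S, the first page of the
 * window.  As soon as the application touches page S, the async readahead
 * path fires and the next 32 page window starting at S + 32 is submitted,
 * while the application is still consuming the current one.
 */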

/*
 * Count contiguously cached pages from @index-1 to @index-@max,
 * this count is a conservative estimation of
 * 	- length of the sequential read sequence, or
 * 	- thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
				   pgoff_t index, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = page_cache_prev_miss(mapping, index - 1, max);
	rcu_read_unlock();

	return index - 1 - head;
}

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t index,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size;

	size = count_history_pages(mapping, index, max);

	/*
	 * not enough history pages:
	 * it could be a random read
	 */
	if (size <= req_size)
		return 0;

	/*
	 * starts from beginning of file:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (size >= index)
		size *= 2;

	ra->start = index;
	ra->size = min(size + req_size, max);
	ra->async_size = 1;

	return 1;
}

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static void ondemand_readahead(struct readahead_control *ractl,
		bool hit_readahead_marker, unsigned long req_size)
{
	struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
	struct file_ra_state *ra = ractl->ra;
	unsigned long max_pages = ra->ra_pages;
	unsigned long add_pages;
	unsigned long index = readahead_index(ractl);
	pgoff_t prev_index;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	if (req_size > max_pages && bdi->io_pages > max_pages)
		max_pages = min(req_size, bdi->io_pages);

	trace_android_vh_ra_tuning_max_page(ractl, &max_pages);

	/*
	 * start of file
	 */
	if (!index)
		goto initial_readahead;

	/*
	 * It's the expected callback index, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if ((index == (ra->start + ra->size - ra->async_size) ||
	     index == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals to
	 * readahead size. Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		rcu_read_lock();
		start = page_cache_next_miss(ractl->mapping, index + 1,
				max_pages);
		rcu_read_unlock();

		if (!start || start - index > max_pages)
			return;

		ra->start = start;
		ra->size = start - index;	/* old async_size */
		ra->size += req_size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * oversize read
	 */
	if (req_size > max_pages)
		goto initial_readahead;

	/*
	 * sequential cache miss
	 * trivial case: (index - prev_index) == 1
	 * unaligned reads: (index - prev_index) == 0
	 */
	prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
	if (index - prev_index <= 1UL)
		goto initial_readahead;

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	if (try_context_readahead(ractl->mapping, ra, index, req_size,
			max_pages))
		goto readit;

	/*
	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
	do_page_cache_ra(ractl, req_size, 0);
	return;

initial_readahead:
	ra->start = index;
	ra->size = get_init_ra_size(req_size, max_pages);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	/*
	 * Will this read hit the readahead marker made by itself?
	 * If so, trigger the readahead marker hit now, and merge
	 * the resulted next readahead window into the current one.
	 * Take care of maximum IO pages as above.
	 */
	if (index == ra->start && ra->size == ra->async_size) {
		add_pages = get_next_ra_size(ra, max_pages);
		if (ra->size + add_pages <= max_pages) {
			ra->async_size = add_pages;
			ra->size += add_pages;
		} else {
			ra->size = max_pages;
			ra->async_size = max_pages >> 1;
		}
	}

	ractl->_index = ra->start;
	do_page_cache_ra(ractl, ra->size, ra->async_size);
}

void page_cache_sync_ra(struct readahead_control *ractl,
		unsigned long req_count)
{
	bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);

	/*
	 * Even if read-ahead is disabled, issue this request as read-ahead
	 * as we'll need it to satisfy the requested range. The forced
	 * read-ahead will do the right thing and limit the read to just the
	 * requested range, which we'll set to 1 page for this case.
	 */
	if (!ractl->ra->ra_pages || blk_cgroup_congested()) {
		if (!ractl->file)
			return;
		req_count = 1;
		do_forced_ra = true;
	}

	trace_android_vh_page_cache_forced_ra(ractl, req_count, &do_forced_ra);

	/* be dumb */
	if (do_forced_ra) {
		force_page_cache_ra(ractl, req_count);
		return;
	}

	/* do read-ahead */
	ondemand_readahead(ractl, false, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_sync_ra);

void page_cache_async_ra(struct readahead_control *ractl,
		struct page *page, unsigned long req_count)
{
	/* no read-ahead */
	if (!ractl->ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (PageWriteback(page))
		return;

	ClearPageReadahead(page);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (inode_read_congested(ractl->mapping->host))
		return;

	if (blk_cgroup_congested())
		return;

	/* do read-ahead */
	ondemand_readahead(ractl, true, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_async_ra);
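
/*
 * A hedged sketch of how the generic read path is expected to reach the
 * two entry points above through the pagemap.h wrappers (loosely modelled
 * on filemap.c; details elided):
 *
 *	page = find_get_page(mapping, index);
 *	if (!page)
 *		page_cache_sync_readahead(mapping, ra, file, index, req_count);
 *	else if (PageReadahead(page))
 *		page_cache_async_readahead(mapping, ra, file, page, index,
 *					   req_count);
 *
 * A cache miss starts a synchronous window; hitting the PG_readahead
 * marker tops the window up asynchronously.
 */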

ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct fd f;

	ret = -EBADF;
	f = fdget(fd);
	if (!f.file || !(f.file->f_mode & FMODE_READ))
		goto out;

	/*
	 * The readahead() syscall is intended to run only on files
	 * that can execute readahead. If readahead is not possible
	 * on this file, then we must return -EINVAL.
	 */
	ret = -EINVAL;
	if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
	    (!S_ISREG(file_inode(f.file)->i_mode) &&
	    !S_ISBLK(file_inode(f.file)->i_mode)))
		goto out;

	ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
out:
	fdput(f);
	return ret;
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	return ksys_readahead(fd, offset, count);
}
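
/*
 * From userspace this is the readahead(2) wrapper; a sketch of prefetching
 * the first megabyte of a file (glibc, _GNU_SOURCE assumed):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int fd = open(path, O_RDONLY);
 *	if (fd >= 0)
 *		readahead(fd, 0, 1024 * 1024);
 *
 * posix_fadvise(fd, 0, len, POSIX_FADV_WILLNEED) reaches the same
 * vfs_fadvise() path that ksys_readahead() uses above.
 */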

/**
 * readahead_expand - Expand a readahead request
 * @ractl: The request to be expanded
 * @new_start: The revised start
 * @new_len: The revised size of the request
 *
 * Attempt to expand a readahead request outwards from the current size to the
 * specified size by inserting locked pages before and after the current window
 * to increase the size to the new window.  This may involve the insertion of
 * THPs, in which case the window may get expanded even beyond what was
 * requested.
 *
 * The algorithm will stop if it encounters a conflicting page already in the
 * pagecache and leave a smaller expansion than requested.
 *
 * The caller must check for this by examining the revised @ractl object for a
 * different expansion than was requested.
 */
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	pgoff_t new_index, new_nr_pages;
	gfp_t gfp_mask = readahead_gfp_mask(mapping);

	new_index = new_start / PAGE_SIZE;

	/* Expand the leading edge downwards */
	while (ractl->_index > new_index) {
		unsigned long index = ractl->_index - 1;
		struct page *page = xa_load(&mapping->i_pages, index);

		if (page && !xa_is_value(page))
			return; /* Page apparently present */

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return;
		if (add_to_page_cache_lru(page, mapping, index, gfp_mask) < 0) {
			put_page(page);
			return;
		}

		ractl->_nr_pages++;
		ractl->_index = page->index;
	}

	new_len += new_start - readahead_pos(ractl);
	new_nr_pages = DIV_ROUND_UP(new_len, PAGE_SIZE);

	/* Expand the trailing edge upwards */
	while (ractl->_nr_pages < new_nr_pages) {
		unsigned long index = ractl->_index + ractl->_nr_pages;
		struct page *page = xa_load(&mapping->i_pages, index);

		if (page && !xa_is_value(page))
			return; /* Page apparently present */

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return;
		if (add_to_page_cache_lru(page, mapping, index, gfp_mask) < 0) {
			put_page(page);
			return;
		}
		ractl->_nr_pages++;
		if (ra) {
			ra->size++;
			ra->async_size++;
		}
	}
}
EXPORT_SYMBOL(readahead_expand);
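
/*
 * Sketch of a caller (hypothetical; the 256 KiB granule is illustrative):
 * a filesystem or caching layer that performs I/O in fixed-size granules
 * might widen an incoming request to granule boundaries:
 *
 *	loff_t start = round_down(readahead_pos(ractl), 256 * 1024);
 *	size_t len = round_up(readahead_pos(ractl) + readahead_length(ractl),
 *			      256 * 1024) - start;
 *
 *	readahead_expand(ractl, start, len);
 *
 * The caller must then re-check readahead_pos()/readahead_length(), since
 * the expansion stops short when it meets pages already in the cache.
 */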
734