Lines Matching +full:start +full:- +full:up

2  * mm/readahead.c - address_space-level file readahead.
15 #include <linux/backing-dev.h>
22 #include <linux/blk-cgroup.h>
34 ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages; in file_ra_state_init()
35 ra->prev_pos = -1; in file_ra_state_init()
41 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
43 * on disk, thus we need to give the fs a chance to clean up in the event of
52 page->mapping = mapping; in read_cache_pages_invalidate_page()
54 page->mapping = NULL; in read_cache_pages_invalidate_page()
70 list_del(&victim->lru); in read_cache_pages_invalidate_pages()
76 * read_cache_pages - populate an address space with some pages & start reads against them
79 * pages have their ->index populated and are otherwise uninitialised.
93 list_del(&page->lru); in read_cache_pages()
94 if (add_to_page_cache_lru(page, mapping, page->index, in read_cache_pages()
122 if (mapping->a_ops->readpages) { in read_pages()
123 ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages); in read_pages()
124 /* Clean up the remaining pages */ in read_pages()
131 list_del(&page->lru); in read_pages()
132 if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) in read_pages()
133 mapping->a_ops->readpage(filp, page); in read_pages()
156 struct inode *inode = mapping->host; in __do_page_cache_readahead()
168 end_index = ((isize - 1) >> PAGE_SHIFT); in __do_page_cache_readahead()
180 page = radix_tree_lookup(&mapping->i_pages, page_offset); in __do_page_cache_readahead()
198 page->index = page_offset; in __do_page_cache_readahead()
199 list_add(&page->lru, &page_pool); in __do_page_cache_readahead()
200 if (page_idx == nr_to_read - lookahead_size) in __do_page_cache_readahead()
206 * Now start the IO. We ignore I/O errors - if the page is not in __do_page_cache_readahead()
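
Only fragments of __do_page_cache_readahead() match above. As a sketch reconstructed from context (not a verbatim copy of the file), the surrounding loop skips pages already in the cache, queues newly allocated pages on a local list, and tags the page at the lookahead boundary so a later read on it can trigger the next asynchronous readahead:

/* Sketch of the allocation loop; locking and error paths omitted. */
for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
	pgoff_t page_offset = offset + page_idx;

	if (page_offset > end_index)
		break;

	page = radix_tree_lookup(&mapping->i_pages, page_offset);
	if (page)	/* already cached: nothing to read here */
		continue;

	page = __page_cache_alloc(gfp_mask);
	if (!page)
		break;
	page->index = page_offset;
	list_add(&page->lru, &page_pool);
	if (page_idx == nr_to_read - lookahead_size)
		SetPageReadahead(page);	/* the async trigger mark */
	nr_pages++;
}
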
224 struct backing_dev_info *bdi = inode_to_bdi(mapping->host); in force_page_cache_readahead()
225 struct file_ra_state *ra = &filp->f_ra; in force_page_cache_readahead()
228 if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages)) in force_page_cache_readahead()
229 return -EINVAL; in force_page_cache_readahead()
233 * be up to the optimal hardware IO size in force_page_cache_readahead()
235 max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages); in force_page_cache_readahead()
245 nr_to_read -= this_chunk; in force_page_cache_readahead()
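
The body of force_page_cache_readahead() is mostly elided by the match filter. Its gist, sketched here with the 2MB chunk size assumed from historical kernel behaviour, is to clamp the request to max_pages and feed it to __do_page_cache_readahead() in bounded chunks:

/* Sketch: cap the request, then submit it in digestible chunks. */
nr_to_read = min(nr_to_read, max_pages);
while (nr_to_read) {
	unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

	if (this_chunk > nr_to_read)
		this_chunk = nr_to_read;
	__do_page_cache_readahead(mapping, filp, offset, this_chunk, 0);

	offset += this_chunk;
	nr_to_read -= this_chunk;
}
return 0;
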
254 * 1-8 page = 32k initial, > 8 page = 128k initial
271 * Get the previous window size, ramp it up, and
277 unsigned long cur = ra->size; in get_next_ra_size()
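
Lines 254 and 271-277 only hint at the sizing policy. As a standalone sketch, assuming the classic ramp-up rules (round the first request up to a power of two and boost it, then roughly double or quadruple on each sequential hit, clamped at the limit):

/* Sketch of the sizing heuristics; exact thresholds are assumed. */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;	/* tiny read: jump to 4x */
	else if (newsize <= max / 4)
		newsize = newsize * 2;	/* small read: 2x */
	else
		newsize = max;		/* large read: straight to max */

	return newsize;
}

static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;

	/* Ramp up: quadruple small windows, double bigger ones. */
	return min(cur < max / 16 ? 4 * cur : 2 * cur, max);
}
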
289 * On-demand readahead design.
291 * The fields in struct file_ra_state represent the most-recently-executed
294  *                        |<----- async_size ---------|
295  *     |------------------- size -------------------->|
297  *     ^start             ^page marked with PG_readahead
308 * page at (start+size-async_size) with PG_readahead, and use it as readahead
310 * readahead-for-nothing fuss, saving pointless page cache lookups.
318  * There is a special case: if the first page which the application tries to
323  * The code ramps up the readahead size aggressively at first, but slows down as
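
To make the window diagram above concrete: the field names below are the real ones from struct file_ra_state, but the helper itself is illustrative and not part of the file.

/*
 * ra->start       first page of the current readahead window
 * ra->size        number of pages in the window
 * ra->async_size  fire the next async readahead once only this many
 *                 pages of the window remain unconsumed
 */
static pgoff_t readahead_marker(struct file_ra_state *ra)
{
	/* The page tagged PG_readahead, per the diagram above. */
	return ra->start + ra->size - ra->async_size;
}
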
328 * Count contiguously cached pages from @offset-1 to @offset-@max,
330 * - length of the sequential read sequence, or
331 * - thrashing threshold in memory tight systems
339 head = page_cache_prev_hole(mapping, offset - 1, max); in count_history_pages()
342 return offset - 1 - head; in count_history_pages()
346 * page cache context based read-ahead
367 * it is a strong indication of long-run stream (or whole-file-read) in try_context_readahead()
372 ra->start = offset; in try_context_readahead()
373 ra->size = min(size + req_size, max); in try_context_readahead()
374 ra->async_size = 1; in try_context_readahead()
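
Putting lines 339-374 together: try_context_readahead() counts the cached history before @offset and treats a long enough run as evidence of a sequential stream. A sketch of the whole decision, reconstructed from the matched fragments (the doubling rule for streams starting at the beginning of the file is assumed from the comment above):

/* Sketch of the context-readahead decision. */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t offset,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size = count_history_pages(mapping, offset, max);

	/* Too little history: could just be a random read. */
	if (size <= req_size)
		return 0;

	/* History reaches the start of the file: long-run stream. */
	if (size >= offset)
		size *= 2;

	ra->start = offset;
	ra->size = min(size + req_size, max);
	ra->async_size = 1;
	return 1;
}
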
388 struct backing_dev_info *bdi = inode_to_bdi(mapping->host); in ondemand_readahead()
389 unsigned long max_pages = ra->ra_pages; in ondemand_readahead()
395 * be up to the optimal hardware IO size in ondemand_readahead()
397 if (req_size > max_pages && bdi->io_pages > max_pages) in ondemand_readahead()
398 max_pages = min(req_size, bdi->io_pages); in ondemand_readahead()
401 * start of file in ondemand_readahead()
408 * Ramp up sizes, and push forward the readahead window. in ondemand_readahead()
410 if ((offset == (ra->start + ra->size - ra->async_size) || in ondemand_readahead()
411 offset == (ra->start + ra->size))) { in ondemand_readahead()
412 ra->start += ra->size; in ondemand_readahead()
413 ra->size = get_next_ra_size(ra, max_pages); in ondemand_readahead()
414 ra->async_size = ra->size; in ondemand_readahead()
422 * readahead size. Ramp it up and use it as the new readahead size. in ondemand_readahead()
425 pgoff_t start; in ondemand_readahead() local
428 start = page_cache_next_hole(mapping, offset + 1, max_pages); in ondemand_readahead()
431 if (!start || start - offset > max_pages) in ondemand_readahead()
434 ra->start = start; in ondemand_readahead()
435 ra->size = start - offset; /* old async_size */ in ondemand_readahead()
436 ra->size += req_size; in ondemand_readahead()
437 ra->size = get_next_ra_size(ra, max_pages); in ondemand_readahead()
438 ra->async_size = ra->size; in ondemand_readahead()
450 * trivial case: (offset - prev_offset) == 1 in ondemand_readahead()
451 * unaligned reads: (offset - prev_offset) == 0 in ondemand_readahead()
453 prev_offset = (unsigned long long)ra->prev_pos >> PAGE_SHIFT; in ondemand_readahead()
454 if (offset - prev_offset <= 1UL) in ondemand_readahead()
471 ra->start = offset; in ondemand_readahead()
472 ra->size = get_init_ra_size(req_size, max_pages); in ondemand_readahead()
473 ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size; in ondemand_readahead()
482 if (offset == ra->start && ra->size == ra->async_size) { in ondemand_readahead()
484 if (ra->size + add_pages <= max_pages) { in ondemand_readahead()
485 ra->async_size = add_pages; in ondemand_readahead()
486 ra->size += add_pages; in ondemand_readahead()
488 ra->size = max_pages; in ondemand_readahead()
489 ra->async_size = max_pages >> 1; in ondemand_readahead()
497 * page_cache_sync_readahead - generic file readahead
500 * @filp: passed on to ->readpage() and ->readpages()
501 * @offset: start offset into @mapping, in pagecache page-sized units
514 /* no read-ahead */ in page_cache_sync_readahead()
515 if (!ra->ra_pages) in page_cache_sync_readahead()
522 if (filp && (filp->f_mode & FMODE_RANDOM)) { in page_cache_sync_readahead()
527 /* do read-ahead */ in page_cache_sync_readahead()
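
For context (the caller lives outside this file): the buffered read path invokes page_cache_sync_readahead() on a page-cache miss, covering the rest of the request. A sketch modeled on generic_file_buffered_read(), with variable names assumed:

/* Sketch of a typical caller on a cache miss. */
page = find_get_page(mapping, index);
if (!page) {
	page_cache_sync_readahead(mapping, ra, filp,
				  index, last_index - index);
	page = find_get_page(mapping, index);	/* usually present now */
}
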
533 * page_cache_async_readahead - file readahead for marked pages
536 * @filp: passed on to ->readpage() and ->readpages()
538 * @offset: start offset into @mapping, in pagecache page-sized units
544 * has used up enough of the readahead window that we should start pulling in
553 /* no read-ahead */ in page_cache_async_readahead()
554 if (!ra->ra_pages) in page_cache_async_readahead()
566 * Defer asynchronous read-ahead on IO congestion. in page_cache_async_readahead()
568 if (inode_read_congested(mapping->host)) in page_cache_async_readahead()
574 /* do read-ahead */ in page_cache_async_readahead()
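
Its async counterpart fires when the reader touches the PG_readahead-marked page, keeping the pipeline full; again a sketch of the calling side:

/* Sketch: the PageReadahead() test is the marker check. */
if (PageReadahead(page))
	page_cache_async_readahead(mapping, ra, filp, page,
				   index, last_index - index);
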
584 ret = -EBADF; in ksys_readahead()
586 if (!f.file || !(f.file->f_mode & FMODE_READ)) in ksys_readahead()
592 * on this file, then we must return -EINVAL. in ksys_readahead()
594 ret = -EINVAL; in ksys_readahead()
595 if (!f.file->f_mapping || !f.file->f_mapping->a_ops || in ksys_readahead()
596 !S_ISREG(file_inode(f.file)->i_mode)) in ksys_readahead()
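
ksys_readahead() backs the readahead(2) system call. A minimal userspace example (file name assumed, error handling trimmed) asking the kernel to prefetch the first megabyte of a file:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("data.bin", O_RDONLY);

	if (fd < 0)
		return 1;
	/* Hint: populate the page cache for bytes [0, 1 MiB). */
	if (readahead(fd, 0, 1024 * 1024) != 0)
		perror("readahead");
	close(fd);
	return 0;
}

For file descriptors that don't support readahead, the call fails with errno set to EINVAL, matching the -EINVAL check in the lines above.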