Lines Matching full:ra

136  * memset *ra to zero.
139 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) in file_ra_state_init() argument
141 ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages; in file_ra_state_init()
142 ra->prev_pos = -1; in file_ra_state_init()
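
The two file_ra_state_init() matches above are the whole of the per-file setup: clear the state, copy the backing device's readahead limit, and mark prev_pos as "no previous read". Below is a minimal userspace model of that state, assuming the usual field set (start, size, async_size, ra_pages, prev_pos) seen throughout this listing; the 32-page default stands in for inode_to_bdi(mapping->host)->ra_pages and is only an assumption.

    #include <stdio.h>

    /* Userspace model of the readahead state used in the sketches below.
     * Field names follow the listing; this is not the kernel struct. */
    struct ra_state {
            unsigned long start;      /* first page of the current window */
            unsigned long size;       /* pages in the current window */
            unsigned long async_size; /* pages left when async readahead fires */
            unsigned long ra_pages;   /* maximum window size */
            long long prev_pos;       /* last read position, -1 = none */
    };

    /* Mirrors file_ra_state_init(): zero everything ("memset *ra to zero"),
     * then set the per-device maximum and invalidate prev_pos. */
    static void ra_state_init(struct ra_state *ra, unsigned long bdi_ra_pages)
    {
            *ra = (struct ra_state){ 0 };
            ra->ra_pages = bdi_ra_pages;
            ra->prev_pos = -1;
    }

    int main(void)
    {
            struct ra_state ra;

            ra_state_init(&ra, 32);   /* assumed 32-page (128 KiB) default */
            printf("ra_pages=%lu prev_pos=%lld\n", ra.ra_pages, ra.prev_pos);
            return 0;
    }
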
162 * Clean up the remaining folios. The sizes in ->ra in read_pages()
170 rac->ra->size -= nr; in read_pages()
171 if (rac->ra->async_size >= nr) { in read_pages()
172 rac->ra->async_size -= nr; in read_pages()
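
The read_pages() matches shrink the recorded window when some folios could not be submitted, so that the sizes left in ->ra reflect what was actually issued before they are used to size the next readahead. A sketch of that bookkeeping on the ra_state model above; nr is assumed to be the count of left-over (unsubmitted) folios.

    /* Sketch of the read_pages() cleanup: drop unsubmitted folios from the
     * window accounting. Builds on struct ra_state from the first sketch. */
    static void ra_trim_window(struct ra_state *ra, unsigned long nr)
    {
            ra->size -= nr;
            if (ra->async_size >= nr)
                    ra->async_size -= nr;
    }
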
310 struct file_ra_state *ra = ractl->ra; in force_page_cache_ra() local
322 max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages); in force_page_cache_ra()
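
force_page_cache_ra() serves callers that want a specific range read regardless of heuristics (POSIX_FADV_WILLNEED and similar), and line 322 lets such a request be sized by the larger of the device's io_pages and the per-file ra_pages limit. A trivial sketch of that cap; the parameter names are illustrative, not from the listing.

    /* Sketch of the max_t() at line 322: forced readahead may use the
     * larger of the device's optimal I/O size and the per-file window. */
    static unsigned long forced_ra_max(unsigned long io_pages,
                                       unsigned long ra_pages)
    {
            return io_pages > ra_pages ? io_pages : ra_pages;
    }
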
340 * for 128k (32 page) max ra
361 static unsigned long get_next_ra_size(struct file_ra_state *ra, in get_next_ra_size() argument
364 unsigned long cur = ra->size; in get_next_ra_size()
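
get_next_ra_size() is the window ramp-up: small windows grow aggressively, medium ones double, and everything is clamped to the per-device maximum; the comment fragment at line 340 refers to the same 32-page (128 KiB) default cap. A self-contained sketch of that growth curve follows; the exact thresholds (max/16, max/2) are an assumption based on the usual heuristic, not something this listing shows.

    #include <stdio.h>

    /* Sketch of the get_next_ra_size() ramp-up: quadruple tiny windows,
     * double medium ones, clamp at the maximum. Thresholds are assumed. */
    static unsigned long next_ra_size(unsigned long cur, unsigned long max)
    {
            if (cur < max / 16)
                    return 4 * cur;
            if (cur <= max / 2)
                    return 2 * cur;
            return max;
    }

    int main(void)
    {
            unsigned long size = 4, max = 32;   /* 32-page (128 KiB) cap */

            /* Show how a sequential reader's window climbs to the cap. */
            for (int step = 0; step < 5; step++) {
                    printf("step %d: size=%lu pages\n", step, size);
                    size = next_ra_size(size, max);
            }
            return 0;
    }
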
434 struct file_ra_state *ra, in try_context_readahead() argument
457 ra->start = index; in try_context_readahead()
458 ra->size = min(size + req_size, max); in try_context_readahead()
459 ra->async_size = 1; in try_context_readahead()
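
try_context_readahead() is the fallback for interleaved or stateless readers: it counts how many pages around the request are already cached and builds a window from that history. Lines 457-459 show the window assembly; here is the same assembly on the ra_state model above, where hist stands in for the measured history count (an assumption, since the listing does not show how it is computed).

    /* Sketch of the window assembly at the end of try_context_readahead():
     * start at the faulting index, size from history plus the request
     * (capped at max), and keep a one-page async tail. */
    static void ra_context_window(struct ra_state *ra, unsigned long index,
                                  unsigned long hist, unsigned long req_size,
                                  unsigned long max)
    {
            unsigned long size = hist + req_size;

            ra->start = index;
            ra->size = size < max ? size : max;   /* min(size + req_size, max) */
            ra->async_size = 1;
    }
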
487 struct file_ra_state *ra, unsigned int new_order) in page_cache_ra_order() argument
492 pgoff_t mark = index + ra->size - ra->async_size; in page_cache_ra_order()
496 if (!mapping_large_folio_support(mapping) || ra->size < 4) in page_cache_ra_order()
499 limit = min(limit, index + ra->size - 1); in page_cache_ra_order()
505 while ((1 << new_order) > ra->size) in page_cache_ra_order()
531 ra->size += index - limit - 1; in page_cache_ra_order()
532 ra->async_size += index - limit - 1; in page_cache_ra_order()
546 do_page_cache_ra(ractl, ra->size, ra->async_size); in page_cache_ra_order()
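
page_cache_ra_order() is the large-folio path: it bails out to the plain order-0 readahead (line 546) unless the mapping supports large folios and the window spans at least four pages (line 496), computes the folio that should trigger the next asynchronous readahead as index + size - async_size (line 492), and clamps the requested folio order so a single folio never exceeds the window (line 505). The adjustment at lines 531-532 then appears to credit the overshoot of a naturally aligned large folio back into the window accounting. A sketch of just the order clamp, where the decrement of new_order is assumed (the listing only shows the loop condition):

    /* Clamp the folio order so one folio of 2^order pages still fits in
     * the window. Safe because the caller has already rejected windows
     * smaller than 4 pages (line 496), so the loop terminates. */
    static unsigned int ra_clamp_order(unsigned int new_order,
                                       unsigned long ra_size)
    {
            while ((1UL << new_order) > ra_size)
                    new_order--;
            return new_order;
    }
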
556 struct file_ra_state *ra = ractl->ra; in ondemand_readahead() local
557 unsigned long max_pages = ra->ra_pages; in ondemand_readahead()
580 expected = round_down(ra->start + ra->size - ra->async_size, in ondemand_readahead()
582 if (index == expected || index == (ra->start + ra->size)) { in ondemand_readahead()
583 ra->start += ra->size; in ondemand_readahead()
584 ra->size = get_next_ra_size(ra, max_pages); in ondemand_readahead()
585 ra->async_size = ra->size; in ondemand_readahead()
606 ra->start = start; in ondemand_readahead()
607 ra->size = start - index; /* old async_size */ in ondemand_readahead()
608 ra->size += req_size; in ondemand_readahead()
609 ra->size = get_next_ra_size(ra, max_pages); in ondemand_readahead()
610 ra->async_size = ra->size; in ondemand_readahead()
625 prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT; in ondemand_readahead()
633 if (try_context_readahead(ractl->mapping, ra, index, req_size, in ondemand_readahead()
645 ra->start = index; in ondemand_readahead()
646 ra->size = get_init_ra_size(req_size, max_pages); in ondemand_readahead()
647 ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size; in ondemand_readahead()
656 if (index == ra->start && ra->size == ra->async_size) { in ondemand_readahead()
657 add_pages = get_next_ra_size(ra, max_pages); in ondemand_readahead()
658 if (ra->size + add_pages <= max_pages) { in ondemand_readahead()
659 ra->async_size = add_pages; in ondemand_readahead()
660 ra->size += add_pages; in ondemand_readahead()
662 ra->size = max_pages; in ondemand_readahead()
663 ra->async_size = max_pages >> 1; in ondemand_readahead()
667 ractl->_index = ra->start; in ondemand_readahead()
668 page_cache_ra_order(ractl, ra, order); in ondemand_readahead()
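
ondemand_readahead() is where the pieces above meet: a read at the expected offset (the start of the async tail, rounded to the folio order at line 580) or right past the current window slides the window forward and lets get_next_ra_size() grow it, while a miss rebuilds a fresh window at the faulting index (lines 645-647) and keeps the excess over the request as the async tail. The sketch below models those two branches on the ra_state model and next_ra_size() from the earlier sketches; the round_down(), the cache-hit run at lines 606-610, the context path and the extension at lines 656-663 are left out, and the power-of-two round-up is only a stand-in for get_init_ra_size().

    /* Hypothetical stand-in for get_init_ra_size(): round the request up
     * to a power of two; the real sizing rules are not shown here. */
    static unsigned long roundup_pow2(unsigned long n)
    {
            unsigned long p = 1;

            while (p < n)
                    p <<= 1;
            return p;
    }

    /* Sketch of two ondemand_readahead() branches: sequential hit versus
     * a fresh window at the faulting index. */
    static void ra_ondemand(struct ra_state *ra, unsigned long index,
                            unsigned long req_size)
    {
            unsigned long max_pages = ra->ra_pages;
            unsigned long expected = ra->start + ra->size - ra->async_size;

            if (index == expected || index == ra->start + ra->size) {
                    /* Sequential hit (lines 582-585): advance the window and
                     * make the whole new window the async tail. */
                    ra->start += ra->size;
                    ra->size = next_ra_size(ra->size, max_pages);
                    ra->async_size = ra->size;
                    return;
            }

            /* Fresh window at the faulting index (lines 645-647). */
            ra->start = index;
            ra->size = roundup_pow2(req_size);
            if (ra->size > max_pages)
                    ra->size = max_pages;
            ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
    }

Driving this with a steadily increasing index keeps taking the first branch, so with a 32-page ra_pages the window walks 4, 8, 16, 32 and the readahead marker always lands on the first folio of the freshly added window, which is what keeps the next I/O in flight before the reader drains the current one.
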
682 if (!ractl->ra->ra_pages || blk_cgroup_congested()) { in page_cache_sync_ra()
703 if (!ractl->ra->ra_pages) in page_cache_async_ra()
782 struct file_ra_state *ra = ractl->ra; in readahead_expand() local
836 if (ra) { in readahead_expand()
837 ra->size++; in readahead_expand()
838 ra->async_size++; in readahead_expand()
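
readahead_expand() grows an already-planned request page by page, for callers that need the window rounded out (for example to a filesystem or cache granule), and lines 836-838 show that each added page is counted into both size and async_size, which leaves the trigger point start + size - async_size where it was. A short sketch on the ra_state model above, where extra is the assumed number of pages the caller added:

    /* Sketch of the accounting in readahead_expand(): every page added to
     * the request widens both the window and its async tail. */
    static void ra_expand(struct ra_state *ra, unsigned long extra)
    {
            if (!ra)                  /* mirrors the "if (ra)" at line 836 */
                    return;
            ra->size += extra;
            ra->async_size += extra;
    }
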