Lines Matching +full:inactive +full:- +full:delay +full:- +full:ms
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
13 #include <linux/tracepoint-defs.h>
120 /* Incremented by the number of inactive pages that were scanned */
163 return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP)); in can_madv_lru_vma()
179 force_page_cache_ra(&ractl, &file->f_ra, nr_to_read); in force_page_cache_readahead()
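For context (not part of the search output), the wrapper this call sits in reads roughly as follows in v5.10-era trees, where DEFINE_READAHEAD() takes (ractl, file, mapping, index):

static inline void force_page_cache_readahead(struct address_space *mapping,
                struct file *file, pgoff_t index, unsigned long nr_to_read)
{
        /* Bundle mapping/file/index into a readahead_control for the ra core */
        DEFINE_READAHEAD(ractl, file, mapping, index);
        force_page_cache_ra(&ractl, &file->f_ra, nr_to_read);
}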
186 * page_evictable - test whether a page is evictable
189 * Test whether page is evictable--i.e., should be placed on active/inactive
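This kernel-doc belongs to page_evictable(); the mainline body of the same era is roughly (a sketch, not from this listing):

static inline bool page_evictable(struct page *page)
{
        bool ret;

        /* Prevent address_space of inode and swap cache from being freed */
        rcu_read_lock();
        ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
        rcu_read_unlock();
        return ret;
}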
209 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
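This comment introduces set_page_refcounted(); in mainline the helper itself is:

static inline void set_page_refcounted(struct page *page)
{
        VM_BUG_ON_PAGE(PageTail(page), page);
        VM_BUG_ON_PAGE(page_ref_count(page), page);
        /* ->_refcount goes 0 -> 1; the page now holds a normal reference */
        set_page_count(page, 1);
}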
318 if (zone->contiguous) in pageblock_pfn_to_page()
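This test is the fast path of pageblock_pfn_to_page(); the surrounding function, per mainline, is roughly:

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
                                unsigned long end_pfn, struct zone *zone)
{
        /* A zone known to be contiguous has no memory holes to validate */
        if (zone->contiguous)
                return pfn_to_page(start_pfn);

        /* Otherwise fall back to the hole-checking slow path */
        return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}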
402 * general, page_zone(page)->lock must be held by the caller to prevent the
404 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
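These two lines state the locking contract for reading a free page's buddy order; the helper they document (buddy_order() since v5.10, page_order() before) is simply:

static inline unsigned int buddy_order(struct page *page)
{
        /* PageBuddy() must be checked by the caller */
        return page_private(page);
}

Lockless callers use a READ_ONCE() variant, buddy_order_unsafe(), and must tolerate garbage values.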
437 * Executable code area - executable, not writable, not stack
445 * Stack area - automatically grows in one direction
456 * Data area - private, writable, not stack
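The three area comments (executable, stack, data) annotate the VMA-classification predicates; per mainline they read (sketch):

static inline bool is_exec_mapping(vm_flags_t flags)
{
        /* Executable code area - executable, not writable, not stack */
        return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

static inline bool is_stack_mapping(vm_flags_t flags)
{
        /* Stack area - automatically grows in one direction */
        return (flags & VM_STACK) == VM_STACK;
}

static inline bool is_data_mapping(vm_flags_t flags)
{
        /* Data area - private, writable, not stack */
        return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}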
475 munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end); in munlock_vma_pages_all()
486 * we want to unconditionally remove a page from the pagecache -- e.g.,
491 * is revert to lazy LRU behaviour -- semantics are not broken.
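The munlock line above is the entire body of a small wrapper, and the two comment fragments belong to clear_page_mlock(); assuming mainline, the pair reads:

/* Clear PageMlocked() unconditionally, e.g. on truncation or freeing */
extern void clear_page_mlock(struct page *page);

static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
        munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}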
496 * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
506 __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); in mlock_migrate_page()
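The NR_MLOCK decrement is half of mlock_migrate_page(), which transfers the Mlocked state, and the per-zone accounting, from the old page to its migration target; mainline sketch:

static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
        if (TestClearPageMlocked(page)) {
                int nr_pages = thp_nr_pages(page);

                /* Holding pmd lock, no change in irq context: __mod is safe */
                __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
                SetPageMlocked(newpage);
                __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
        }
}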
516 * Returns -EFAULT if all of the page is outside the range of vma.
525 VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */ in vma_address()
527 if (pgoff >= vma->vm_pgoff) { in vma_address()
528 address = vma->vm_start + in vma_address()
529 ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in vma_address()
531 if (address < vma->vm_start || address >= vma->vm_end) in vma_address()
532 address = -EFAULT; in vma_address()
534 pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) { in vma_address()
535 /* Test above avoids possibility of wrap to 0 on 32-bit */ in vma_address()
536 address = vma->vm_start; in vma_address()
538 address = -EFAULT; in vma_address()
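Fragments 525-538 cover nearly all of vma_address(), which maps a page's file offset to a user address within vma and returns -EFAULT when the page (or, for a compound head, every one of its subpages) lies outside the VMA; assembled per mainline:

static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
        pgoff_t pgoff;
        unsigned long address;

        VM_BUG_ON_PAGE(PageKsm(page), page);    /* KSM page->index unusable */
        pgoff = page_to_pgoff(page);
        if (pgoff >= vma->vm_pgoff) {
                address = vma->vm_start +
                        ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
                /* Check for address beyond vma (or wrapped through 0?) */
                if (address < vma->vm_start || address >= vma->vm_end)
                        address = -EFAULT;
        } else if (PageHead(page) &&
                   pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
                /* Test above avoids possibility of wrap to 0 on 32-bit */
                address = vma->vm_start;
        } else {
                address = -EFAULT;
        }
        return address;
}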
554 VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */ in vma_address_end()
556 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in vma_address_end()
558 if (address < vma->vm_start || address > vma->vm_end) in vma_address_end()
559 address = vma->vm_end; in vma_address_end()
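Likewise, fragments 554-559 are the core of vma_address_end(), which returns the first address past the page's mapping in vma, clamped to vma->vm_end; per mainline:

static inline unsigned long
vma_address_end(struct page *page, struct vm_area_struct *vma)
{
        pgoff_t pgoff;
        unsigned long address;

        VM_BUG_ON_PAGE(PageKsm(page), page);    /* KSM page->index unusable */
        pgoff = page_to_pgoff(page) + compound_nr(page);
        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        /* Clamp: a compound page may extend past the end of the VMA */
        if (address < vma->vm_start || address > vma->vm_end)
                address = vma->vm_end;
        return address;
}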
566 int flags = vmf->flags; in maybe_unlock_mmap_for_io()
578 fpin = get_file(vmf->vma->vm_file); in maybe_unlock_mmap_for_io()
579 mmap_read_unlock(vmf->vma->vm_mm); in maybe_unlock_mmap_for_io()
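These three fragments belong to maybe_unlock_mmap_for_io(), which pins the faulting file and drops mmap_lock so page-fault I/O can proceed without the lock held, but only on the first, retryable fault attempt; mainline sketch:

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
                                                    struct file *fpin)
{
        int flags = vmf->flags;

        /* Already pinned by an earlier call: nothing to do */
        if (fpin)
                return fpin;

        /* Only the first attempt may retry, and NOWAIT callers won't wait */
        if (fault_flag_allow_retry_first(flags) &&
            !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
                fpin = get_file(vmf->vma->vm_file);
                mmap_read_unlock(vmf->vma->vm_mm);
        }
        return fpin;
}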
610 if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) { in mem_map_next()
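This check is from mem_map_next(), an iterator over the subpages of a gigantic page that revalidates the pfn at every MAX_ORDER_NR_PAGES boundary, where mem_map may be discontiguous; mainline sketch:

static inline struct page *mem_map_next(struct page *iter,
                                                struct page *base, int offset)
{
        if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
                unsigned long pfn = page_to_pfn(base) + offset;

                if (!pfn_valid(pfn))
                        return NULL;
                return pfn_to_page(pfn);
        }
        /* Within a MAX_ORDER block, mem_map is contiguous */
        return iter + 1;
}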
669 #define NODE_RECLAIM_NOSCAN -2
670 #define NODE_RECLAIM_FULL -1
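Only the negative codes matched the query; the full set of node_reclaim() return values in mainline also includes:

#define NODE_RECLAIM_NONE       0
#define NODE_RECLAIM_SUCCESS    1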
700 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
707 #define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1)
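Between the comment and the mask definition sit the watermark indices themselves; per mainline:

#define ALLOC_WMARK_MIN         WMARK_MIN
#define ALLOC_WMARK_LOW         WMARK_LOW
#define ALLOC_WMARK_HIGH        WMARK_HIGH
#define ALLOC_NO_WATERMARKS     0x04    /* don't check watermarks at all */

With these values, ALLOC_WMARK_MASK (i.e. ALLOC_NO_WATERMARKS-1 = 0x03) picks the index bits back out of alloc_flags.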
710 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
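This comment (its sentence continues on the next, unmatched source line) heads the ALLOC_OOM definition; assuming mainline, it resolves to:

#ifdef CONFIG_MMU
#define ALLOC_OOM               0x08    /* dedicated reserve for OOM victims */
#else
#define ALLOC_OOM               ALLOC_NO_WATERMARKS
#endif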
780 #define DELAY_LV0 5000000 /* 5ms */
781 #define DELAY_LV1 10000000 /* 10ms */
782 #define DELAY_LV2 50000000 /* 50ms */
783 #define DELAY_LV3 100000000 /* 100ms */
784 #define DELAY_LV4 2000000000 /* 2000ms */
785 #define DELAY_LV5 50000000000 /* 50000ms */
790 u64 delay[NR_RA_STUBS]; member
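The DELAY_LV* values read as nanosecond thresholds (5ms up to 50000ms), and delay[] is a per-stub u64 accumulator in this vendor tree's readahead accounting. The struct and its users are not in the listing, so the following classifier is purely illustrative: delay_level() is an invented name, and mapping latencies to these buckets is an assumption.

/* Illustrative only: map a latency in ns to a DELAY_LV bucket index */
static inline int delay_level(u64 ns)
{
        if (ns < DELAY_LV0)
                return 0;       /* < 5ms */
        if (ns < DELAY_LV1)
                return 1;       /* 5ms - 10ms */
        if (ns < DELAY_LV2)
                return 2;       /* 10ms - 50ms */
        if (ns < DELAY_LV3)
                return 3;       /* 50ms - 100ms */
        if (ns < DELAY_LV4)
                return 4;       /* 100ms - 2000ms */
        if (ns < DELAY_LV5)
                return 5;       /* 2000ms - 50000ms */
        return 6;               /* >= 50000ms */
}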