Lines Matching +full:async +full:- +full:prefix

1 /* SPDX-License-Identifier: GPL-2.0-or-later */
14 #include <linux/tracepoint-defs.h>
62 #define FOLIO_PAGES_MAPPED (COMPOUND_MAPPED - 1)
76 return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED; in folio_nr_pages_mapped()
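
The two lines above show a common idiom: a single atomic counter whose low bits count per-page mappings and whose higher bit flags an entire-folio mapping, so masking with (flag - 1) recovers just the count. A minimal standalone sketch of that masking, using a hypothetical bit position rather than the kernel's actual COMPOUND_MAPPED value:

#include <stdio.h>

#define DEMO_COMPOUND_MAPPED  (1u << 23)                   /* hypothetical flag bit */
#define DEMO_PAGES_MAPPED     (DEMO_COMPOUND_MAPPED - 1)   /* mask for the count bits */

int main(void)
{
        unsigned int nr_pages_mapped = DEMO_COMPOUND_MAPPED | 37;  /* flag set, 37 pages mapped */

        /* Masking drops the flag bit and leaves only the per-page count. */
        printf("mapped pages: %u\n", nr_pages_mapped & DEMO_PAGES_MAPPED);  /* prints 37 */
        return 0;
}
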
81 unsigned long mapping = (unsigned long)folio->mapping; in folio_raw_mapping()
91 int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled); in acct_reclaim_writeback()
101 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED]; in wake_throttle_isolated()
129 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index); in force_page_cache_readahead()
146 * folio_evictable - Test whether a folio is evictable.
149 * Test whether @folio is evictable -- i.e., should be placed on
169 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
180 * Return true if a folio needs ->release_folio() calling upon it.
215 #define K(x) ((x) << (PAGE_SHIFT-10))
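
K(x) shifts a page count into kibibytes. Assuming 4 KiB pages (PAGE_SHIFT == 12, an assumption, not something fixed by this header), the shift amount is 12 - 10 = 2, i.e. K(x) == x * 4:

#include <stdio.h>

#define PAGE_SHIFT 12                           /* assumed 4 KiB pages */
#define K(x) ((x) << (PAGE_SHIFT - 10))         /* page count -> KiB */

int main(void)
{
        printf("%d pages = %d KiB\n", 256, K(256));   /* 256 pages = 1024 KiB */
        return 0;
}
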
264 * general, page_zone(page)->lock must be held by the caller to prevent the
266 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
298 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
350 * function is used in the performance-critical __free_one_page().
366 buddy = page + (__buddy_pfn - pfn); in find_buddy_page_pfn()
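
The find_buddy_page_pfn() line above converts a buddy PFN back into a page pointer by offsetting from the current page. The pairing rule behind it is the classic buddy-allocator one: at a given order, the buddy PFN differs only in bit `order`. A standalone sketch of that arithmetic (an illustration, not the kernel helper):

#include <stdio.h>

/* At order N, blocks of 2^N pages pair up; the buddy differs in bit N. */
static unsigned long demo_buddy_pfn(unsigned long pfn, unsigned int order)
{
        return pfn ^ (1UL << order);    /* flip the order-th bit of the PFN */
}

int main(void)
{
        unsigned long pfn = 0x1200;

        /* Order-3 blocks of 8 pages pair up: 0x1200 <-> 0x1208. */
        printf("buddy of %#lx at order 3: %#lx\n", pfn, demo_buddy_pfn(pfn, 3));
        return 0;
}
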
381 if (zone->contiguous) in pageblock_pfn_to_page()
391 zone->contiguous = false; in clear_zone_contiguous()
403 * caller passes in a non-large folio.
410 folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order; in folio_set_order()
412 folio->_folio_nr_pages = 1U << order; in folio_set_order()
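
folio_set_order() packs the order into the low byte of a flags word and caches the resulting page count as 1 << order. A standalone illustration of that packing (the field layout here is simplified, not the kernel's struct folio):

#include <stdio.h>

int main(void)
{
        unsigned long flags_1 = 0xabcd0005UL;   /* low byte holds a stale order */
        unsigned int order = 9;                 /* 2^9 = 512 pages, e.g. 2 MiB with 4 KiB pages */

        flags_1 = (flags_1 & ~0xffUL) | order;  /* clear the low byte, store the new order */
        printf("flags_1 = %#lx, nr_pages = %u\n", flags_1, 1U << order);  /* 0xabcd0009, 512 */
        return 0;
}
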
423 atomic_set(&folio->_entire_mapcount, -1); in prep_compound_head()
424 atomic_set(&folio->_nr_pages_mapped, 0); in prep_compound_head()
425 atomic_set(&folio->_pincount, 0); in prep_compound_head()
432 p->mapping = TAIL_MAPPING; in prep_compound_tail()
498 enum migrate_mode mode; /* Async or sync migration mode */
543 return list_empty(&area->free_list[migratetype]); in free_area_empty()
551 * Executable code area - executable, not writable, not stack
570 * Data area - private, writable, not stack
608 * 1) VM_IO check prevents migration from double-counting during mlock. in mlock_vma_folio()
611 * file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may in mlock_vma_folio()
614 if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) && in mlock_vma_folio()
623 if (unlikely(vma->vm_flags & VM_LOCKED) && in munlock_vma_folio()
645 if (pgoff >= vma->vm_pgoff) { in vma_pgoff_address()
646 address = vma->vm_start + in vma_pgoff_address()
647 ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in vma_pgoff_address()
649 if (address < vma->vm_start || address >= vma->vm_end) in vma_pgoff_address()
650 address = -EFAULT; in vma_pgoff_address()
651 } else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) { in vma_pgoff_address()
652 /* Test above avoids possibility of wrap to 0 on 32-bit */ in vma_pgoff_address()
653 address = vma->vm_start; in vma_pgoff_address()
655 address = -EFAULT; in vma_pgoff_address()
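
The vma_pgoff_address() lines above map a file page offset to the user virtual address that would back it: take the distance from the VMA's starting page offset, shift it into bytes, add vm_start, then range-check against the VMA. A simplified standalone sketch of that arithmetic (returning 0 instead of -EFAULT for brevity; field names mirror the kernel's but this is not the kernel code):

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB pages */

struct demo_vma {
        unsigned long vm_start;   /* first virtual address of the mapping */
        unsigned long vm_end;     /* one past the last virtual address */
        unsigned long vm_pgoff;   /* file page offset that vm_start maps */
};

/* Return the virtual address backing file page @pgoff, or 0 if outside the VMA. */
static unsigned long demo_pgoff_address(const struct demo_vma *vma, unsigned long pgoff)
{
        unsigned long address;

        if (pgoff < vma->vm_pgoff)
                return 0;
        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        if (address >= vma->vm_end)
                return 0;
        return address;
}

int main(void)
{
        struct demo_vma vma = { 0x400000, 0x500000, 0x10 };

        /* File page 0x12 lies two pages past vm_start: 0x400000 + 0x2000 = 0x402000. */
        printf("%#lx\n", demo_pgoff_address(&vma, 0x12));
        return 0;
}
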
662 * Returns -EFAULT if all of the page is outside the range of vma.
668 VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */ in vma_address()
678 struct vm_area_struct *vma = pvmw->vma; in vma_address_end()
682 /* Common case, plus ->pgoff is invalid for KSM */ in vma_address_end()
683 if (pvmw->nr_pages == 1) in vma_address_end()
684 return pvmw->address + PAGE_SIZE; in vma_address_end()
686 pgoff = pvmw->pgoff + pvmw->nr_pages; in vma_address_end()
687 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in vma_address_end()
689 if (address < vma->vm_start || address > vma->vm_end) in vma_address_end()
690 address = vma->vm_end; in vma_address_end()
697 int flags = vmf->flags; in maybe_unlock_mmap_for_io()
709 fpin = get_file(vmf->vma->vm_file); in maybe_unlock_mmap_for_io()
742 #define mminit_dprintk(level, prefix, fmt, arg...) \ argument
746 pr_warn("mminit::" prefix " " fmt, ##arg); \
748 printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
757 const char *prefix, const char *fmt, ...) in mminit_dprintk() argument
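
The mminit_dprintk() lines show a level-gated debug-print pattern: the message is emitted only when its level does not exceed a runtime threshold, and a fixed prefix tags the subsystem. A minimal sketch of the same pattern outside the kernel, using the same GNU named-variadic-macro extension as the original (the names and threshold variable here are illustrative):

#include <stdio.h>

static int demo_loglevel = 1;   /* stand-in for a runtime verbosity threshold */

#define demo_dprintk(level, prefix, fmt, arg...)                        \
        do {                                                            \
                if ((level) <= demo_loglevel)                           \
                        fprintf(stderr, "demo::" prefix " " fmt, ##arg); \
        } while (0)

int main(void)
{
        demo_dprintk(0, "init", "printed, level %d\n", 0);      /* shown */
        demo_dprintk(2, "init", "suppressed, level %d\n", 2);   /* filtered out */
        return 0;
}
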
770 #define NODE_RECLAIM_NOSCAN -2
771 #define NODE_RECLAIM_FULL -1
791 * mm/memory-failure.c
810 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
817 #define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1)
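
The ALLOC_WMARK lines use the masked low bits of the allocation flags as an index into a per-zone watermark array, with the mask derived from the next flag value minus one. A sketch of that indexing idiom with hypothetical flag and watermark values:

#include <stdio.h>

#define DEMO_WMARK_MIN       0x00
#define DEMO_WMARK_LOW       0x01
#define DEMO_WMARK_HIGH      0x02
#define DEMO_NO_WATERMARKS   0x04                     /* hypothetical flag above the indices */
#define DEMO_WMARK_MASK      (DEMO_NO_WATERMARKS - 1) /* 0x03: keeps only the index bits */

static const unsigned long watermark[] = { 128, 256, 384 };   /* min, low, high (made up) */

int main(void)
{
        unsigned int alloc_flags = DEMO_WMARK_HIGH | 0x40;    /* index bits plus other flags */

        printf("watermark = %lu pages\n", watermark[alloc_flags & DEMO_WMARK_MASK]);  /* 384 */
        return 0;
}
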
820 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
921 return -EINVAL; in vmap_pages_range_noflush()
957 /* we are working on non-current tsk/mm */
961 /* gup_fast: prevent fall-back to slow gup */
968 * Indicates for which pages that are write-protected in the page table,
976 * * GUP-fast and fork(): mm->write_protect_seq
977 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
982 * PTE-mapped THP.
984 * If the vma is NULL, we're coming from the GUP-fast path and might have
992 * has to be writable -- and if it references (part of) an anonymous in gup_must_unshare()
1003 * We only care about R/O long-term pining: R/O short-term in gup_must_unshare()
1018 return is_cow_mapping(vma->vm_flags); in gup_must_unshare()
1026 * During GUP-fast we might not get called on the head page for a in gup_must_unshare()
1027 * hugetlb page that is mapped using cont-PTE, because GUP-fast does in gup_must_unshare()
1030 * page (as it cannot be partially COW-shared), so lookup the head page. in gup_must_unshare()
1048 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty in vma_soft_dirty_enabled()
1049 * enablements, because when without soft-dirty being compiled in, in vma_soft_dirty_enabled()
1057 * Soft-dirty is kind of special: its tracking is enabled when the in vma_soft_dirty_enabled()
1060 return !(vma->vm_flags & VM_SOFTDIRTY); in vma_soft_dirty_enabled()
1066 MAS_BUG_ON(&vmi->mas, vmi->mas.node != MAS_START && in vma_iter_config()
1067 (vmi->mas.index > index || vmi->mas.last < index)); in vma_iter_config()
1068 __mas_set_range(&vmi->mas, index, last - 1); in vma_iter_config()
1077 return mas_preallocate(&vmi->mas, vma, GFP_KERNEL); in vma_iter_prealloc()
1082 mas_store_prealloc(&vmi->mas, NULL); in vma_iter_clear()
1088 __mas_set_range(&vmi->mas, start, end - 1); in vma_iter_clear_gfp()
1089 mas_store_gfp(&vmi->mas, NULL, gfp); in vma_iter_clear_gfp()
1090 if (unlikely(mas_is_err(&vmi->mas))) in vma_iter_clear_gfp()
1091 return -ENOMEM; in vma_iter_clear_gfp()
1098 return mas_walk(&vmi->mas); in vma_iter_load()
1107 if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START && in vma_iter_store()
1108 vmi->mas.index > vma->vm_start)) { in vma_iter_store()
1109 pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n", in vma_iter_store()
1110 vmi->mas.index, vma->vm_start, vma->vm_start, in vma_iter_store()
1111 vma->vm_end, vmi->mas.index, vmi->mas.last); in vma_iter_store()
1113 if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START && in vma_iter_store()
1114 vmi->mas.last < vma->vm_start)) { in vma_iter_store()
1115 pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n", in vma_iter_store()
1116 vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end, in vma_iter_store()
1117 vmi->mas.index, vmi->mas.last); in vma_iter_store()
1121 if (vmi->mas.node != MAS_START && in vma_iter_store()
1122 ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start))) in vma_iter_store()
1125 __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1); in vma_iter_store()
1126 mas_store_prealloc(&vmi->mas, vma); in vma_iter_store()
1132 if (vmi->mas.node != MAS_START && in vma_iter_store_gfp()
1133 ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start))) in vma_iter_store_gfp()
1136 __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1); in vma_iter_store_gfp()
1137 mas_store_gfp(&vmi->mas, vma, gfp); in vma_iter_store_gfp()
1138 if (unlikely(mas_is_err(&vmi->mas))) in vma_iter_store_gfp()
1139 return -ENOMEM; in vma_iter_store_gfp()
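
The vma_iter_store()/vma_iter_store_gfp() lines pass vm_end - 1 when setting the range: a VMA covers the half-open interval [vm_start, vm_end), while the maple-tree range being stored is inclusive of its last index, hence the - 1. A trivial standalone illustration of that conversion (no maple tree involved):

#include <stdio.h>

int main(void)
{
        unsigned long vm_start = 0x1000, vm_end = 0x3000;   /* VMA covers [vm_start, vm_end) */
        unsigned long first = vm_start, last = vm_end - 1;  /* inclusive index range */

        printf("store over [%#lx, %#lx]\n", first, last);   /* [0x1000, 0x2fff] */
        return 0;
}
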