2 * mm/rmap.c - physical to virtual reverse mappings
23 * inode->i_rwsem (while writing or truncating, not reading or faulting)
24 * mm->mmap_lock
25 * mapping->invalidate_lock (in filemap_fault)
26 * page->flags PG_locked (lock_page)
29 * mapping->i_mmap_rwsem
30 * anon_vma->rwsem
31 * mm->page_table_lock or pte_lock
34 * mapping->private_lock (in block_dirty_folio)
37 * lruvec->lru_lock (in folio_lruvec_lock_irq)
38 * inode->i_lock (in set_page_dirty's __mark_inode_dirty)
39 * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
40 * sb_lock (within inode_lock in fs/fs-writeback.c)
42 * in arch-dependent flush_dcache_mmap_lock,
43 * within bdi.wb->list_lock in __sync_single_inode)
45 * anon_vma->rwsem, mapping->i_mmap_rwsem (memory_failure, collect_procs_anon)
46 * ->tasklist_lock
52 * mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
53 * page->flags PG_locked (lock_page)
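The hierarchy above reads top-down: a lock may be taken while anything listed above it is already held, never the other way around. As a hedged illustration (not code from mm/rmap.c; the locking helpers are real kernel APIs, but the function itself is invented), a truncate-style path obeying one slice of this ordering might look like:

#include <linux/fs.h>

static void lock_nesting_sketch(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;

	inode_lock(inode);			/* inode->i_rwsem */
	filemap_invalidate_lock(mapping);	/* mapping->invalidate_lock */
	i_mmap_lock_write(mapping);		/* mapping->i_mmap_rwsem */
	/* ... walk mapping->i_mmap and unmap, unmap_mapping_range()-style ... */
	i_mmap_unlock_write(mapping);
	filemap_invalidate_unlock(mapping);
	inode_unlock(inode);
}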
73 #include <linux/backing-dev.h>
97 atomic_set(&anon_vma->refcount, 1); in anon_vma_alloc()
98 anon_vma->num_children = 0; in anon_vma_alloc()
99 anon_vma->num_active_vmas = 0; in anon_vma_alloc()
100 anon_vma->parent = anon_vma; in anon_vma_alloc()
105 anon_vma->root = anon_vma; in anon_vma_alloc()
113 VM_BUG_ON(atomic_read(&anon_vma->refcount)); in anon_vma_free()
133 if (rwsem_is_locked(&anon_vma->root->rwsem)) { in anon_vma_free()
155 avc->vma = vma; in anon_vma_chain_link()
156 avc->anon_vma = anon_vma; in anon_vma_chain_link()
157 list_add(&avc->same_vma, &vma->anon_vma_chain); in anon_vma_chain_link()
158 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); in anon_vma_chain_link()
162 * __anon_vma_prepare - attach an anon_vma to a memory region
172 * can re-use the anon_vma from (very common when the only
176 * Anon-vma allocations are very subtle, because we may have
191 struct mm_struct *mm = vma->vm_mm; in __anon_vma_prepare()
207 anon_vma->num_children++; /* self-parent link for new root */ in __anon_vma_prepare()
213 spin_lock(&mm->page_table_lock); in __anon_vma_prepare()
214 if (likely(!vma->anon_vma)) { in __anon_vma_prepare()
215 vma->anon_vma = anon_vma; in __anon_vma_prepare()
217 anon_vma->num_active_vmas++; in __anon_vma_prepare()
221 spin_unlock(&mm->page_table_lock); in __anon_vma_prepare()
234 return -ENOMEM; in __anon_vma_prepare()
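For orientation, here is a hedged sketch of how __anon_vma_prepare() gets reached (this is not code from mm/rmap.c; the real caller is do_anonymous_page() in mm/memory.c, and error handling plus page-table manipulation are elided):

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/swap.h>

static vm_fault_t anon_fault_sketch(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio;

	/* Fast path: returns 0 immediately if vma->anon_vma is set,
	 * otherwise falls into __anon_vma_prepare() above. */
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;

	folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
	if (!folio)
		return VM_FAULT_OOM;

	/* Brand-new anonymous folio: see folio_add_new_anon_rmap() below. */
	folio_add_new_anon_rmap(folio, vma, vmf->address);
	folio_add_lru_vma(folio, vma);
	/* ... install the PTE under the page table lock ... */
	return 0;
}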
239 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
247 struct anon_vma *new_root = anon_vma->root; in lock_anon_vma_root()
250 up_write(&root->rwsem); in lock_anon_vma_root()
252 down_write(&root->rwsem); in lock_anon_vma_root()
260 up_write(&root->rwsem); in unlock_anon_vma_root()
265 * Returns 0 on success, -ENOMEM on failure.
270 * prevent endless growth of anon_vma. Since dst->anon_vma is set to NULL before
271 * call, we can identify this case by checking (!dst->anon_vma &&
272 * src->anon_vma).
274 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
287 list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { in anon_vma_clone()
298 anon_vma = pavc->anon_vma; in anon_vma_clone()
307 * it has self-parent reference and at least one child. in anon_vma_clone()
309 if (!dst->anon_vma && src->anon_vma && in anon_vma_clone()
310 anon_vma->num_children < 2 && in anon_vma_clone()
311 anon_vma->num_active_vmas == 0) in anon_vma_clone()
312 dst->anon_vma = anon_vma; in anon_vma_clone()
314 if (dst->anon_vma) in anon_vma_clone()
315 dst->anon_vma->num_active_vmas++; in anon_vma_clone()
321 * dst->anon_vma is dropped here otherwise its num_active_vmas can in anon_vma_clone()
324 * about dst->anon_vma if anon_vma_clone() failed. in anon_vma_clone()
326 dst->anon_vma = NULL; in anon_vma_clone()
328 return -ENOMEM; in anon_vma_clone()
334 * Returns 0 on success, non-zero on failure.
343 if (!pvma->anon_vma) in anon_vma_fork()
347 vma->anon_vma = NULL; in anon_vma_fork()
351 * so rmap can find non-COWed pages in child processes. in anon_vma_fork()
358 if (vma->anon_vma) in anon_vma_fork()
365 anon_vma->num_active_vmas++; in anon_vma_fork()
374 anon_vma->root = pvma->anon_vma->root; in anon_vma_fork()
375 anon_vma->parent = pvma->anon_vma; in anon_vma_fork()
381 get_anon_vma(anon_vma->root); in anon_vma_fork()
383 vma->anon_vma = anon_vma; in anon_vma_fork()
386 anon_vma->parent->num_children++; in anon_vma_fork()
395 return -ENOMEM; in anon_vma_fork()
407 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { in unlink_anon_vmas()
408 struct anon_vma *anon_vma = avc->anon_vma; in unlink_anon_vmas()
411 anon_vma_interval_tree_remove(avc, &anon_vma->rb_root); in unlink_anon_vmas()
414 * Leave empty anon_vmas on the list - we'll need in unlink_anon_vmas()
417 if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) { in unlink_anon_vmas()
418 anon_vma->parent->num_children--; in unlink_anon_vmas()
422 list_del(&avc->same_vma); in unlink_anon_vmas()
425 if (vma->anon_vma) { in unlink_anon_vmas()
426 vma->anon_vma->num_active_vmas--; in unlink_anon_vmas()
432 vma->anon_vma = NULL; in unlink_anon_vmas()
439 * needing to write-acquire the anon_vma->root->rwsem. in unlink_anon_vmas()
441 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { in unlink_anon_vmas()
442 struct anon_vma *anon_vma = avc->anon_vma; in unlink_anon_vmas()
444 VM_WARN_ON(anon_vma->num_children); in unlink_anon_vmas()
445 VM_WARN_ON(anon_vma->num_active_vmas); in unlink_anon_vmas()
448 list_del(&avc->same_vma); in unlink_anon_vmas()
457 init_rwsem(&anon_vma->rwsem); in anon_vma_ctor()
458 atomic_set(&anon_vma->refcount, 0); in anon_vma_ctor()
459 anon_vma->rb_root = RB_ROOT_CACHED; in anon_vma_ctor()
491 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
501 anon_mapping = (unsigned long)READ_ONCE(folio->mapping); in folio_get_anon_vma()
507 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); in folio_get_anon_vma()
508 if (!atomic_inc_not_zero(&anon_vma->refcount)) { in folio_get_anon_vma()
517 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero() in folio_get_anon_vma()
535 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
537 * on !rwc->try_lock case.
547 anon_mapping = (unsigned long)READ_ONCE(folio->mapping); in folio_lock_anon_vma_read()
553 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); in folio_lock_anon_vma_read()
554 root_anon_vma = READ_ONCE(anon_vma->root); in folio_lock_anon_vma_read()
555 if (down_read_trylock(&root_anon_vma->rwsem)) { in folio_lock_anon_vma_read()
562 up_read(&root_anon_vma->rwsem); in folio_lock_anon_vma_read()
568 if (rwc && rwc->try_lock) { in folio_lock_anon_vma_read()
570 rwc->contended = true; in folio_lock_anon_vma_read()
575 if (!atomic_inc_not_zero(&anon_vma->refcount)) { in folio_lock_anon_vma_read()
590 if (atomic_dec_and_test(&anon_vma->refcount)) { in folio_lock_anon_vma_read()
593 * and bail -- can't simply use put_anon_vma() because in folio_lock_anon_vma_read()
617 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc; in try_to_unmap_flush()
619 if (!tlb_ubc->flush_required) in try_to_unmap_flush()
622 arch_tlbbatch_flush(&tlb_ubc->arch); in try_to_unmap_flush()
623 tlb_ubc->flush_required = false; in try_to_unmap_flush()
624 tlb_ubc->writable = false; in try_to_unmap_flush()
630 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc; in try_to_unmap_flush_dirty()
632 if (tlb_ubc->writable) in try_to_unmap_flush_dirty()
637 * Bits 0-14 of mm->tlb_flush_batched record pending generations.
638 * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
642 ((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
649 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc; in set_tlb_ubc_flush_pending()
656 arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr); in set_tlb_ubc_flush_pending()
657 tlb_ubc->flush_required = true; in set_tlb_ubc_flush_pending()
660 * Ensure compiler does not re-order the setting of tlb_flush_batched in set_tlb_ubc_flush_pending()
664 batch = atomic_read(&mm->tlb_flush_batched); in set_tlb_ubc_flush_pending()
672 if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1)) in set_tlb_ubc_flush_pending()
675 atomic_inc(&mm->tlb_flush_batched); in set_tlb_ubc_flush_pending()
684 tlb_ubc->writable = true; in set_tlb_ubc_flush_pending()
716 int batch = atomic_read(&mm->tlb_flush_batched); in flush_tlb_batched_pending()
724 * mm->tlb_flush_batched as is, to avoid losing flushing. in flush_tlb_batched_pending()
726 atomic_cmpxchg(&mm->tlb_flush_batched, batch, in flush_tlb_batched_pending()
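The generation scheme described at lines 637-642 can be modelled in ordinary userspace C (a sketch, not kernel code; the atomics and flush_tlb_mm() are replaced by plain arithmetic and a comment):

#include <stdio.h>

#define TLB_FLUSH_BATCH_FLUSHED_SHIFT	16
#define TLB_FLUSH_BATCH_PENDING_MASK \
	((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)

int main(void)
{
	int batch = 0;		/* models mm->tlb_flush_batched */
	int pending, flushed;

	/* set_tlb_ubc_flush_pending(): two batched unmaps, two generations. */
	batch++;		/* atomic_inc() in the kernel */
	batch++;

	/* flush_tlb_batched_pending(): unpack and compare the two halves. */
	pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
	flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;

	if (pending != flushed) {
		/* flush_tlb_mm(mm) would run here; then generations up to
		 * `pending' are marked flushed.  The kernel uses cmpxchg so
		 * a racing increment is never overwritten. */
		batch = pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT);
	}

	printf("pending=%d flushed=%d\n",
	       batch & TLB_FLUSH_BATCH_PENDING_MASK,
	       batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT);
	return 0;
}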
755 if (!vma->anon_vma || !page__anon_vma || in page_address_in_vma()
756 vma->anon_vma->root != page__anon_vma->root) in page_address_in_vma()
757 return -EFAULT; in page_address_in_vma()
758 } else if (!vma->vm_file) { in page_address_in_vma()
759 return -EFAULT; in page_address_in_vma()
760 } else if (vma->vm_file->f_mapping != folio->mapping) { in page_address_in_vma()
761 return -EFAULT; in page_address_in_vma()
816 if (!(vma->vm_flags & VM_PURGEABLE)) in folio_referenced_one()
817 pra->vm_flags &= ~VM_PURGEABLE; in folio_referenced_one()
819 if ((vma->vm_flags & VM_LOCKED) && in folio_referenced_one()
824 pra->vm_flags |= VM_LOCKED; in folio_referenced_one()
843 /* unexpected pmd-mapped folio? */ in folio_referenced_one()
847 pra->mapcount--; in folio_referenced_one()
856 pra->referenced++; in folio_referenced_one()
857 pra->vm_flags |= vma->vm_flags & ~VM_LOCKED; in folio_referenced_one()
859 pra->vm_flags |= vma->vm_flags & ~VM_PURGEABLE; in folio_referenced_one()
863 if (!pra->mapcount) in folio_referenced_one()
872 struct mem_cgroup *memcg = pra->memcg; in invalid_folio_referenced_vma()
887 if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) in invalid_folio_referenced_vma()
894 * folio_referenced() - Test if the folio was referenced.
898 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
902 * Return: The number of mappings which referenced the folio. Return -1 if
943 return rwc.contended ? -1 : pra.referenced; in folio_referenced()
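A hedged sketch of a folio_referenced() caller (not from mm/rmap.c; it is loosely modelled on folio_check_references() in mm/vmscan.c, and the function name is illustrative):

#include <linux/mm.h>
#include <linux/rmap.h>

static bool folio_was_referenced_sketch(struct folio *folio)
{
	unsigned long vm_flags;
	int referenced;

	/* The caller here does not hold the folio lock: is_locked == 0. */
	referenced = folio_referenced(folio, 0, NULL, &vm_flags);
	if (referenced == -1)
		return true;	/* rwc.try_lock contention: assume referenced */
	if (vm_flags & VM_LOCKED)
		return true;	/* an mlocked VMA maps it: keep it resident */
	return referenced > 0;
}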
949 struct vm_area_struct *vma = pvmw->vma; in page_vma_mkclean_one()
951 unsigned long address = pvmw->address; in page_vma_mkclean_one()
958 vma->vm_mm, address, vma_address_end(pvmw)); in page_vma_mkclean_one()
964 address = pvmw->address; in page_vma_mkclean_one()
965 if (pvmw->pte) { in page_vma_mkclean_one()
966 pte_t *pte = pvmw->pte; in page_vma_mkclean_one()
976 set_pte_at(vma->vm_mm, address, pte, entry); in page_vma_mkclean_one()
980 pmd_t *pmd = pvmw->pmd; in page_vma_mkclean_one()
991 set_pmd_at(vma->vm_mm, address, pmd, entry); in page_vma_mkclean_one()
994 /* unexpected pmd-mapped folio? */ in page_vma_mkclean_one()
1021 if (vma->vm_flags & VM_SHARED) in invalid_mkclean_vma()
1053 * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of
1079 VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma); in pfn_mkclean_range()
1100 mapcount += atomic_read(&folio_page(folio, i)->_mapcount); in folio_total_mapcount()
1102 /* But each of those _mapcounts was based on -1 */ in folio_total_mapcount()
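A standalone illustration (plain C, not kernel code) of the comment at line 1102: every per-page _mapcount starts at -1, so after summing the subpage counters the total must add folio_nr_pages() back in:

#include <assert.h>

int main(void)
{
	int nr_pages = 4;			/* a small folio, for the example */
	int _mapcount[4] = { -1, -1, -1, -1 };	/* freshly allocated state */
	int mapcount = 0, i;

	_mapcount[0]++;		/* one PTE maps page 0 */
	_mapcount[0]++;		/* a second process maps page 0 */
	_mapcount[2]++;		/* one PTE maps page 2 */

	for (i = 0; i < nr_pages; i++)
		mapcount += _mapcount[i];
	mapcount += nr_pages;	/* "each of those _mapcounts was based on -1" */

	assert(mapcount == 3);	/* three PTE mappings in total */
	return 0;
}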
1108 * page_move_anon_rmap - move a page to our anon_vma
1119 void *anon_vma = vma->anon_vma; in page_move_anon_rmap()
1131 WRITE_ONCE(folio->mapping, anon_vma); in page_move_anon_rmap()
1136 * __page_set_anon_rmap - set up new anonymous rmap
1146 struct anon_vma *anon_vma = vma->anon_vma; in __page_set_anon_rmap()
1159 anon_vma = anon_vma->root; in __page_set_anon_rmap()
1162 * page_idle does a lockless/optimistic rmap scan on folio->mapping. in __page_set_anon_rmap()
1168 WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); in __page_set_anon_rmap()
1169 folio->index = linear_page_index(vma, address); in __page_set_anon_rmap()
1176 * __page_check_anon_rmap - sanity check anonymous rmap addition
1186 * The page's anon-rmap details (mapping and index) are guaranteed to in __page_check_anon_rmap()
1196 VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, in __page_check_anon_rmap()
1203 * page_add_anon_rmap - add pte mapping to an anonymous page
1218 atomic_t *mapped = &folio->_nr_pages_mapped; in page_add_anon_rmap()
1220 bool compound = flags & RMAP_COMPOUND; in page_add_anon_rmap() local
1224 if (likely(!compound)) { in page_add_anon_rmap()
1225 first = atomic_inc_and_test(&page->_mapcount); in page_add_anon_rmap()
1234 first = atomic_inc_and_test(&folio->_entire_mapcount); in page_add_anon_rmap()
1239 nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); in page_add_anon_rmap()
1267 mlock_vma_folio(folio, vma, compound); in page_add_anon_rmap()
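A standalone model (plain C, not kernel code) of the _nr_pages_mapped arithmetic in the compound branch above; COMPOUND_MAPPED and FOLIO_PAGES_MAPPED use their mainline values, and the scenario is invented:

#include <assert.h>

#define COMPOUND_MAPPED		0x800000
#define FOLIO_PAGES_MAPPED	(COMPOUND_MAPPED - 1)

int main(void)
{
	int nr_pages = 512;		/* a PMD-sized THP */
	int nr_pages_mapped = 0;	/* models folio->_nr_pages_mapped */
	int nr, nr_pmdmapped;

	/* Two subpages are already PTE-mapped; each bumped this counter
	 * (and NR_ANON_MAPPED) by one when it was first mapped. */
	nr_pages_mapped += 2;

	/* The folio now gains its first PMD mapping. */
	nr_pages_mapped += COMPOUND_MAPPED;
	nr = nr_pages_mapped;
	nr_pmdmapped = nr_pages;
	nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);

	/* Only the 510 pages not already counted are added to the stats. */
	assert(nr == 510);
	return 0;
}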
1271 * folio_add_new_anon_rmap - Add mapping to a new anonymous folio.
1277 * This means the inc-and-test can be bypassed.
1288 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); in folio_add_new_anon_rmap()
1292 /* increment count (starts at -1) */ in folio_add_new_anon_rmap()
1293 atomic_set(&folio->_mapcount, 0); in folio_add_new_anon_rmap()
1296 /* increment count (starts at -1) */ in folio_add_new_anon_rmap()
1297 atomic_set(&folio->_entire_mapcount, 0); in folio_add_new_anon_rmap()
1298 atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED); in folio_add_new_anon_rmap()
1304 __page_set_anon_rmap(folio, &folio->page, vma, address, 1); in folio_add_new_anon_rmap()
1308 * folio_add_file_rmap_range - add pte mapping to page range of a folio
1313 * @compound: charge the page as compound or small page
1321 bool compound) in folio_add_file_rmap_range() argument
1323 atomic_t *mapped = &folio->_nr_pages_mapped; in folio_add_file_rmap_range()
1327 VM_WARN_ON_FOLIO(compound && !folio_test_pmd_mappable(folio), folio); in folio_add_file_rmap_range()
1330 if (likely(!compound)) { in folio_add_file_rmap_range()
1332 first = atomic_inc_and_test(&page->_mapcount); in folio_add_file_rmap_range()
1340 } while (page++, --nr_pages > 0); in folio_add_file_rmap_range()
1344 first = atomic_inc_and_test(&folio->_entire_mapcount); in folio_add_file_rmap_range()
1349 nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); in folio_add_file_rmap_range()
1366 mlock_vma_folio(folio, vma, compound); in folio_add_file_rmap_range()
1370 * page_add_file_rmap - add pte mapping to a file page
1373 * @compound: charge the page as compound or small page
1378 bool compound) in page_add_file_rmap() argument
1383 VM_WARN_ON_ONCE_PAGE(compound && !PageTransHuge(page), page); in page_add_file_rmap()
1385 if (likely(!compound)) in page_add_file_rmap()
1390 folio_add_file_rmap_range(folio, page, nr_pages, vma, compound); in page_add_file_rmap()
1394 * page_remove_rmap - take down pte mapping from a page
1397 * @compound: uncharge the page as compound or small page
1402 bool compound) in page_remove_rmap() argument
1405 atomic_t *mapped = &folio->_nr_pages_mapped; in page_remove_rmap()
1410 VM_BUG_ON_PAGE(compound && !PageHead(page), page); in page_remove_rmap()
1415 atomic_dec(&folio->_entire_mapcount); in page_remove_rmap()
1420 if (likely(!compound)) { in page_remove_rmap()
1421 last = atomic_add_negative(-1, &page->_mapcount); in page_remove_rmap()
1430 last = atomic_add_negative(-1, &folio->_entire_mapcount); in page_remove_rmap()
1435 nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); in page_remove_rmap()
1453 __lruvec_stat_mod_folio(folio, idx, -nr_pmdmapped); in page_remove_rmap()
1457 __lruvec_stat_mod_folio(folio, idx, -nr); in page_remove_rmap()
1465 if (!compound || nr < nr_pmdmapped) in page_remove_rmap()
1477 munlock_vma_folio(folio, vma, compound); in page_remove_rmap()
1486 struct mm_struct *mm = vma->vm_mm; in try_to_unmap_one()
1517 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, in try_to_unmap_one()
1533 /* Unexpected PMD-mapped THP? */ in try_to_unmap_one()
1537 if ((vma->vm_flags & VM_PURGEABLE) && !lock_uxpte(vma, address)) { in try_to_unmap_one()
1547 (vma->vm_flags & VM_LOCKED)) { in try_to_unmap_one()
1556 subpage = folio_page(folio, pfn - folio_pfn(folio)); in try_to_unmap_one()
1623 * architecture must guarantee that a clear->dirty in try_to_unmap_one()
1636 * Now the pte is cleared. If this pte was uffd-wp armed, in try_to_unmap_one()
1638 * it's file-backed, so we don't lose the tracking info. in try_to_unmap_one()
1656 dec_mm_counter(mm, mm_counter(&folio->page)); in try_to_unmap_one()
1661 } else if ((vma->vm_flags & VM_PURGEABLE) || (pte_unused(pteval) && in try_to_unmap_one()
1667 if (vma->vm_flags & VM_PURGEABLE) in try_to_unmap_one()
1681 dec_mm_counter(mm, mm_counter(&folio->page)); in try_to_unmap_one()
1703 * - clear PTE; barrier; read refcount in try_to_unmap_one()
1704 * - inc refcount; barrier; read PTE in try_to_unmap_one()
1761 if (list_empty(&mm->mmlist)) { in try_to_unmap_one()
1763 if (list_empty(&mm->mmlist)) in try_to_unmap_one()
1764 list_add(&mm->mmlist, &init_mm.mmlist); in try_to_unmap_one()
1779 * This is a locked file-backed folio, in try_to_unmap_one()
1784 * to point at a new folio while a device is in try_to_unmap_one()
1789 dec_mm_counter(mm, mm_counter_file(&folio->page)); in try_to_unmap_one()
1793 if (vma->vm_flags & VM_LOCKED) in try_to_unmap_one()
1814 * try_to_unmap - Try to remove all page table mappings to a folio.
1848 struct mm_struct *mm = vma->vm_mm; in try_to_migrate_one()
1883 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, in try_to_migrate_one()
1900 /* PMD-mapped THP migration entry */ in try_to_migrate_one()
1903 pmd_pfn(*pvmw.pmd) - folio_pfn(folio)); in try_to_migrate_one()
1916 /* Unexpected PMD-mapped THP? */ in try_to_migrate_one()
1923 * Our PTE is a non-present device exclusive entry and in try_to_migrate_one()
1929 * changed when hugepage migrations to device private in try_to_migrate_one()
1933 subpage = &folio->page; in try_to_migrate_one()
1935 subpage = folio_page(folio, pfn - folio_pfn(folio)); in try_to_migrate_one()
2000 * architecture must guarantee that a clear->dirty in try_to_migrate_one()
2042 * pteval maps a zone device page and is therefore in try_to_migrate_one()
2051 compound_order(&folio->page)); in try_to_migrate_one()
2063 dec_mm_counter(mm, mm_counter(&folio->page)); in try_to_migrate_one()
2078 dec_mm_counter(mm, mm_counter(&folio->page)); in try_to_migrate_one()
2138 compound_order(&folio->page)); in try_to_migrate_one()
2146 if (vma->vm_flags & VM_LOCKED) in try_to_migrate_one()
2157 * try_to_migrate - try to replace all page table mappings with swap entries
2213 struct mm_struct *mm = vma->vm_mm; in page_make_device_exclusive_one()
2225 vma->vm_mm, address, min(vma->vm_end, in page_make_device_exclusive_one()
2227 args->owner); in page_make_device_exclusive_one()
2231 /* Unexpected PMD-mapped THP? */ in page_make_device_exclusive_one()
2242 pte_pfn(ptent) - folio_pfn(folio)); in page_make_device_exclusive_one()
2257 if (args->mm == mm && args->address == address && in page_make_device_exclusive_one()
2259 args->valid = true; in page_make_device_exclusive_one()
2293 * folio_make_device_exclusive - Mark the folio exclusively owned by a device.
2300 * folio and replace them with special device exclusive swap entries to
2301 * grant a device exclusive access to the folio.
2336 * make_device_exclusive_range() - Mark a range for exclusive use by a device
2338 * @start: start of the region to mark for exclusive device access
2344 * exclusive access only if the page pointer is non-NULL.
2351 * A driver using this to program access from a device must use a mmu notifier
2352 * critical section to hold a device specific lock during programming. Once
2360 long npages = (end - start) >> PAGE_SHIFT; in make_device_exclusive_range()
2391 struct anon_vma *root = anon_vma->root; in __put_anon_vma()
2394 if (root != anon_vma && atomic_dec_and_test(&root->refcount)) in __put_anon_vma()
2403 if (rwc->anon_lock) in rmap_walk_anon_lock()
2404 return rwc->anon_lock(folio, rwc); in rmap_walk_anon_lock()
2419 if (rwc->try_lock) { in rmap_walk_anon_lock()
2421 rwc->contended = true; in rmap_walk_anon_lock()
2431 * rmap_walk_anon - do something to anonymous page using the object-based
2458 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; in rmap_walk_anon()
2459 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, in rmap_walk_anon()
2461 struct vm_area_struct *vma = avc->vma; in rmap_walk_anon()
2462 unsigned long address = vma_address(&folio->page, vma); in rmap_walk_anon()
2464 VM_BUG_ON_VMA(address == -EFAULT, vma); in rmap_walk_anon()
2467 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_anon()
2470 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) in rmap_walk_anon()
2472 if (rwc->done && rwc->done(folio)) in rmap_walk_anon()
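A hedged sketch of an rmap_walk() user, showing how the rwc callbacks consumed above are wired up (not from mm/rmap.c; folio_referenced() earlier in the file is the real-world template, and the names here are made up):

#include <linux/mm.h>
#include <linux/rmap.h>

struct walk_arg_sketch {
	int hits;
};

static bool rmap_one_sketch(struct folio *folio, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	struct walk_arg_sketch *was = arg;

	was->hits++;		/* inspect the mapping of @folio at @address */
	return true;		/* keep walking; returning false stops it */
}

static int count_mappings_sketch(struct folio *folio)
{
	struct walk_arg_sketch was = { .hits = 0 };
	struct rmap_walk_control rwc = {
		.rmap_one = rmap_one_sketch,
		.arg = &was,
		.anon_lock = folio_lock_anon_vma_read,
		.try_lock = true,
	};

	rmap_walk(folio, &rwc);
	return rwc.contended ? -1 : was.hits;
}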
2481 * rmap_walk_file - do something to file page using the object-based rmap method
2497 * The page lock not only makes sure that page->mapping cannot in rmap_walk_file()
2500 * so we can safely take mapping->i_mmap_rwsem. in rmap_walk_file()
2508 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; in rmap_walk_file()
2513 if (rwc->try_lock) { in rmap_walk_file()
2514 rwc->contended = true; in rmap_walk_file()
2521 vma_interval_tree_foreach(vma, &mapping->i_mmap, in rmap_walk_file()
2523 unsigned long address = vma_address(&folio->page, vma); in rmap_walk_file()
2525 VM_BUG_ON_VMA(address == -EFAULT, vma); in rmap_walk_file()
2528 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_file()
2531 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) in rmap_walk_file()
2533 if (rwc->done && rwc->done(folio)) in rmap_walk_file()
2575 struct anon_vma *anon_vma = vma->anon_vma; in hugepage_add_anon_rmap()
2581 first = atomic_inc_and_test(&folio->_entire_mapcount); in hugepage_add_anon_rmap()
2592 BUG_ON(address < vma->vm_start || address >= vma->vm_end); in hugepage_add_new_anon_rmap()
2593 /* increment count (starts at -1) */ in hugepage_add_new_anon_rmap()
2594 atomic_set(&folio->_entire_mapcount, 0); in hugepage_add_new_anon_rmap()
2596 __page_set_anon_rmap(folio, &folio->page, vma, address, 1); in hugepage_add_new_anon_rmap()