2 * mm/rmap.c - physical to virtual reverse mappings
23 * inode->i_mutex (while writing or truncating, not reading or faulting)
24 * mm->mmap_lock
25 * page->flags PG_locked (lock_page) * (see hugetlbfs below)
27 * mapping->i_mmap_rwsem
29 * anon_vma->rwsem
30 * mm->page_table_lock or pte_lock
31 * pgdat->lru_lock (in mark_page_accessed, isolate_lru_page)
34 * mapping->private_lock (in __set_page_dirty_buffers)
35 * mem_cgroup_{begin,end}_page_stat (memcg->move_lock)
37 * inode->i_lock (in set_page_dirty's __mark_inode_dirty)
38 * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
39 * sb_lock (within inode_lock in fs/fs-writeback.c)
41 * in arch-dependent flush_dcache_mmap_lock,
42 * within bdi.wb->list_lock in __sync_single_inode)
44 * anon_vma->rwsem,mapping->i_mutex (memory_failure, collect_procs_anon)
45 * ->tasklist_lock
49 * mapping->i_mmap_rwsem
51 * page->flags PG_locked (lock_page)
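
A minimal sketch of how a reclaim-side caller would respect the nesting documented above (page lock, then anon_vma->rwsem for read, then mm->page_table_lock). The function name is hypothetical, and it assumes the usual mm headers this file already pulls in (<linux/pagemap.h>, <linux/rmap.h>):

static void example_nested_locks(struct page *page, struct mm_struct *mm)
{
        struct anon_vma *anon_vma;

        lock_page(page);                                /* page->flags PG_locked */
        anon_vma = page_lock_anon_vma_read(page);       /* anon_vma->rwsem (read) */
        if (anon_vma) {
                spin_lock(&mm->page_table_lock);        /* mm->page_table_lock */
                /* ... inspect page tables mapping @page here ... */
                spin_unlock(&mm->page_table_lock);
                page_unlock_anon_vma_read(anon_vma);
        }
        unlock_page(page);
}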
71 #include <linux/backing-dev.h>
92 atomic_set(&anon_vma->refcount, 1); in anon_vma_alloc()
93 anon_vma->num_children = 0; in anon_vma_alloc()
94 anon_vma->num_active_vmas = 0; in anon_vma_alloc()
95 anon_vma->parent = anon_vma; in anon_vma_alloc()
100 anon_vma->root = anon_vma; in anon_vma_alloc()
108 VM_BUG_ON(atomic_read(&anon_vma->refcount)); in anon_vma_free()
128 if (rwsem_is_locked(&anon_vma->root->rwsem)) { in anon_vma_free()
150 avc->vma = vma; in anon_vma_chain_link()
151 avc->anon_vma = anon_vma; in anon_vma_chain_link()
152 list_add(&avc->same_vma, &vma->anon_vma_chain); in anon_vma_chain_link()
153 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); in anon_vma_chain_link()
157 * __anon_vma_prepare - attach an anon_vma to a memory region
167 * can re-use the anon_vma from (very common when the only
171 * Anon-vma allocations are very subtle, because we may have
186 struct mm_struct *mm = vma->vm_mm; in __anon_vma_prepare()
202 anon_vma->num_children++; /* self-parent link for new root */ in __anon_vma_prepare()
208 spin_lock(&mm->page_table_lock); in __anon_vma_prepare()
209 if (likely(!vma->anon_vma)) { in __anon_vma_prepare()
210 vma->anon_vma = anon_vma; in __anon_vma_prepare()
212 anon_vma->num_active_vmas++; in __anon_vma_prepare()
216 spin_unlock(&mm->page_table_lock); in __anon_vma_prepare()
229 return -ENOMEM; in __anon_vma_prepare()
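
As a hedged illustration of how __anon_vma_prepare() is reached: anonymous fault handlers call the anon_vma_prepare() wrapper (a static inline in <linux/rmap.h>) before mapping the first page, and it only falls through to __anon_vma_prepare() while vma->anon_vma is still NULL. The caller below is hypothetical:

static vm_fault_t example_anon_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;

        if (unlikely(anon_vma_prepare(vma)))    /* may fail with -ENOMEM */
                return VM_FAULT_OOM;

        /* ... allocate a zeroed page, then page_add_new_anon_rmap() ... */
        return 0;
}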
234 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
242 struct anon_vma *new_root = anon_vma->root; in lock_anon_vma_root()
245 up_write(&root->rwsem); in lock_anon_vma_root()
247 down_write(&root->rwsem); in lock_anon_vma_root()
255 up_write(&root->rwsem); in unlock_anon_vma_root()
260 * Returns 0 on success, -ENOMEM on failure.
265 * endless growth of anon_vma. Since dst->anon_vma is set to NULL before call,
266 * we can identify this case by checking (!dst->anon_vma && src->anon_vma).
268 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
281 list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { in anon_vma_clone()
292 anon_vma = pavc->anon_vma; in anon_vma_clone()
301 * it has a self-parent reference and at least one child. in anon_vma_clone()
303 if (!dst->anon_vma && src->anon_vma && in anon_vma_clone()
304 anon_vma->num_children < 2 && in anon_vma_clone()
305 anon_vma->num_active_vmas == 0) in anon_vma_clone()
306 dst->anon_vma = anon_vma; in anon_vma_clone()
308 if (dst->anon_vma) in anon_vma_clone()
309 dst->anon_vma->num_active_vmas++; in anon_vma_clone()
315 * dst->anon_vma is dropped here otherwise its degree can be incorrectly in anon_vma_clone()
318 * about dst->anon_vma if anon_vma_clone() failed. in anon_vma_clone()
320 dst->anon_vma = NULL; in anon_vma_clone()
322 return -ENOMEM; in anon_vma_clone()
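
For context, a sketch of the usual caller: when a VMA is split or copied in place, the new VMA shares the existing anon_vma chain via anon_vma_clone(dst, src), roughly as __split_vma() does. Names here are illustrative:

static int example_share_chain(struct vm_area_struct *new,
                               struct vm_area_struct *old)
{
        /* new covers part of old's range; reuse old's anon_vmas */
        return anon_vma_clone(new, old);        /* 0 or -ENOMEM */
}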
328 * Returns 0 on success, non-zero on failure.
337 if (!pvma->anon_vma) in anon_vma_fork()
341 vma->anon_vma = NULL; in anon_vma_fork()
345 * so rmap can find non-COWed pages in child processes. in anon_vma_fork()
352 if (vma->anon_vma) in anon_vma_fork()
359 anon_vma->num_active_vmas++; in anon_vma_fork()
368 anon_vma->root = pvma->anon_vma->root; in anon_vma_fork()
369 anon_vma->parent = pvma->anon_vma; in anon_vma_fork()
375 get_anon_vma(anon_vma->root); in anon_vma_fork()
377 vma->anon_vma = anon_vma; in anon_vma_fork()
380 anon_vma->parent->num_children++; in anon_vma_fork()
389 return -ENOMEM; in anon_vma_fork()
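
A sketch of the fork-side caller, assuming the dup_mmap()-style pattern in which each copied VMA gets its own anon_vma chained under the parent's (names illustrative):

static int example_dup_one_vma(struct vm_area_struct *child_vma,
                               struct vm_area_struct *parent_vma)
{
        /* child_vma is a fresh copy of parent_vma */
        if (anon_vma_fork(child_vma, parent_vma))
                return -ENOMEM;
        return 0;
}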
401 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { in unlink_anon_vmas()
402 struct anon_vma *anon_vma = avc->anon_vma; in unlink_anon_vmas()
405 anon_vma_interval_tree_remove(avc, &anon_vma->rb_root); in unlink_anon_vmas()
408 * Leave empty anon_vmas on the list - we'll need in unlink_anon_vmas()
411 if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) { in unlink_anon_vmas()
412 anon_vma->parent->num_children--; in unlink_anon_vmas()
416 list_del(&avc->same_vma); in unlink_anon_vmas()
419 if (vma->anon_vma) { in unlink_anon_vmas()
420 vma->anon_vma->num_active_vmas--; in unlink_anon_vmas()
426 vma->anon_vma = NULL; in unlink_anon_vmas()
433 * needing to write-acquire the anon_vma->root->rwsem. in unlink_anon_vmas()
435 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { in unlink_anon_vmas()
436 struct anon_vma *anon_vma = avc->anon_vma; in unlink_anon_vmas()
438 VM_WARN_ON(anon_vma->num_children); in unlink_anon_vmas()
439 VM_WARN_ON(anon_vma->num_active_vmas); in unlink_anon_vmas()
442 list_del(&avc->same_vma); in unlink_anon_vmas()
451 init_rwsem(&anon_vma->rwsem); in anon_vma_ctor()
452 atomic_set(&anon_vma->refcount, 0); in anon_vma_ctor()
453 anon_vma->rb_root = RB_ROOT_CACHED; in anon_vma_ctor()
485 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
495 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_get_anon_vma()
501 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); in page_get_anon_vma()
502 if (!atomic_inc_not_zero(&anon_vma->refcount)) { in page_get_anon_vma()
511 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero() in page_get_anon_vma()
529 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
539 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_lock_anon_vma_read()
545 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); in page_lock_anon_vma_read()
546 root_anon_vma = READ_ONCE(anon_vma->root); in page_lock_anon_vma_read()
547 if (down_read_trylock(&root_anon_vma->rwsem)) { in page_lock_anon_vma_read()
554 up_read(&root_anon_vma->rwsem); in page_lock_anon_vma_read()
561 if (!atomic_inc_not_zero(&anon_vma->refcount)) { in page_lock_anon_vma_read()
576 if (atomic_dec_and_test(&anon_vma->refcount)) { in page_lock_anon_vma_read()
579 * and bail -- can't simply use put_anon_vma() because in page_lock_anon_vma_read()
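
The expected caller pattern, sketched with a hypothetical walker: take the anon_vma lock for read, walk, then drop it with page_unlock_anon_vma_read():

static void example_walk_anon(struct page *page)
{
        struct anon_vma *anon_vma;

        anon_vma = page_lock_anon_vma_read(page);
        if (!anon_vma)
                return;         /* page no longer has an anon mapping */

        /* ... iterate anon_vma->rb_root under the read lock ... */

        page_unlock_anon_vma_read(anon_vma);
}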
608 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc; in try_to_unmap_flush()
610 if (!tlb_ubc->flush_required) in try_to_unmap_flush()
613 arch_tlbbatch_flush(&tlb_ubc->arch); in try_to_unmap_flush()
614 tlb_ubc->flush_required = false; in try_to_unmap_flush()
615 tlb_ubc->writable = false; in try_to_unmap_flush()
621 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc; in try_to_unmap_flush_dirty()
623 if (tlb_ubc->writable) in try_to_unmap_flush_dirty()
629 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc; in set_tlb_ubc_flush_pending()
631 arch_tlbbatch_add_mm(&tlb_ubc->arch, mm); in set_tlb_ubc_flush_pending()
632 tlb_ubc->flush_required = true; in set_tlb_ubc_flush_pending()
635 * Ensure compiler does not re-order the setting of tlb_flush_batched in set_tlb_ubc_flush_pending()
639 mm->tlb_flush_batched = true; in set_tlb_ubc_flush_pending()
647 tlb_ubc->writable = true; in set_tlb_ubc_flush_pending()
686 if (data_race(mm->tlb_flush_batched)) { in flush_tlb_batched_pending()
690 * Do not allow the compiler to re-order the clearing of in flush_tlb_batched_pending()
694 mm->tlb_flush_batched = false; in flush_tlb_batched_pending()
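
Roughly how the batch is consumed, assuming CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH: try_to_unmap_one() defers the flush via set_tlb_ubc_flush_pending(), and a reclaim loop (cf. shrink_page_list() in mm/vmscan.c, which includes mm/internal.h for the declaration) issues a single flush for the whole batch. Sketch only:

static void example_reclaim_batch(struct list_head *page_list)
{
        /* ... try_to_unmap() each page; the PTE clears are not flushed yet ... */

        try_to_unmap_flush();   /* one flush covering every mm in the batch */
}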
720 if (!vma->anon_vma || !page__anon_vma || in page_address_in_vma()
721 vma->anon_vma->root != page__anon_vma->root) in page_address_in_vma()
722 return -EFAULT; in page_address_in_vma()
723 } else if (!vma->vm_file) { in page_address_in_vma()
724 return -EFAULT; in page_address_in_vma()
725 } else if (vma->vm_file->f_mapping != compound_head(page)->mapping) { in page_address_in_vma()
726 return -EFAULT; in page_address_in_vma()
790 if (!(vma->vm_flags & VM_PURGEABLE)) in page_referenced_one()
791 pra->vm_flags &= ~VM_PURGEABLE; in page_referenced_one()
793 if (vma->vm_flags & VM_LOCKED) { in page_referenced_one()
795 pra->vm_flags |= VM_LOCKED; in page_referenced_one()
810 if (likely(!(vma->vm_flags & VM_SEQ_READ))) in page_referenced_one()
818 /* unexpected pmd-mapped page? */ in page_referenced_one()
822 pra->mapcount--; in page_referenced_one()
831 pra->referenced++; in page_referenced_one()
832 pra->vm_flags |= vma->vm_flags & ~VM_PURGEABLE; in page_referenced_one()
835 if (!pra->mapcount) in page_referenced_one()
844 struct mem_cgroup *memcg = pra->memcg; in invalid_page_referenced_vma()
846 if (!mm_match_cgroup(vma->vm_mm, memcg)) in invalid_page_referenced_vma()
853 * page_referenced - test if the page was referenced
857 * @vm_flags: collect the vm_flags of the VMAs that actually referenced the page
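
A hedged sketch of the reclaim-side caller (modeled on page_check_references() in mm/vmscan.c): the walk counts referencing PTEs and reports the vm_flags that were seen, so callers can special-case VM_LOCKED:

static int example_check_refs(struct page *page, struct mem_cgroup *memcg)
{
        unsigned long vm_flags;
        int referenced;

        /* second argument: caller already holds the page lock */
        referenced = page_referenced(page, 1, memcg, &vm_flags);
        if (vm_flags & VM_LOCKED)
                return -1;      /* mlocked mapping found; handle specially */
        return referenced;
}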
927 0, vma, vma->vm_mm, address, in page_mkclean_one()
946 set_pte_at(vma->vm_mm, address, pte, entry); in page_mkclean_one()
960 set_pmd_at(vma->vm_mm, address, pmd, entry); in page_mkclean_one()
963 /* unexpected pmd-mapped page? */ in page_mkclean_one()
986 if (vma->vm_flags & VM_SHARED) in invalid_mkclean_vma()
1018 * page_move_anon_rmap - move a page to our anon_vma
1029 struct anon_vma *anon_vma = vma->anon_vma; in page_move_anon_rmap()
1042 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); in page_move_anon_rmap()
1046 * __page_set_anon_rmap - set up new anonymous rmap
1055 struct anon_vma *anon_vma = vma->anon_vma; in __page_set_anon_rmap()
1068 anon_vma = anon_vma->root; in __page_set_anon_rmap()
1071 page->mapping = (struct address_space *) anon_vma; in __page_set_anon_rmap()
1072 page->index = linear_page_index(vma, address); in __page_set_anon_rmap()
1076 * __page_check_anon_rmap - sanity check anonymous rmap addition
1085 * The page's anon-rmap details (mapping and index) are guaranteed to in __page_check_anon_rmap()
1096 VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page); in __page_check_anon_rmap()
1102 * page_add_anon_rmap - add pte mapping to an anonymous page
1106 * @compound: charge the page as compound or small page
1114 struct vm_area_struct *vma, unsigned long address, bool compound) in page_add_anon_rmap() argument
1116 do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0); in page_add_anon_rmap()
1127 bool compound = flags & RMAP_COMPOUND; in do_page_add_anon_rmap() local
1135 if (compound) { in do_page_add_anon_rmap()
1142 first = atomic_inc_and_test(&page->_mapcount); in do_page_add_anon_rmap()
1146 int nr = compound ? thp_nr_pages(page) : 1; in do_page_add_anon_rmap()
1148 * We use the irq-unsafe __{inc|mod}_zone_page_stat because in do_page_add_anon_rmap()
1153 if (compound) in do_page_add_anon_rmap()
1172 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
1176 * @compound: charge the page as compound or small page
1179 * This means the inc-and-test can be bypassed.
1183 struct vm_area_struct *vma, unsigned long address, bool compound) in page_add_new_anon_rmap() argument
1185 int nr = compound ? thp_nr_pages(page) : 1; in page_add_new_anon_rmap()
1187 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); in page_add_new_anon_rmap()
1189 if (compound) { in page_add_new_anon_rmap()
1191 /* increment count (starts at -1) */ in page_add_new_anon_rmap()
1200 /* increment count (starts at -1) */ in page_add_new_anon_rmap()
1201 atomic_set(&page->_mapcount, 0); in page_add_new_anon_rmap()
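
An illustrative fault-path sequence for a brand-new anonymous page, loosely following do_anonymous_page() (details elided, helper names assumed for this kernel era):

static void example_map_new_page(struct page *page,
                                 struct vm_area_struct *vma,
                                 unsigned long address)
{
        page_add_new_anon_rmap(page, vma, address, false);      /* small page */
        lru_cache_add_inactive_or_unevictable(page, vma);
        /* ... then set_pte_at() with the new entry and update MMU caches ... */
}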
1208 * page_add_file_rmap - add pte mapping to a file page
1210 * @compound: charge the page as compound or small page
1214 void page_add_file_rmap(struct page *page, bool compound) in page_add_file_rmap() argument
1218 VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); in page_add_file_rmap()
1220 if (compound && PageTransHuge(page)) { in page_add_file_rmap()
1239 if (!atomic_inc_and_test(&page->_mapcount)) in page_add_file_rmap()
1247 static void page_remove_file_rmap(struct page *page, bool compound) in page_remove_file_rmap() argument
1251 VM_BUG_ON_PAGE(compound && !PageHead(page), page); in page_remove_file_rmap()
1261 if (compound && PageTransHuge(page)) { in page_remove_file_rmap()
1263 if (atomic_add_negative(-1, &page[i]._mapcount)) in page_remove_file_rmap()
1266 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) in page_remove_file_rmap()
1273 if (!atomic_add_negative(-1, &page->_mapcount)) in page_remove_file_rmap()
1278 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because in page_remove_file_rmap()
1282 __mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr); in page_remove_file_rmap()
1292 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) in page_remove_anon_compound_rmap()
1310 if (atomic_add_negative(-1, &page[i]._mapcount)) in page_remove_anon_compound_rmap()
1316 * page of the compound page is unmapped, but at least one in page_remove_anon_compound_rmap()
1329 __mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr); in page_remove_anon_compound_rmap()
1333 * page_remove_rmap - take down pte mapping from a page
1335 * @compound: uncharge the page as compound or small page
1339 void page_remove_rmap(struct page *page, bool compound) in page_remove_rmap() argument
1344 page_remove_file_rmap(page, compound); in page_remove_rmap()
1348 if (compound) { in page_remove_rmap()
1354 if (!atomic_add_negative(-1, &page->_mapcount)) in page_remove_rmap()
1358 * We use the irq-unsafe __{inc|mod}_zone_page_stat because in page_remove_rmap()
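
The teardown side, sketched: once a PTE mapping the page has been cleared (as in try_to_unmap_one() below or zap_pte_range()), the reverse mapping is dropped and then the reference:

static void example_drop_mapping(struct page *page, bool was_compound)
{
        page_remove_rmap(page, was_compound);
        put_page(page);         /* may free the page if this was the last reference */
}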
1389 struct mm_struct *mm = vma->vm_mm; in try_to_unmap_one()
1410 /* munlock has nothing to gain from examining un-locked vmas */ in try_to_unmap_one()
1411 if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED)) in try_to_unmap_one()
1433 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in try_to_unmap_one()
1447 /* PMD-mapped THP migration entry */ in try_to_unmap_one()
1456 if ((vma->vm_flags & VM_PURGEABLE) && !lock_uxpte(vma, address)) { in try_to_unmap_one()
1469 if (vma->vm_flags & VM_LOCKED) { in try_to_unmap_one()
1470 /* PTE-mapped THP are never mlocked */ in try_to_unmap_one()
1486 /* Unexpected PMD-mapped THP? */ in try_to_unmap_one()
1489 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); in try_to_unmap_one()
1543 * pteval maps a zone device page and is therefore in try_to_unmap_one()
1559 * changed when hugepage migrations to device private in try_to_unmap_one()
1573 * architecture must guarantee that a clear->dirty in try_to_unmap_one()
1603 } else if ((vma->vm_flags & VM_PURGEABLE) || (pte_unused(pteval) && in try_to_unmap_one()
1605 if (vma->vm_flags & VM_PURGEABLE) in try_to_unmap_one()
1673 * - clear PTE; barrier; read refcount in try_to_unmap_one()
1674 * - inc refcount; barrier; read PTE in try_to_unmap_one()
1723 if (list_empty(&mm->mmlist)) { in try_to_unmap_one()
1725 if (list_empty(&mm->mmlist)) in try_to_unmap_one()
1726 list_add(&mm->mmlist, &init_mm.mmlist); in try_to_unmap_one()
1742 * This is a locked file-backed page, thus it cannot in try_to_unmap_one()
1746 * point at new page while a device still is using this in try_to_unmap_one()
1781 * try_to_unmap - try to remove all page table mappings to a page
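
A sketch of the reclaim-side use, assuming this kernel's bool-returning signature (later kernels return void): the caller keeps the page if any mapping survives:

static bool example_unmap_for_reclaim(struct page *page)
{
        enum ttu_flags flags = TTU_BATCH_FLUSH;         /* defer TLB flushes */

        if (!try_to_unmap(page, flags))
                return false;   /* still mapped somewhere; keep the page */
        return true;
}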
1826 * try_to_munlock - try to munlock a page
1852 struct anon_vma *root = anon_vma->root; in __put_anon_vma()
1855 if (root != anon_vma && atomic_dec_and_test(&root->refcount)) in __put_anon_vma()
1864 if (rwc->anon_lock) in rmap_walk_anon_lock()
1865 return rwc->anon_lock(page); in rmap_walk_anon_lock()
1882 * rmap_walk_anon - do something to anonymous page using the object-based
1913 pgoff_end = pgoff_start + thp_nr_pages(page) - 1; in rmap_walk_anon()
1914 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, in rmap_walk_anon()
1916 struct vm_area_struct *vma = avc->vma; in rmap_walk_anon()
1919 VM_BUG_ON_VMA(address == -EFAULT, vma); in rmap_walk_anon()
1922 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_anon()
1925 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_anon()
1927 if (rwc->done && rwc->done(page)) in rmap_walk_anon()
1936 * rmap_walk_file - do something to file page using the object-based rmap method
1956 * The page lock not only makes sure that page->mapping cannot in rmap_walk_file()
1959 * so we can safely take mapping->i_mmap_rwsem. in rmap_walk_file()
1967 pgoff_end = pgoff_start + thp_nr_pages(page) - 1; in rmap_walk_file()
1970 vma_interval_tree_foreach(vma, &mapping->i_mmap, in rmap_walk_file()
1974 VM_BUG_ON_VMA(address == -EFAULT, vma); in rmap_walk_file()
1977 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_file()
1980 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_file()
1982 if (rwc->done && rwc->done(page)) in rmap_walk_file()
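
For orientation, a sketch of how the walkers above get driven: page_referenced() and friends fill in an rmap_walk_control and hand it to rmap_walk(), which dispatches to rmap_walk_anon(), rmap_walk_file() or the KSM variant. The wrapper function here is hypothetical; the callbacks are the ones defined earlier in this file:

static void example_drive_walk(struct page *page,
                               struct page_referenced_arg *pra)
{
        struct rmap_walk_control rwc = {
                .rmap_one = page_referenced_one,
                .arg = (void *)pra,
                .anon_lock = page_lock_anon_vma_read,
        };

        rmap_walk(page, &rwc);
}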
2021 struct anon_vma *anon_vma = vma->anon_vma; in hugepage_add_anon_rmap()
2035 BUG_ON(address < vma->vm_start || address >= vma->vm_end); in hugepage_add_new_anon_rmap()