/mm/
D | truncate.c |
    28  static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,   in clear_shadow_entry() argument
    34  spin_lock_irq(&mapping->tree_lock);   in clear_shadow_entry()
    40  if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))   in clear_shadow_entry()
    44  __radix_tree_replace(&mapping->page_tree, node, slot, NULL,   in clear_shadow_entry()
    45  workingset_update_node, mapping);   in clear_shadow_entry()
    46  mapping->nrexceptional--;   in clear_shadow_entry()
    48  spin_unlock_irq(&mapping->tree_lock);   in clear_shadow_entry()
    54  static void truncate_exceptional_entry(struct address_space *mapping,   in truncate_exceptional_entry() argument
    58  if (shmem_mapping(mapping))   in truncate_exceptional_entry()
    61  if (dax_mapping(mapping)) {   in truncate_exceptional_entry()
[all …]
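Read top to bottom, the truncate.c rows are most of clear_shadow_entry(): take the tree lock, re-find the slot, replace the shadow entry with NULL, and fix up the exceptional count. A sketch of the whole routine; the lines the listing skips (notably the *slot recheck) are filled in by assumption:

    static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
                                   void *entry)
    {
            struct radix_tree_node *node;
            void **slot;

            spin_lock_irq(&mapping->tree_lock);
            /*
             * Shadow entries are not stabilized by a page lock, so they
             * must be re-verified under tree_lock before being cleared.
             */
            if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
                    goto unlock;
            if (*slot != entry)     /* assumed recheck for the skipped lines */
                    goto unlock;
            /* Clear the entry and let workingset accounting see the change. */
            __radix_tree_replace(&mapping->page_tree, node, slot, NULL,
                                 workingset_update_node, mapping);
            mapping->nrexceptional--;
    unlock:
            spin_unlock_irq(&mapping->tree_lock);
    }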
D | filemap.c |
   116  static int page_cache_tree_insert(struct address_space *mapping,   in page_cache_tree_insert() argument
   123  error = __radix_tree_create(&mapping->page_tree, page->index, 0,   in page_cache_tree_insert()
   130  p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);   in page_cache_tree_insert()
   134  mapping->nrexceptional--;   in page_cache_tree_insert()
   138  __radix_tree_replace(&mapping->page_tree, node, slot, page,   in page_cache_tree_insert()
   139  workingset_update_node, mapping);   in page_cache_tree_insert()
   140  mapping->nrpages++;   in page_cache_tree_insert()
   144  static void page_cache_tree_delete(struct address_space *mapping,   in page_cache_tree_delete() argument
   160  __radix_tree_lookup(&mapping->page_tree, page->index + i,   in page_cache_tree_delete()
   165  radix_tree_clear_tags(&mapping->page_tree, node, slot);   in page_cache_tree_delete()
[all …]
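Stitching the page_cache_tree_insert() rows together: insertion has to cope with a shadow (exceptional) entry already occupying the slot. A sketch of that path; the DAX special-casing handled by the skipped lines is left out:

    static int page_cache_tree_insert(struct address_space *mapping,
                                      struct page *page, void **shadowp)
    {
            struct radix_tree_node *node;
            void **slot;
            int error;

            error = __radix_tree_create(&mapping->page_tree, page->index, 0,
                                        &node, &slot);
            if (error)
                    return error;
            if (*slot) {
                    void *p;

                    p = radix_tree_deref_slot_protected(slot,
                                                        &mapping->tree_lock);
                    if (!radix_tree_exceptional_entry(p))
                            return -EEXIST; /* a real page is already there */
                    /* a shadow entry occupies the slot; report and replace */
                    if (shadowp)
                            *shadowp = p;
                    mapping->nrexceptional--;
            }
            __radix_tree_replace(&mapping->page_tree, node, slot, page,
                                 workingset_update_node, mapping);
            mapping->nrpages++;
            return 0;
    }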
D | readahead.c |
    30  file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)   in file_ra_state_init() argument
    32  ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;   in file_ra_state_init()
    44  static void read_cache_pages_invalidate_page(struct address_space *mapping,   in read_cache_pages_invalidate_page() argument
    50  page->mapping = mapping;   in read_cache_pages_invalidate_page()
    52  page->mapping = NULL;   in read_cache_pages_invalidate_page()
    61  static void read_cache_pages_invalidate_pages(struct address_space *mapping,   in read_cache_pages_invalidate_pages() argument
    69  read_cache_pages_invalidate_page(mapping, victim);   in read_cache_pages_invalidate_pages()
    83  int read_cache_pages(struct address_space *mapping, struct list_head *pages,   in read_cache_pages() argument
    92  if (add_to_page_cache_lru(page, mapping, page->index,   in read_cache_pages()
    93  readahead_gfp_mask(mapping))) {   in read_cache_pages()
[all …]
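read_cache_pages() at line 83 takes a caller-supplied filler callback; if the filler fails, read_cache_pages() invalidates and releases the pages still on the list via the two invalidate helpers above. A hypothetical caller (my_filler, my_fs_readpage, and my_read_pages are illustrative names, not kernel API):

    extern int my_fs_readpage(void *fs_ctx, struct page *page); /* hypothetical */

    static int my_filler(void *data, struct page *page)
    {
            return my_fs_readpage(data, page);      /* 0 on success */
    }

    static int my_read_pages(struct address_space *mapping,
                             struct list_head *pages, void *fs_ctx)
    {
            /*
             * On a filler failure, read_cache_pages() itself cleans up the
             * remaining pages, so the caller only checks the return value.
             */
            return read_cache_pages(mapping, pages, my_filler, fs_ctx);
    }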
D | page-writeback.c |
  1562  static void balance_dirty_pages(struct address_space *mapping,   in balance_dirty_pages() argument
  1864  void balance_dirty_pages_ratelimited(struct address_space *mapping)   in balance_dirty_pages_ratelimited() argument
  1866  struct inode *inode = mapping->host;   in balance_dirty_pages_ratelimited()
  1913  balance_dirty_pages(mapping, wb, current->nr_dirtied);   in balance_dirty_pages_ratelimited()
  2105  void tag_pages_for_writeback(struct address_space *mapping,   in tag_pages_for_writeback() argument
  2113  spin_lock_irq(&mapping->tree_lock);   in tag_pages_for_writeback()
  2114  radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, start,   in tag_pages_for_writeback()
  2118  radix_tree_iter_tag_set(&mapping->page_tree, &iter,   in tag_pages_for_writeback()
  2124  spin_unlock_irq(&mapping->tree_lock);   in tag_pages_for_writeback()
  2126  spin_lock_irq(&mapping->tree_lock);   in tag_pages_for_writeback()
[all …]
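The tag_pages_for_writeback() rows show a tagged radix-tree walk retagging DIRTY slots as TOWRITE, and the unlock/relock pair at lines 2124/2126 is a batch break that bounds irq-off latency. A sketch of the loop; the batch size is an assumption:

    void tag_pages_for_writeback(struct address_space *mapping,
                                 pgoff_t start, pgoff_t end)
    {
    #define WRITEBACK_TAG_BATCH 4096        /* assumed batch size */
            unsigned long tagged = 0;
            struct radix_tree_iter iter;
            void **slot;

            spin_lock_irq(&mapping->tree_lock);
            radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, start,
                                       PAGECACHE_TAG_DIRTY) {
                    if (iter.index > end)
                            break;
                    radix_tree_iter_tag_set(&mapping->page_tree, &iter,
                                            PAGECACHE_TAG_TOWRITE);
                    if (++tagged % WRITEBACK_TAG_BATCH)
                            continue;
                    /* Periodically drop the irq-disabled lock and reschedule. */
                    slot = radix_tree_iter_resume(slot, &iter);
                    spin_unlock_irq(&mapping->tree_lock);
                    cond_resched();
                    spin_lock_irq(&mapping->tree_lock);
            }
            spin_unlock_irq(&mapping->tree_lock);
    }

Tagging everything up front, before writeback starts, keeps a livelock from forming: pages dirtied while writeback is underway lack the TOWRITE tag and are skipped until the next pass.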
D | migrate.c |
    85  struct address_space *mapping;   in isolate_movable_page() local
   123  mapping = page_mapping(page);   in isolate_movable_page()
   124  VM_BUG_ON_PAGE(!mapping, page);   in isolate_movable_page()
   126  if (!mapping->a_ops->isolate_page(page, mode))   in isolate_movable_page()
   147  struct address_space *mapping;   in putback_movable_page() local
   153  mapping = page_mapping(page);   in putback_movable_page()
   154  mapping->a_ops->putback_page(page);   in putback_movable_page()
   436  int migrate_page_move_mapping(struct address_space *mapping,   in migrate_page_move_mapping() argument
   453  if (!mapping) {   in migrate_page_move_mapping()
   460  newpage->mapping = page->mapping;   in migrate_page_move_mapping()
[all …]
D | shmem.c |
   330  static int shmem_radix_tree_replace(struct address_space *mapping,   in shmem_radix_tree_replace() argument
   339  item = __radix_tree_lookup(&mapping->page_tree, index, &node, &pslot);   in shmem_radix_tree_replace()
   344  __radix_tree_replace(&mapping->page_tree, node, pslot,   in shmem_radix_tree_replace()
   356  static bool shmem_confirm_swap(struct address_space *mapping,   in shmem_confirm_swap() argument
   362  item = radix_tree_lookup(&mapping->page_tree, index);   in shmem_confirm_swap()
   582  struct address_space *mapping,   in shmem_add_to_page_cache() argument
   594  page->mapping = mapping;   in shmem_add_to_page_cache()
   597  spin_lock_irq(&mapping->tree_lock);   in shmem_add_to_page_cache()
   604  if (radix_tree_gang_lookup_slot(&mapping->page_tree,   in shmem_add_to_page_cache()
   612  error = radix_tree_insert(&mapping->page_tree,   in shmem_add_to_page_cache()
[all …]
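shmem_radix_tree_replace() is effectively a compare-and-swap over a tree slot: look the slot up, verify it still holds the expected entry (a page may have been swapped out, or vice versa, in the meantime), then replace it. A sketch assembled from the rows above; the exact error returns are assumptions:

    static int shmem_radix_tree_replace(struct address_space *mapping,
                                        pgoff_t index, void *expected,
                                        void *replacement)
    {
            struct radix_tree_node *node;
            void **pslot;
            void *item;

            item = __radix_tree_lookup(&mapping->page_tree, index, &node,
                                       &pslot);
            if (!item)
                    return -ENOENT;         /* assumed: slot is empty */
            if (item != expected)
                    return -ENOENT;         /* assumed: lost a race */
            /* No workingset callback here, unlike the filemap.c paths. */
            __radix_tree_replace(&mapping->page_tree, node, pslot,
                                 replacement, NULL, NULL);
            return 0;
    }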
D | memory-failure.c |
    84  struct address_space *mapping;   in hwpoison_filter_dev() local
    97  mapping = page_mapping(p);   in hwpoison_filter_dev()
    98  if (mapping == NULL || mapping->host == NULL)   in hwpoison_filter_dev()
   101  dev = mapping->host->i_sb->s_dev;   in hwpoison_filter_dev()
   446  struct address_space *mapping = page->mapping;   in collect_procs_file() local
   448  i_mmap_lock_read(mapping);   in collect_procs_file()
   456  vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,   in collect_procs_file()
   470  i_mmap_unlock_read(mapping);   in collect_procs_file()
   484  if (!page->mapping)   in collect_procs()
   560  struct address_space *mapping)   in truncate_error_page() argument
[all …]
D | khugepaged.c |
  1252  static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)   in retract_page_tables() argument
  1258  i_mmap_lock_write(mapping);   in retract_page_tables()
  1259  vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {   in retract_page_tables()
  1287  i_mmap_unlock_write(mapping);   in retract_page_tables()
  1309  struct address_space *mapping, pgoff_t start,   in collapse_shmem() argument
  1342  new_page->mapping = mapping;   in collapse_shmem()
  1351  spin_lock_irq(&mapping->tree_lock);   in collapse_shmem()
  1352  radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {   in collapse_shmem()
  1369  if (n && !shmem_charge(mapping->host, n)) {   in collapse_shmem()
  1374  radix_tree_insert(&mapping->page_tree, index,   in collapse_shmem()
[all …]
D | fadvise.c |
    33  struct address_space *mapping;   in SYSCALL_DEFINE4() local
    50  mapping = f.file->f_mapping;   in SYSCALL_DEFINE4()
    51  if (!mapping || len < 0) {   in SYSCALL_DEFINE4()
    56  bdi = inode_to_bdi(mapping->host);   in SYSCALL_DEFINE4()
   117  force_page_cache_readahead(mapping, f.file, start_index,   in SYSCALL_DEFINE4()
   123  if (!inode_write_congested(mapping->host))   in SYSCALL_DEFINE4()
   124  __filemap_fdatawrite_range(mapping, offset, endbyte,   in SYSCALL_DEFINE4()
   168  count = invalidate_mapping_pages(mapping,   in SYSCALL_DEFINE4()
   179  invalidate_mapping_pages(mapping, start_index,   in SYSCALL_DEFINE4()
D | cleancache.c |
   189  pool_id = page->mapping->host->i_sb->cleancache_poolid;   in __cleancache_get_page()
   193  if (cleancache_get_key(page->mapping->host, &key) < 0)   in __cleancache_get_page()
   227  pool_id = page->mapping->host->i_sb->cleancache_poolid;   in __cleancache_put_page()
   229  cleancache_get_key(page->mapping->host, &key) >= 0) {   in __cleancache_put_page()
   244  void __cleancache_invalidate_page(struct address_space *mapping,   in __cleancache_invalidate_page() argument
   248  int pool_id = mapping->host->i_sb->cleancache_poolid;   in __cleancache_invalidate_page()
   256  if (cleancache_get_key(mapping->host, &key) >= 0) {   in __cleancache_invalidate_page()
   274  void __cleancache_invalidate_inode(struct address_space *mapping)   in __cleancache_invalidate_inode() argument
   276  int pool_id = mapping->host->i_sb->cleancache_poolid;   in __cleancache_invalidate_inode()
   282  if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)   in __cleancache_invalidate_inode()
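Every cleancache hook follows the same shape: map the inode's superblock to a cleancache pool id, derive a per-file key, then call the backend op. A sketch of __cleancache_invalidate_page() along those lines; cleancache_ops, cleancache_get_key() and struct cleancache_filekey are real names from this file, but the exact guard order here is assumed:

    void __cleancache_invalidate_page(struct address_space *mapping,
                                      struct page *page)
    {
            /* page->mapping may already be NULL; use the passed mapping */
            int pool_id = mapping->host->i_sb->cleancache_poolid;
            struct cleancache_filekey key = { .u.key = { 0 } };

            if (!cleancache_ops)
                    return;         /* no backend registered */

            if (pool_id >= 0) {
                    VM_BUG_ON_PAGE(!PageLocked(page), page);
                    if (cleancache_get_key(mapping->host, &key) >= 0)
                            cleancache_ops->invalidate_page(pool_id, key,
                                                            page->index);
            }
    }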
D | workingset.c |
   225  void *workingset_eviction(struct address_space *mapping, struct page *page)   in workingset_eviction() argument
   372  struct address_space *mapping = private;   in workingset_update_node() local
   375  if (dax_mapping(mapping) || shmem_mapping(mapping))   in workingset_update_node()
   448  struct address_space *mapping;   in shadow_lru_isolate() local
   466  mapping = container_of(node->root, struct address_space, page_tree);   in shadow_lru_isolate()
   469  if (!spin_trylock(&mapping->tree_lock)) {   in shadow_lru_isolate()
   493  if (WARN_ON_ONCE(!mapping->nrexceptional))   in shadow_lru_isolate()
   498  mapping->nrexceptional--;   in shadow_lru_isolate()
   504  __radix_tree_delete_node(&mapping->page_tree, node,   in shadow_lru_isolate()
   505  workingset_update_node, mapping);   in shadow_lru_isolate()
[all …]
D | util.c |
   422  unsigned long mapping;   in __page_rmapping() local
   424  mapping = (unsigned long)page->mapping;   in __page_rmapping()
   425  mapping &= ~PAGE_MAPPING_FLAGS;   in __page_rmapping()
   427  return (void *)mapping;   in __page_rmapping()
   462  unsigned long mapping;   in page_anon_vma() local
   465  mapping = (unsigned long)page->mapping;   in page_anon_vma()
   466  if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)   in page_anon_vma()
   473  struct address_space *mapping;   in page_mapping() local
   488  mapping = page->mapping;   in page_mapping()
   489  if ((unsigned long)mapping & PAGE_MAPPING_ANON)   in page_mapping()
[all …]
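What the masking in util.c decodes: the two low bits of page->mapping form a type tag, so the same word can point at an address_space, an anon_vma, or a KSM stable node. The constants below are from include/linux/page-flags.h of this era; decoded_mapping() is an illustrative name for what __page_rmapping() does:

    /*
     * Tag bits in page->mapping (include/linux/page-flags.h):
     *   PAGE_MAPPING_ANON    0x1  - pointer is an anon_vma
     *   PAGE_MAPPING_MOVABLE 0x2  - movable, non-LRU page
     *   both bits set             - KSM stable node
     */
    static inline void *decoded_mapping(struct page *page) /* illustrative */
    {
            unsigned long mapping = (unsigned long)page->mapping;

            /* strip the tag bits to recover the real pointer */
            return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
    }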
D | vmscan.c |
   581  static void handle_write_error(struct address_space *mapping,   in handle_write_error() argument
   585  if (page_mapping(page) == mapping)   in handle_write_error()
   586  mapping_set_error(mapping, error);   in handle_write_error()
   606  static pageout_t pageout(struct page *page, struct address_space *mapping,   in pageout() argument
   627  if (!mapping) {   in pageout()
   641  if (mapping->a_ops->writepage == NULL)   in pageout()
   643  if (!may_write_to_inode(mapping->host, sc))   in pageout()
   657  res = mapping->a_ops->writepage(page, &wbc);   in pageout()
   659  handle_write_error(mapping, page, res);   in pageout()
   681  static int __remove_mapping(struct address_space *mapping, struct page *page,   in __remove_mapping() argument
[all …]
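pageout() funnels into the writepage call at line 657, passing a writeback_control built for reclaim. A sketch of that step; do_writepage is an illustrative wrapper, and the wbc field values are assumed from typical reclaim settings rather than quoted from the listing:

    static pageout_t do_writepage(struct address_space *mapping, /* illustrative */
                                  struct page *page)
    {
            int res;
            struct writeback_control wbc = {
                    .sync_mode   = WB_SYNC_NONE,    /* reclaim does not wait */
                    .nr_to_write = SWAP_CLUSTER_MAX,
                    .range_start = 0,
                    .range_end   = LLONG_MAX,
                    .for_reclaim = 1,
            };

            SetPageReclaim(page);
            res = mapping->a_ops->writepage(page, &wbc);
            if (res < 0)
                    handle_write_error(mapping, page, res);
            if (res == AOP_WRITEPAGE_ACTIVATE) {
                    /* filesystem refused; keep the page active instead */
                    ClearPageReclaim(page);
                    return PAGE_ACTIVATE;
            }
            return PAGE_SUCCESS;
    }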
D | page_io.c |
   151  struct address_space *mapping = swap_file->f_mapping;   in generic_swapfile_activate() local
   152  struct inode *inode = mapping->host;   in generic_swapfile_activate()
   289  struct address_space *mapping = swap_file->f_mapping;   in __swap_writepage() local
   303  ret = mapping->a_ops->direct_IO(&kiocb, &from);   in __swap_writepage()
   369  struct address_space *mapping = swap_file->f_mapping;   in swap_readpage() local
   371  ret = mapping->a_ops->readpage(swap_file, page);   in swap_readpage()
   426  struct address_space *mapping = sis->swap_file->f_mapping;   in swap_set_page_dirty() local
   429  return mapping->a_ops->set_page_dirty(page);   in swap_set_page_dirty()
D | mmap.c |
   130  struct file *file, struct address_space *mapping)   in __remove_shared_vm_struct() argument
   135  mapping_unmap_writable(mapping);   in __remove_shared_vm_struct()
   137  flush_dcache_mmap_lock(mapping);   in __remove_shared_vm_struct()
   138  vma_interval_tree_remove(vma, &mapping->i_mmap);   in __remove_shared_vm_struct()
   139  flush_dcache_mmap_unlock(mapping);   in __remove_shared_vm_struct()
   151  struct address_space *mapping = file->f_mapping;   in unlink_file_vma() local
   152  i_mmap_lock_write(mapping);   in unlink_file_vma()
   153  __remove_shared_vm_struct(vma, file, mapping);   in unlink_file_vma()
   154  i_mmap_unlock_write(mapping);   in unlink_file_vma()
   577  struct address_space *mapping = file->f_mapping;   in __vma_link_file() local
[all …]
D | mincore.c |
    51  static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)   in mincore_page() argument
    63  if (shmem_mapping(mapping)) {   in mincore_page()
    64  page = find_get_entry(mapping, pgoff);   in mincore_page()
    75  page = find_get_page(mapping, pgoff);   in mincore_page()
    77  page = find_get_page(mapping, pgoff);   in mincore_page()
D | hugetlb.c |
   757  struct address_space *mapping = vma->vm_file->f_mapping;   in vma_resv_map() local
   758  struct inode *inode = mapping->host;   in vma_resv_map()
  1266  page->mapping = NULL;   in free_huge_page()
  3451  struct address_space *mapping;   in unmap_ref_private() local
  3461  mapping = vma->vm_file->f_mapping;   in unmap_ref_private()
  3468  i_mmap_lock_write(mapping);   in unmap_ref_private()
  3469  vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {   in unmap_ref_private()
  3493  i_mmap_unlock_write(mapping);   in unmap_ref_private()
  3632  struct address_space *mapping;   in hugetlbfs_pagecache_page() local
  3635  mapping = vma->vm_file->f_mapping;   in hugetlbfs_pagecache_page()
[all …]
D | rmap.c |
   470  anon_mapping = (unsigned long)READ_ONCE(page->mapping);   in page_get_anon_vma()
   514  anon_mapping = (unsigned long)READ_ONCE(page->mapping);   in page_lock_anon_vma_read()
   699  } else if (page->mapping) {   in page_address_in_vma()
   700  if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)   in page_address_in_vma()
   965  struct address_space *mapping;   in page_mkclean() local
   977  mapping = page_mapping(page);   in page_mkclean()
   978  if (!mapping)   in page_mkclean()
  1012  WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);   in page_move_anon_rmap()
  1041  page->mapping = (struct address_space *) anon_vma;   in __page_set_anon_rmap()
  1801  struct address_space *mapping = page_mapping(page);   in rmap_walk_file() local
[all …]
D | nommu.c |
   694  struct address_space *mapping;   in add_vma_to_mm() local
   706  mapping = vma->vm_file->f_mapping;   in add_vma_to_mm()
   708  i_mmap_lock_write(mapping);   in add_vma_to_mm()
   709  flush_dcache_mmap_lock(mapping);   in add_vma_to_mm()
   710  vma_interval_tree_insert(vma, &mapping->i_mmap);   in add_vma_to_mm()
   711  flush_dcache_mmap_unlock(mapping);   in add_vma_to_mm()
   712  i_mmap_unlock_write(mapping);   in add_vma_to_mm()
   760  struct address_space *mapping;   in delete_vma_from_mm() local
   777  mapping = vma->vm_file->f_mapping;   in delete_vma_from_mm()
   779  i_mmap_lock_write(mapping);   in delete_vma_from_mm()
[all …]
D | huge_memory.c |
   483  return (struct list_head *)&page[2].mapping;   in page_deferred_list()
  1429  if (PageDoubleMap(page) || !page->mapping)   in follow_trans_huge_pmd()
  1434  if (page->mapping && !PageDoubleMap(page))   in follow_trans_huge_pmd()
  2339  VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,   in __split_huge_page_tail()
  2341  page_tail->mapping = head->mapping;   in __split_huge_page_tail()
  2388  shmem_uncharge(head->mapping->host, 1);   in __split_huge_page()
  2407  spin_unlock(&head->mapping->tree_lock);   in __split_huge_page()
  2551  struct address_space *mapping = NULL;   in split_huge_page_to_list() local
  2579  mapping = NULL;   in split_huge_page_to_list()
  2582  mapping = head->mapping;   in split_huge_page_to_list()
[all …]
D | swap.c |
   929  struct address_space *mapping,   in pagevec_lookup_entries() argument
   933  pvec->nr = find_get_entries(mapping, start, nr_pages,   in pagevec_lookup_entries()
   981  struct address_space *mapping, pgoff_t *start, pgoff_t end)   in pagevec_lookup_range() argument
   983  pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,   in pagevec_lookup_range()
   990  struct address_space *mapping, pgoff_t *index, pgoff_t end,   in pagevec_lookup_range_tag() argument
   993  pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,   in pagevec_lookup_range_tag()
  1000  struct address_space *mapping, pgoff_t *index, pgoff_t end,   in pagevec_lookup_range_nr_tag() argument
  1003  pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,   in pagevec_lookup_range_nr_tag()
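These pagevec wrappers batch up to PAGEVEC_SIZE page references per call and advance the caller's index past the last page returned, which is what makes the usual loop terminate. A hypothetical walk over [start, end] using pagevec_lookup_range(); process_page() is an illustrative name:

    extern void process_page(struct page *page);    /* hypothetical */

    static void walk_range(struct address_space *mapping,
                           pgoff_t start, pgoff_t end)
    {
            struct pagevec pvec;
            pgoff_t index = start;
            int i;

            pagevec_init(&pvec);    /* older kernels take a 2nd 'cold' arg */
            while (pagevec_lookup_range(&pvec, mapping, &index, end)) {
                    /* index now points past the last page returned */
                    for (i = 0; i < pagevec_count(&pvec); i++)
                            process_page(pvec.pages[i]);
                    pagevec_release(&pvec); /* drop the batched references */
                    cond_resched();
            }
    }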
D | compaction.c |
    98  struct address_space *mapping;   in PageMovable() local
   104  mapping = page_mapping(page);   in PageMovable()
   105  if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)   in PageMovable()
   112  void __SetPageMovable(struct page *page, struct address_space *mapping)   in __SetPageMovable() argument
   115  VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);   in __SetPageMovable()
   116  page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);   in __SetPageMovable()
   129  page->mapping = (void *)((unsigned long)page->mapping &   in __ClearPageMovable()
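These wrappers tag page->mapping with PAGE_MAPPING_MOVABLE so PageMovable() can recognise non-LRU movable pages by their isolate_page a_op. A sketch of the pair; the completion of the truncated line 129 (masking down to just the flag bit, so the VM can still identify a page its driver released after isolation) reflects how kernels of this era do it:

    void __SetPageMovable(struct page *page, struct address_space *mapping)
    {
            VM_BUG_ON_PAGE(!PageLocked(page), page);
            VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
            /* tag the pointer: bit 0x2 marks a movable, non-LRU page */
            page->mapping = (void *)((unsigned long)mapping |
                                     PAGE_MAPPING_MOVABLE);
    }

    void __ClearPageMovable(struct page *page)
    {
            VM_BUG_ON_PAGE(!PageMovable(page), page);
            /*
             * Keep only the flag bit: the address_space is forgotten, but
             * the VM can still tell this was a driver's movable page.
             */
            page->mapping = (void *)((unsigned long)page->mapping &
                                     PAGE_MAPPING_MOVABLE);
    }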
D | internal.h |
    56  extern int __do_page_cache_readahead(struct address_space *mapping,
    64  struct address_space *mapping, struct file *filp)   in ra_submit() argument
    66  return __do_page_cache_readahead(mapping, filp,   in ra_submit()
D | swapfile.c |
  2326  struct address_space *mapping = swap_file->f_mapping;   in destroy_swap_extents() local
  2329  mapping->a_ops->swap_deactivate(swap_file);   in destroy_swap_extents()
  2413  struct address_space *mapping = swap_file->f_mapping;   in setup_swap_extents() local
  2414  struct inode *inode = mapping->host;   in setup_swap_extents()
  2423  if (mapping->a_ops->swap_activate) {   in setup_swap_extents()
  2424  ret = mapping->a_ops->swap_activate(sis, swap_file, span);   in setup_swap_extents()
  2534  struct address_space *mapping;   in SYSCALL_DEFINE1() local
  2554  mapping = victim->f_mapping;   in SYSCALL_DEFINE1()
  2558  if (p->swap_file->f_mapping == mapping) {   in SYSCALL_DEFINE1()
  2661  inode = mapping->host;   in SYSCALL_DEFINE1()
[all …]
D | userfaultfd.c |
   194  struct address_space *mapping;   in __mcopy_atomic_hugetlb() local
   274  mapping = dst_vma->vm_file->f_mapping;   in __mcopy_atomic_hugetlb()
   275  hash = hugetlb_fault_mutex_hash(h, mapping, idx, dst_addr);   in __mcopy_atomic_hugetlb()