/mm/ — occurrences of "mapping", grouped by file and by containing function (source line numbers preserved from the search results):
mm/truncate.c
    clear_exceptional_entry():
        26: static void clear_exceptional_entry(struct address_space *mapping,
        33:     if (shmem_mapping(mapping))
        36:     spin_lock_irq(&mapping->tree_lock);
        42:     if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
        47:     mapping->nrshadows--;
        61:     __radix_tree_delete_node(&mapping->page_tree, node);
        63:     spin_unlock_irq(&mapping->tree_lock);
    do_invalidatepage():
        86:     invalidatepage = page->mapping->a_ops->invalidatepage;
    cancel_dirty_page():
       112:     struct address_space *mapping = page->mapping;
       113:     if (mapping && mapping_cap_account_dirty(mapping)) {
    [all …]

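The clear_exceptional_entry() hits sketch a recurring rule: a shadow (exceptional) entry may only be removed after re-validating the slot under mapping->tree_lock, since an unlocked lookup could have raced with a page being re-added at the same index. A simplified sketch of that pattern with the ~3.1x-era calls shown above (drop_shadow_entry is a hypothetical name; the real function also does workingset shadow-node accounting, elided here):

    #include <linux/fs.h>
    #include <linux/radix-tree.h>

    static void drop_shadow_entry(struct address_space *mapping,
                                  pgoff_t index, void *entry)
    {
            struct radix_tree_node *node;
            void **slot;

            spin_lock_irq(&mapping->tree_lock);
            /* Re-check under tree_lock: only delete if the slot still holds entry. */
            if (__radix_tree_lookup(&mapping->page_tree, index, &node, &slot) &&
                *slot == entry) {
                    radix_tree_replace_slot(slot, NULL);
                    mapping->nrshadows--;
                    if (node)       /* frees the node if nothing is left in it */
                            __radix_tree_delete_node(&mapping->page_tree, node);
            }
            spin_unlock_irq(&mapping->tree_lock);
    }
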
mm/filemap.c
    page_cache_tree_delete():
       112: static void page_cache_tree_delete(struct address_space *mapping,
       123:     __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);
       126:     mapping->nrshadows++;
       135:     mapping->nrpages--;
       139:     mapping->page_tree.gfp_mask &= __GFP_BITS_MASK;
       149:     radix_tree_tag_clear(&mapping->page_tree, index, tag);
       158:     if (__radix_tree_delete_node(&mapping->page_tree, node))
       170:     node->private_data = mapping;
    __delete_from_page_cache():
       182:     struct address_space *mapping = page->mapping;
       193:     cleancache_invalidate_page(mapping, page);
    [all …]

mm/readahead.c
    file_ra_state_init():
        28: file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
        30:     ra->ra_pages = mapping->backing_dev_info->ra_pages;
    read_cache_pages_invalidate_page():
        44: static void read_cache_pages_invalidate_page(struct address_space *mapping,
        50:     page->mapping = mapping;
        52:     page->mapping = NULL;
    read_cache_pages_invalidate_pages():
        61: static void read_cache_pages_invalidate_pages(struct address_space *mapping,
        69:     read_cache_pages_invalidate_page(mapping, victim);
    read_cache_pages():
        83: int read_cache_pages(struct address_space *mapping, struct list_head *pages,
        92:     if (add_to_page_cache_lru(page, mapping,
        94:     read_cache_pages_invalidate_page(mapping, page);
    [all …]

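read_cache_pages() above is the generic loop filesystems use to populate a list of freshly allocated pages: each page is added to the page cache and LRU, then handed to a filler callback; on filler failure the page is invalidated via read_cache_pages_invalidate_page(). A hedged sketch of a ->readpages() implementation driving it (my_filler, my_readpage, and my_readpages are hypothetical names):

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    /* Hypothetical filler; @data is whatever the caller passed, here the file. */
    static int my_filler(void *data, struct page *page)
    {
            struct file *file = data;

            return my_readpage(file, page);     /* hypothetical per-page read */
    }

    static int my_readpages(struct file *file, struct address_space *mapping,
                            struct list_head *pages, unsigned nr_pages)
    {
            return read_cache_pages(mapping, pages, my_filler, file);
    }
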
mm/filemap_xip.c
    do_xip_mapping_read():
        52: do_xip_mapping_read(struct address_space *mapping,
        59:     struct inode *inode = mapping->host;
        65:     BUG_ON(!mapping->a_ops->get_xip_mem);
        96:     error = mapping->a_ops->get_xip_mem(mapping, index, 0,
       110:     if (mapping_writably_mapped(mapping))
    __xip_unmap():
       165: __xip_unmap (struct address_space * mapping,
       185:     mutex_lock(&mapping->i_mmap_mutex);
       186:     vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
       205:     mutex_unlock(&mapping->i_mmap_mutex);
    xip_file_fault():
       225:     struct address_space *mapping = file->f_mapping;
    [all …]

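__xip_unmap() demonstrates the file-side reverse-map walk that recurs in memory-failure.c, fremap.c, hugetlb.c, and mmap.c below: take i_mmap_mutex, then visit every VMA whose file range covers a given page offset via the interval tree. The bare idiom, sketched (for_each_mapping_vma and do_per_vma are hypothetical; later kernels renamed the lock to i_mmap_rwsem):

    #include <linux/fs.h>
    #include <linux/mm.h>

    static void for_each_mapping_vma(struct address_space *mapping, pgoff_t pgoff,
                                     void (*do_per_vma)(struct vm_area_struct *))
    {
            struct vm_area_struct *vma;

            mutex_lock(&mapping->i_mmap_mutex);
            /* Visits each VMA in the interval tree overlapping [pgoff, pgoff]. */
            vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff)
                    do_per_vma(vma);
            mutex_unlock(&mapping->i_mmap_mutex);
    }
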
mm/page-writeback.c
    balance_dirty_pages():
      1341: static void balance_dirty_pages(struct address_space *mapping,
      1357:     struct backing_dev_info *bdi = mapping->backing_dev_info;
    balance_dirty_pages_ratelimited():
      1578: void balance_dirty_pages_ratelimited(struct address_space *mapping)
      1580:     struct backing_dev_info *bdi = mapping->backing_dev_info;
      1620:     balance_dirty_pages(mapping, current->nr_dirtied);
    tag_pages_for_writeback():
      1793: void tag_pages_for_writeback(struct address_space *mapping,
      1800:     spin_lock_irq(&mapping->tree_lock);
      1801:     tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
      1804:     spin_unlock_irq(&mapping->tree_lock);
    write_cache_pages():
      1834: int write_cache_pages(struct address_space *mapping,
    [all …]

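Two conventions show up in these hits: write paths call balance_dirty_pages_ratelimited() after dirtying pages so the task is throttled against its backing device, and data-integrity writeback first tags dirty pages under tree_lock with tag_pages_for_writeback() so write_cache_pages() cannot livelock on pages dirtied mid-sweep. The writer-side half, sketched (my_dirty_one_page and my_copy_into_page are hypothetical):

    #include <linux/pagemap.h>
    #include <linux/writeback.h>

    static void my_dirty_one_page(struct address_space *mapping,
                                  struct page *page, const char *src, size_t len)
    {
            my_copy_into_page(page, src, len);  /* hypothetical data copy */
            set_page_dirty(page);
            unlock_page(page);
            /* Cheap per call; only occasionally enters balance_dirty_pages(). */
            balance_dirty_pages_ratelimited(mapping);
    }
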
mm/migrate.c
    remove_linear_migration_ptes_from_nonlinear():
       196:     struct address_space *mapping, void *arg)
       204:     &mapping->i_mmap_nonlinear, shared.nonlinear) {
    migrate_page_move_mapping():
       341: int migrate_page_move_mapping(struct address_space *mapping,
       349:     if (!mapping) {
       356:     spin_lock_irq(&mapping->tree_lock);
       358:     pslot = radix_tree_lookup_slot(&mapping->page_tree,
       363:     radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
       364:     spin_unlock_irq(&mapping->tree_lock);
       369:     spin_unlock_irq(&mapping->tree_lock);
       383:     spin_unlock_irq(&mapping->tree_lock);
    [all …]

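migrate_page_move_mapping() uses a verify-then-commit idiom: look the page's slot up under tree_lock and bail out with -EAGAIN unless both the refcount and the slot contents still match, so a racing lookup or reclaim forces a retry rather than silent corruption. Just the check, condensed (verify_page_slot is a hypothetical name; the real code also freezes the page refcount before committing):

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    static int verify_page_slot(struct address_space *mapping, struct page *page,
                                int expected_count)
    {
            void **pslot;

            spin_lock_irq(&mapping->tree_lock);
            pslot = radix_tree_lookup_slot(&mapping->page_tree, page_index(page));
            if (page_count(page) != expected_count ||
                radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
                    spin_unlock_irq(&mapping->tree_lock);
                    return -EAGAIN;     /* caller retries the migration */
            }
            /* ... commit: repoint the slot at the new page, move tags ... */
            spin_unlock_irq(&mapping->tree_lock);
            return 0;
    }
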
mm/shmem.c
    shmem_radix_tree_replace():
       258: static int shmem_radix_tree_replace(struct address_space *mapping,
       266:     pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
       269:     item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
    shmem_confirm_swap():
       283: static bool shmem_confirm_swap(struct address_space *mapping,
       289:     item = radix_tree_lookup(&mapping->page_tree, index);
    shmem_add_to_page_cache():
       298:     struct address_space *mapping,
       307:     page->mapping = mapping;
       310:     spin_lock_irq(&mapping->tree_lock);
       312:     error = radix_tree_insert(&mapping->page_tree, index, page);
       314:     error = shmem_radix_tree_replace(mapping, index, expected,
    [all …]

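shmem_add_to_page_cache() is shmem's own page-cache insertion: set page->mapping and page->index first, then under tree_lock either insert a fresh page or replace the expected swap entry via shmem_radix_tree_replace(). A reduced sketch of the plain-insert side only (add_page_at is hypothetical; real callers preload the radix tree beforehand so the locked insert cannot need a sleeping allocation):

    #include <linux/fs.h>
    #include <linux/pagemap.h>
    #include <linux/radix-tree.h>

    static int add_page_at(struct address_space *mapping, struct page *page,
                           pgoff_t index)
    {
            int error;

            page_cache_get(page);               /* reference held by the tree */
            page->mapping = mapping;
            page->index = index;

            spin_lock_irq(&mapping->tree_lock);
            error = radix_tree_insert(&mapping->page_tree, index, page);
            if (!error)
                    mapping->nrpages++;
            spin_unlock_irq(&mapping->tree_lock);

            if (error) {                        /* unwind on -EEXIST/-ENOMEM */
                    page->mapping = NULL;
                    page_cache_release(page);
            }
            return error;
    }
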
mm/fadvise.c
    SYSCALL_DEFINE4():
        31:     struct address_space *mapping;
        47:     mapping = f.file->f_mapping;
        48:     if (!mapping || len < 0) {
        53:     if (mapping->a_ops->get_xip_mem) {
        76:     bdi = mapping->backing_dev_info;
       110:     force_page_cache_readahead(mapping, f.file, start_index,
       116:     if (!bdi_write_congested(mapping->backing_dev_info))
       117:     __filemap_fdatawrite_range(mapping, offset, endbyte,
       125:     unsigned long count = invalidate_mapping_pages(mapping,
       136:     invalidate_mapping_pages(mapping, start_index,

mm/memory-failure.c
    hwpoison_filter_dev():
        81:     struct address_space *mapping;
        94:     mapping = page_mapping(p);
        95:     if (mapping == NULL || mapping->host == NULL)
        98:     dev = mapping->host->i_sb->s_dev;
    collect_procs_file():
       467:     struct address_space *mapping = page->mapping;
       469:     mutex_lock(&mapping->i_mmap_mutex);
       477:     vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
       491:     mutex_unlock(&mapping->i_mmap_mutex);
    collect_procs():
       505:     if (!page->mapping)
    me_pagecache_clean():
       593:     struct address_space *mapping;
    [all …]

mm/cleancache.c
    __cleancache_get_page():
       241:     fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
       246:     if (cleancache_get_key(page->mapping->host, &key) < 0)
    __cleancache_put_page():
       283:     fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
       290:     cleancache_get_key(page->mapping->host, &key) >= 0) {
    __cleancache_invalidate_page():
       305: void __cleancache_invalidate_page(struct address_space *mapping,
       310:     int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
       322:     if (cleancache_get_key(mapping->host, &key) >= 0) {
    __cleancache_invalidate_inode():
       340: void __cleancache_invalidate_inode(struct address_space *mapping)
       343:     int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
       354:     if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)

mm/fremap.c
    SYSCALL_DEFINE5():
       146:     struct address_space *mapping;
       219:     mapping = vma->vm_file->f_mapping;
       225:     if (mapping_cap_account_dirty(mapping)) {
       241:     mutex_lock(&mapping->i_mmap_mutex);
       242:     flush_dcache_mmap_lock(mapping);
       244:     vma_interval_tree_remove(vma, &mapping->i_mmap);
       245:     vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
       246:     flush_dcache_mmap_unlock(mapping);
       247:     mutex_unlock(&mapping->i_mmap_mutex);

mm/workingset.c
    workingset_eviction():
       213: void *workingset_eviction(struct address_space *mapping, struct page *page)
    shadow_lru_isolate():
       308:     struct address_space *mapping;
       326:     mapping = node->private_data;
       329:     if (!spin_trylock(&mapping->tree_lock)) {
       353:     BUG_ON(!mapping->nrshadows);
       354:     mapping->nrshadows--;
       359:     if (!__radix_tree_delete_node(&mapping->page_tree, node))
       362:     spin_unlock(&mapping->tree_lock);

mm/vmscan.c
    handle_write_error():
       481: static void handle_write_error(struct address_space *mapping,
       485:     if (page_mapping(page) == mapping)
       486:     mapping_set_error(mapping, error);
    pageout():
       506: static pageout_t pageout(struct page *page, struct address_space *mapping,
       527:     if (!mapping) {
       541:     if (mapping->a_ops->writepage == NULL)
       543:     if (!may_write_to_queue(mapping->backing_dev_info, sc))
       557:     res = mapping->a_ops->writepage(page, &wbc);
       559:     handle_write_error(mapping, page, res);
    __remove_mapping():
       581: static int __remove_mapping(struct address_space *mapping, struct page *page,
    [all …]

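pageout() shows how reclaim writes one dirty page back: give up if there is no mapping or no ->writepage, skip congested backing devices, then issue ->writepage() with a for_reclaim writeback_control and route errors through handle_write_error()/mapping_set_error(). Its core, sketched (write_page_for_reclaim is hypothetical; the real function returns pageout_t verdicts like PAGE_KEEP or PAGE_ACTIVATE rather than errnos):

    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/swap.h>
    #include <linux/writeback.h>

    static int write_page_for_reclaim(struct page *page,
                                      struct address_space *mapping)
    {
            struct writeback_control wbc = {
                    .sync_mode   = WB_SYNC_NONE,        /* don't wait on the I/O */
                    .nr_to_write = SWAP_CLUSTER_MAX,
                    .range_start = 0,
                    .range_end   = LLONG_MAX,
                    .for_reclaim = 1,                   /* tells the fs this is reclaim */
            };
            int res;

            if (!mapping || !mapping->a_ops->writepage)
                    return -EINVAL;

            SetPageReclaim(page);
            res = mapping->a_ops->writepage(page, &wbc);
            if (res < 0)
                    mapping_set_error(mapping, res);    /* handle_write_error()'s job */
            return res;
    }
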
mm/rmap.c
    page_get_anon_vma():
       459:     anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
    page_lock_anon_vma_read():
       503:     anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
    page_address_in_vma():
       600:     } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
       602:     vma->vm_file->f_mapping != page->mapping)
    page_mkclean():
       910:     struct address_space *mapping;
       922:     mapping = page_mapping(page);
       923:     if (!mapping)
    page_move_anon_rmap():
       953:     page->mapping = (struct address_space *) anon_vma;
    __page_set_anon_rmap():
       982:     page->mapping = (struct address_space *) anon_vma;
    try_to_unmap_nonlinear():
      1457:     struct address_space *mapping, void *arg)
    [all …]

mm/mmap.c
    __remove_shared_vm_struct():
       250:     struct file *file, struct address_space *mapping)
       255:     mapping_unmap_writable(mapping);
       257:     flush_dcache_mmap_lock(mapping);
       261:     vma_interval_tree_remove(vma, &mapping->i_mmap);
       262:     flush_dcache_mmap_unlock(mapping);
    unlink_file_vma():
       274:     struct address_space *mapping = file->f_mapping;
       275:     mutex_lock(&mapping->i_mmap_mutex);
       276:     __remove_shared_vm_struct(vma, file, mapping);
       277:     mutex_unlock(&mapping->i_mmap_mutex);
    __vma_link_file():
       674:     struct address_space *mapping = file->f_mapping;
    [all …]

mm/page_io.c
    generic_swapfile_activate():
       140:     struct address_space *mapping = swap_file->f_mapping;
       141:     struct inode *inode = mapping->host;
    __swap_writepage():
       266:     struct address_space *mapping = swap_file->f_mapping;
       286:     ret = mapping->a_ops->direct_IO(ITER_BVEC | WRITE,
    swap_readpage():
       352:     struct address_space *mapping = swap_file->f_mapping;
       354:     ret = mapping->a_ops->readpage(swap_file, page);
    swap_set_page_dirty():
       384:     struct address_space *mapping = sis->swap_file->f_mapping;
       385:     return mapping->a_ops->set_page_dirty(page);

mm/nommu.c
    add_vma_to_mm():
       709:     struct address_space *mapping;
       723:     mapping = vma->vm_file->f_mapping;
       725:     mutex_lock(&mapping->i_mmap_mutex);
       726:     flush_dcache_mmap_lock(mapping);
       727:     vma_interval_tree_insert(vma, &mapping->i_mmap);
       728:     flush_dcache_mmap_unlock(mapping);
       729:     mutex_unlock(&mapping->i_mmap_mutex);
    delete_vma_from_mm():
       777:     struct address_space *mapping;
       796:     mapping = vma->vm_file->f_mapping;
       798:     mutex_lock(&mapping->i_mmap_mutex);
    [all …]

mm/util.c
    page_mapping():
       298:     struct address_space *mapping = page->mapping;
       308:     mapping = swap_address_space(entry);
       309:     } else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
       310:     mapping = NULL;
       311:     return mapping;

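The page_mapping() hits nearly spell out the whole helper, and they explain why so many callers above use page_mapping(page) instead of reading page->mapping directly: the field is overloaded. Reassembled from the hits (the interior of the PageSwapCache branch is inferred from context, so treat it as a sketch):

    #include <linux/mm.h>
    #include <linux/pagemap.h>
    #include <linux/swap.h>

    struct address_space *page_mapping(struct page *page)
    {
            struct address_space *mapping = page->mapping;

            if (unlikely(PageSwapCache(page))) {
                    /* Swap-cache pages keep their swap slot in page->private. */
                    swp_entry_t entry;

                    entry.val = page_private(page);
                    mapping = swap_address_space(entry);
            } else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
                    mapping = NULL;     /* really an anon_vma pointer */
            return mapping;
    }
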
mm/mincore.c
    mincore_page():
        62: static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
        74:     if (shmem_mapping(mapping)) {
        75:     page = find_get_entry(mapping, pgoff);
        85:     page = find_get_page(mapping, pgoff);
        87:     page = find_get_page(mapping, pgoff);

mm/hugetlb.c
    vma_resv_map():
       439:     struct address_space *mapping = vma->vm_file->f_mapping;
       440:     struct inode *inode = mapping->host;
    free_huge_page():
       896:     page->mapping = NULL;
    unmap_ref_private():
      2790:     struct address_space *mapping;
      2800:     mapping = file_inode(vma->vm_file)->i_mapping;
      2807:     mutex_lock(&mapping->i_mmap_mutex);
      2808:     vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
      2832:     mutex_unlock(&mapping->i_mmap_mutex);
    hugetlbfs_pagecache_page():
      2965:     struct address_space *mapping;
      2968:     mapping = vma->vm_file->f_mapping;
    [all …]

mm/memory.c
    print_bad_pte():
       650:     struct address_space *mapping;
       676:     mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
       687:     (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
    zap_pte_range():
      1107:     details->check_mapping != page->mapping)
    do_page_mkwrite():
      1998:     if (!page->mapping) {
    do_wp_page():
      2140:     struct address_space *mapping;
      2146:     mapping = dirty_page->mapping;
      2149:     if (dirtied && mapping) {
      2154:     balance_dirty_pages_ratelimited(mapping);
      2163:     struct address_space *mapping = dirty_page->mapping;
    [all …]

mm/mremap.c
    move_ptes():
        95:     struct address_space *mapping = NULL;
       121:     mapping = vma->vm_file->f_mapping;
       122:     mutex_lock(&mapping->i_mmap_mutex);
       158:     if (mapping)
       159:     mutex_unlock(&mapping->i_mmap_mutex);

mm/swap.c
    pagevec_lookup_entries():
      1068:     struct address_space *mapping,
      1072:     pvec->nr = find_get_entries(mapping, start, nr_pages,
    pagevec_lookup():
      1114: unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
      1117:     pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
    pagevec_lookup_tag():
      1122: unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
      1125:     pvec->nr = find_get_pages_tag(mapping, index, tag,

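pagevec_lookup() and its siblings are thin wrappers that batch find_get_pages()/find_get_entries() results into a fixed-size pagevec. The standard consumer loop, sketched with 3.x signatures (scan_mapping is hypothetical; pagevec_init() still takes a cold argument in this era):

    #include <linux/pagemap.h>
    #include <linux/pagevec.h>
    #include <linux/sched.h>

    static void scan_mapping(struct address_space *mapping)
    {
            struct pagevec pvec;
            pgoff_t index = 0;
            unsigned int i, nr;

            pagevec_init(&pvec, 0);
            while ((nr = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE))) {
                    for (i = 0; i < nr; i++) {
                            struct page *page = pvec.pages[i];

                            index = page->index + 1;    /* resume after this page */
                            /* ... lock_page()/inspect the page here ... */
                    }
                    pagevec_release(&pvec);     /* drop find_get_pages() references */
                    cond_resched();
            }
    }
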
mm/internal.h
    (file scope):
        25: extern int __do_page_cache_readahead(struct address_space *mapping,
    ra_submit():
        33:     struct address_space *mapping, struct file *filp)
        35:     return __do_page_cache_readahead(mapping, filp,

mm/swapfile.c
    destroy_swap_extents():
      1675:     struct address_space *mapping = swap_file->f_mapping;
      1678:     mapping->a_ops->swap_deactivate(swap_file);
    setup_swap_extents():
      1762:     struct address_space *mapping = swap_file->f_mapping;
      1763:     struct inode *inode = mapping->host;
      1772:     if (mapping->a_ops->swap_activate) {
      1773:     ret = mapping->a_ops->swap_activate(sis, swap_file, span);
    SYSCALL_DEFINE1():
      1851:     struct address_space *mapping;
      1871:     mapping = victim->f_mapping;
      1875:     if (p->swap_file->f_mapping == mapping) {
      1967:     inode = mapping->host;
    [all …]