Searched refs:mapping (Results 1 – 25 of 34) sorted by relevance

/mm/

truncate.c
26 static void clear_exceptional_entry(struct address_space *mapping, in clear_exceptional_entry() argument
33 if (shmem_mapping(mapping)) in clear_exceptional_entry()
36 spin_lock_irq(&mapping->tree_lock); in clear_exceptional_entry()
42 if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot)) in clear_exceptional_entry()
47 mapping->nrshadows--; in clear_exceptional_entry()
61 __radix_tree_delete_node(&mapping->page_tree, node); in clear_exceptional_entry()
63 spin_unlock_irq(&mapping->tree_lock); in clear_exceptional_entry()
86 invalidatepage = page->mapping->a_ops->invalidatepage; in do_invalidatepage()
106 truncate_complete_page(struct address_space *mapping, struct page *page) in truncate_complete_page() argument
108 if (page->mapping != mapping) in truncate_complete_page()
[all …]
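
A note on the pattern above: clear_exceptional_entry() takes mapping->tree_lock with interrupts disabled, re-checks that the radix-tree slot still holds the expected exceptional (shadow) entry, and only then clears it and decrements mapping->nrshadows. The sketch below is a minimal user-space analogue of that check-then-clear step, with a pthread mutex standing in for the spinlock and a flat array standing in for the radix tree; toy_mapping, clear_entry and friends are illustrative names, not kernel API.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define NSLOTS 64

    /* Toy stand-in for struct address_space: one lock guards both the
     * "tree" and its shadow-entry count, mirroring tree_lock/nrshadows. */
    struct toy_mapping {
            pthread_mutex_t lock;
            void *slots[NSLOTS];
            unsigned long nrshadows;
    };

    /* Clear slot `index` only if it still holds `expected` -- the same
     * re-check-under-lock that clear_exceptional_entry() performs. */
    static bool clear_entry(struct toy_mapping *m, size_t index, void *expected)
    {
            bool cleared = false;

            pthread_mutex_lock(&m->lock);
            if (index < NSLOTS && m->slots[index] == expected) {
                    m->slots[index] = NULL;
                    m->nrshadows--;
                    cleared = true;
            }
            pthread_mutex_unlock(&m->lock);
            return cleared;
    }

    int main(void)
    {
            static int shadow;      /* dummy exceptional entry */
            struct toy_mapping m = { .lock = PTHREAD_MUTEX_INITIALIZER };

            m.slots[7] = &shadow;
            m.nrshadows = 1;

            bool cleared = clear_entry(&m, 7, &shadow);
            printf("cleared: %d, nrshadows left: %lu\n", cleared, m.nrshadows);
            return 0;
    }

The re-check under the lock is the essential part: between an unlocked lookup and the locked delete, another thread may have replaced the entry, and clearing blindly would corrupt the accounting.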

filemap.c
112 static int page_cache_tree_insert(struct address_space *mapping, in page_cache_tree_insert() argument
119 error = __radix_tree_create(&mapping->page_tree, page->index, in page_cache_tree_insert()
126 p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock); in page_cache_tree_insert()
131 mapping->nrshadows--; in page_cache_tree_insert()
136 mapping->nrpages++; in page_cache_tree_insert()
154 static void page_cache_tree_delete(struct address_space *mapping, in page_cache_tree_delete() argument
165 __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot); in page_cache_tree_delete()
176 mapping->nrshadows++; in page_cache_tree_delete()
185 mapping->nrpages--; in page_cache_tree_delete()
189 mapping->page_tree.gfp_mask &= __GFP_BITS_MASK; in page_cache_tree_delete()
[all …]

readahead.c
28 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) in file_ra_state_init() argument
30 ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages; in file_ra_state_init()
44 static void read_cache_pages_invalidate_page(struct address_space *mapping, in read_cache_pages_invalidate_page() argument
50 page->mapping = mapping; in read_cache_pages_invalidate_page()
52 page->mapping = NULL; in read_cache_pages_invalidate_page()
61 static void read_cache_pages_invalidate_pages(struct address_space *mapping, in read_cache_pages_invalidate_pages() argument
69 read_cache_pages_invalidate_page(mapping, victim); in read_cache_pages_invalidate_pages()
83 int read_cache_pages(struct address_space *mapping, struct list_head *pages, in read_cache_pages() argument
92 if (add_to_page_cache_lru(page, mapping, page->index, in read_cache_pages()
93 mapping_gfp_constraint(mapping, GFP_KERNEL))) { in read_cache_pages()
[all …]

page-writeback.c
1523 static void balance_dirty_pages(struct address_space *mapping, in balance_dirty_pages() argument
1824 void balance_dirty_pages_ratelimited(struct address_space *mapping) in balance_dirty_pages_ratelimited() argument
1826 struct inode *inode = mapping->host; in balance_dirty_pages_ratelimited()
1873 balance_dirty_pages(mapping, wb, current->nr_dirtied); in balance_dirty_pages_ratelimited()
2107 void tag_pages_for_writeback(struct address_space *mapping, in tag_pages_for_writeback() argument
2114 spin_lock_irq(&mapping->tree_lock); in tag_pages_for_writeback()
2115 tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree, in tag_pages_for_writeback()
2118 spin_unlock_irq(&mapping->tree_lock); in tag_pages_for_writeback()
2155 int write_cache_pages(struct address_space *mapping, in write_cache_pages() argument
2173 writeback_index = mapping->writeback_index; /* prev offset */ in write_cache_pages()
[all …]
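
Worth noting from the write_cache_pages() hit above (line 2173): when called in cyclic mode it resumes scanning at mapping->writeback_index, the offset where the previous pass stopped, so repeated writeback passes rotate through a large file instead of restarting at offset 0 each time. Here is a hedged user-space sketch of that resume-cursor idea over a plain array; the names and types are invented for illustration.

    #include <stdio.h>

    #define NPAGES 8

    /* Toy mapping: per-page dirty flags plus the resume cursor that
     * plays the role of mapping->writeback_index. */
    struct toy_mapping {
            int dirty[NPAGES];
            unsigned writeback_index;
    };

    /* Write back at most `budget` dirty pages, starting where the last
     * call stopped and remembering where this one stops. */
    static void writeback_some(struct toy_mapping *m, int budget)
    {
            unsigned i = m->writeback_index;

            for (unsigned scanned = 0; scanned < NPAGES && budget > 0; scanned++) {
                    if (m->dirty[i]) {
                            printf("writing page %u\n", i);
                            m->dirty[i] = 0;
                            budget--;
                    }
                    i = (i + 1) % NPAGES;
            }
            m->writeback_index = i; /* resume here on the next call */
    }

    int main(void)
    {
            struct toy_mapping m = { .dirty = { 1, 1, 1, 1, 1, 1, 1, 1 } };

            writeback_some(&m, 3);  /* writes pages 0..2 */
            writeback_some(&m, 3);  /* writes pages 3..5, not 0..2 again */
            return 0;
    }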

migrate.c
313 int migrate_page_move_mapping(struct address_space *mapping, in migrate_page_move_mapping() argument
323 if (!mapping) { in migrate_page_move_mapping()
331 newpage->mapping = page->mapping; in migrate_page_move_mapping()
341 spin_lock_irq(&mapping->tree_lock); in migrate_page_move_mapping()
343 pslot = radix_tree_lookup_slot(&mapping->page_tree, in migrate_page_move_mapping()
348 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { in migrate_page_move_mapping()
349 spin_unlock_irq(&mapping->tree_lock); in migrate_page_move_mapping()
354 spin_unlock_irq(&mapping->tree_lock); in migrate_page_move_mapping()
368 spin_unlock_irq(&mapping->tree_lock); in migrate_page_move_mapping()
378 newpage->mapping = page->mapping; in migrate_page_move_mapping()
[all …]

shmem.c
255 static int shmem_radix_tree_replace(struct address_space *mapping, in shmem_radix_tree_replace() argument
263 pslot = radix_tree_lookup_slot(&mapping->page_tree, index); in shmem_radix_tree_replace()
266 item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock); in shmem_radix_tree_replace()
280 static bool shmem_confirm_swap(struct address_space *mapping, in shmem_confirm_swap() argument
286 item = radix_tree_lookup(&mapping->page_tree, index); in shmem_confirm_swap()
295 struct address_space *mapping, in shmem_add_to_page_cache() argument
304 page->mapping = mapping; in shmem_add_to_page_cache()
307 spin_lock_irq(&mapping->tree_lock); in shmem_add_to_page_cache()
309 error = radix_tree_insert(&mapping->page_tree, index, page); in shmem_add_to_page_cache()
311 error = shmem_radix_tree_replace(mapping, index, expected, in shmem_add_to_page_cache()
[all …]

util.c
329 unsigned long mapping; in __page_rmapping() local
331 mapping = (unsigned long)page->mapping; in __page_rmapping()
332 mapping &= ~PAGE_MAPPING_FLAGS; in __page_rmapping()
334 return (void *)mapping; in __page_rmapping()
346 unsigned long mapping; in page_anon_vma() local
349 mapping = (unsigned long)page->mapping; in page_anon_vma()
350 if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) in page_anon_vma()
357 unsigned long mapping; in page_mapping() local
370 mapping = (unsigned long)page->mapping; in page_mapping()
371 if (mapping & PAGE_MAPPING_FLAGS) in page_mapping()
[all …]
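
The util.c fragments decode page->mapping by masking off PAGE_MAPPING_FLAGS: pointers stored in that field are word-aligned, so the low bits are free to tag what it currently holds (an address_space, or an anon_vma when PAGE_MAPPING_ANON is set). Below is a self-contained sketch of that low-bit pointer tagging; the TAG_* constants are illustrative stand-ins, not the kernel's actual flag values.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Pointers to word-aligned objects have zero low bits, which this
     * trick reuses as a type tag -- the PAGE_MAPPING_FLAGS idea. */
    #define TAG_ANON  0x1UL
    #define TAG_MASK  0x3UL

    struct anon_vma { int dummy; };

    /* Encode: store an anon_vma pointer with the ANON tag set, as
     * __page_set_anon_rmap() does with page->mapping. */
    static void *tag_anon(struct anon_vma *av)
    {
            return (void *)((uintptr_t)av | TAG_ANON);
    }

    /* Decode: strip the flag bits, as __page_rmapping() does. */
    static void *untag(void *mapping)
    {
            return (void *)((uintptr_t)mapping & ~TAG_MASK);
    }

    /* Classify: compare the flag bits, as page_anon_vma() does. */
    static int is_anon(void *mapping)
    {
            return ((uintptr_t)mapping & TAG_MASK) == TAG_ANON;
    }

    int main(void)
    {
            struct anon_vma av;
            void *mapping = tag_anon(&av);

            assert(is_anon(mapping));
            assert(untag(mapping) == (void *)&av);
            printf("tagged %p, recovered %p\n", mapping, untag(mapping));
            return 0;
    }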

fadvise.c
32 struct address_space *mapping; in SYSCALL_DEFINE4() local
49 mapping = f.file->f_mapping; in SYSCALL_DEFINE4()
50 if (!mapping || len < 0) { in SYSCALL_DEFINE4()
82 bdi = inode_to_bdi(mapping->host); in SYSCALL_DEFINE4()
116 force_page_cache_readahead(mapping, f.file, start_index, in SYSCALL_DEFINE4()
122 if (!inode_write_congested(mapping->host)) in SYSCALL_DEFINE4()
123 __filemap_fdatawrite_range(mapping, offset, endbyte, in SYSCALL_DEFINE4()
135 unsigned long count = invalidate_mapping_pages(mapping, in SYSCALL_DEFINE4()
146 invalidate_mapping_pages(mapping, start_index, in SYSCALL_DEFINE4()

cleancache.c
189 pool_id = page->mapping->host->i_sb->cleancache_poolid; in __cleancache_get_page()
193 if (cleancache_get_key(page->mapping->host, &key) < 0) in __cleancache_get_page()
227 pool_id = page->mapping->host->i_sb->cleancache_poolid; in __cleancache_put_page()
229 cleancache_get_key(page->mapping->host, &key) >= 0) { in __cleancache_put_page()
244 void __cleancache_invalidate_page(struct address_space *mapping, in __cleancache_invalidate_page() argument
248 int pool_id = mapping->host->i_sb->cleancache_poolid; in __cleancache_invalidate_page()
256 if (cleancache_get_key(mapping->host, &key) >= 0) { in __cleancache_invalidate_page()
274 void __cleancache_invalidate_inode(struct address_space *mapping) in __cleancache_invalidate_inode() argument
276 int pool_id = mapping->host->i_sb->cleancache_poolid; in __cleancache_invalidate_inode()
282 if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0) in __cleancache_invalidate_inode()

workingset.c
213 void *workingset_eviction(struct address_space *mapping, struct page *page) in workingset_eviction() argument
309 struct address_space *mapping; in shadow_lru_isolate() local
327 mapping = node->private_data; in shadow_lru_isolate()
330 if (!spin_trylock(&mapping->tree_lock)) { in shadow_lru_isolate()
352 BUG_ON(!mapping->nrshadows); in shadow_lru_isolate()
353 mapping->nrshadows--; in shadow_lru_isolate()
358 if (!__radix_tree_delete_node(&mapping->page_tree, node)) in shadow_lru_isolate()
361 spin_unlock(&mapping->tree_lock); in shadow_lru_isolate()

memory-failure.c
84 struct address_space *mapping; in hwpoison_filter_dev() local
97 mapping = page_mapping(p); in hwpoison_filter_dev()
98 if (mapping == NULL || mapping->host == NULL) in hwpoison_filter_dev()
101 dev = mapping->host->i_sb->s_dev; in hwpoison_filter_dev()
446 struct address_space *mapping = page->mapping; in collect_procs_file() local
448 i_mmap_lock_read(mapping); in collect_procs_file()
456 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, in collect_procs_file()
470 i_mmap_unlock_read(mapping); in collect_procs_file()
484 if (!page->mapping) in collect_procs()
584 struct address_space *mapping; in me_pagecache_clean() local
[all …]

vmscan.c
520 static void handle_write_error(struct address_space *mapping, in handle_write_error() argument
524 if (page_mapping(page) == mapping) in handle_write_error()
525 mapping_set_error(mapping, error); in handle_write_error()
545 static pageout_t pageout(struct page *page, struct address_space *mapping, in pageout() argument
566 if (!mapping) { in pageout()
580 if (mapping->a_ops->writepage == NULL) in pageout()
582 if (!may_write_to_inode(mapping->host, sc)) in pageout()
596 res = mapping->a_ops->writepage(page, &wbc); in pageout()
598 handle_write_error(mapping, page, res); in pageout()
620 static int __remove_mapping(struct address_space *mapping, struct page *page, in __remove_mapping() argument
[all …]

page_io.c
135 struct address_space *mapping = swap_file->f_mapping; in generic_swapfile_activate() local
136 struct inode *inode = mapping->host; in generic_swapfile_activate()
256 struct address_space *mapping = swap_file->f_mapping; in __swap_writepage() local
270 ret = mapping->a_ops->direct_IO(&kiocb, &from, kiocb.ki_pos); in __swap_writepage()
335 struct address_space *mapping = swap_file->f_mapping; in swap_readpage() local
337 ret = mapping->a_ops->readpage(swap_file, page); in swap_readpage()
367 struct address_space *mapping = sis->swap_file->f_mapping; in swap_set_page_dirty() local
368 return mapping->a_ops->set_page_dirty(page); in swap_set_page_dirty()

mmap.c
252 struct file *file, struct address_space *mapping) in __remove_shared_vm_struct() argument
257 mapping_unmap_writable(mapping); in __remove_shared_vm_struct()
259 flush_dcache_mmap_lock(mapping); in __remove_shared_vm_struct()
260 vma_interval_tree_remove(vma, &mapping->i_mmap); in __remove_shared_vm_struct()
261 flush_dcache_mmap_unlock(mapping); in __remove_shared_vm_struct()
273 struct address_space *mapping = file->f_mapping; in unlink_file_vma() local
274 i_mmap_lock_write(mapping); in unlink_file_vma()
275 __remove_shared_vm_struct(vma, file, mapping); in unlink_file_vma()
276 i_mmap_unlock_write(mapping); in unlink_file_vma()
673 struct address_space *mapping = file->f_mapping; in __vma_link_file() local
[all …]

rmap.c
464 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_get_anon_vma()
508 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_lock_anon_vma_read()
728 } else if (page->mapping) { in page_address_in_vma()
729 if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping) in page_address_in_vma()
1043 struct address_space *mapping; in page_mkclean() local
1055 mapping = page_mapping(page); in page_mkclean()
1056 if (!mapping) in page_mkclean()
1091 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); in page_move_anon_rmap()
1120 page->mapping = (struct address_space *) anon_vma; in __page_set_anon_rmap()
1703 struct address_space *mapping = page->mapping; in rmap_walk_file() local
[all …]

mincore.c
49 static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff) in mincore_page() argument
61 if (shmem_mapping(mapping)) { in mincore_page()
62 page = find_get_entry(mapping, pgoff); in mincore_page()
72 page = find_get_page(mapping, pgoff); in mincore_page()
74 page = find_get_page(mapping, pgoff); in mincore_page()

hugetlb.c
766 struct address_space *mapping = vma->vm_file->f_mapping; in vma_resv_map() local
767 struct inode *inode = mapping->host; in vma_resv_map()
1235 page->mapping = NULL; in free_huge_page()
3425 struct address_space *mapping; in unmap_ref_private() local
3435 mapping = file_inode(vma->vm_file)->i_mapping; in unmap_ref_private()
3442 i_mmap_lock_write(mapping); in unmap_ref_private()
3443 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { in unmap_ref_private()
3467 i_mmap_unlock_write(mapping); in unmap_ref_private()
3601 struct address_space *mapping; in hugetlbfs_pagecache_page() local
3604 mapping = vma->vm_file->f_mapping; in hugetlbfs_pagecache_page()
[all …]

nommu.c
716 struct address_space *mapping; in add_vma_to_mm() local
728 mapping = vma->vm_file->f_mapping; in add_vma_to_mm()
730 i_mmap_lock_write(mapping); in add_vma_to_mm()
731 flush_dcache_mmap_lock(mapping); in add_vma_to_mm()
732 vma_interval_tree_insert(vma, &mapping->i_mmap); in add_vma_to_mm()
733 flush_dcache_mmap_unlock(mapping); in add_vma_to_mm()
734 i_mmap_unlock_write(mapping); in add_vma_to_mm()
782 struct address_space *mapping; in delete_vma_from_mm() local
799 mapping = vma->vm_file->f_mapping; in delete_vma_from_mm()
801 i_mmap_lock_write(mapping); in delete_vma_from_mm()
[all …]

memory.c
644 struct address_space *mapping; in print_bad_pte() local
670 mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; in print_bad_pte()
681 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); in print_bad_pte()
689 mapping ? mapping->a_ops->readpage : NULL); in print_bad_pte()
1140 details->check_mapping != page->mapping) in zap_pte_range()
2062 if (!page->mapping) { in do_page_mkwrite()
2104 struct address_space *mapping; in wp_page_reuse() local
2112 mapping = page->mapping; in wp_page_reuse()
2116 if ((dirtied || page_mkwrite) && mapping) { in wp_page_reuse()
2121 balance_dirty_pages_ratelimited(mapping); in wp_page_reuse()
[all …]

swap.c
1079 struct address_space *mapping, in pagevec_lookup_entries() argument
1083 pvec->nr = find_get_entries(mapping, start, nr_pages, in pagevec_lookup_entries()
1125 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, in pagevec_lookup() argument
1128 pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages); in pagevec_lookup()
1133 unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping, in pagevec_lookup_tag() argument
1136 pvec->nr = find_get_pages_tag(mapping, index, tag, in pagevec_lookup_tag()
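
The swap.c wrappers above are thin fronts over find_get_entries()/find_get_pages(): the caller hands in a small fixed-size pagevec and gets back up to nr_pages results per call, amortizing the tree walk and locking over a batch. A toy sketch of that batched-lookup calling shape, with invented types (toy_pagevec, toy_lookup) in place of the kernel's:

    #include <stdio.h>

    #define PVEC_SIZE 14    /* the kernel's pagevec is similarly small */
    #define NPAGES 40

    struct toy_pagevec {
            unsigned nr;
            int pages[PVEC_SIZE];
    };

    /* Fill pvec with up to PVEC_SIZE "pages" starting at *start and
     * advance *start past them -- the pagevec_lookup() calling shape. */
    static unsigned toy_lookup(struct toy_pagevec *pvec, unsigned *start)
    {
            pvec->nr = 0;
            while (*start < NPAGES && pvec->nr < PVEC_SIZE) {
                    pvec->pages[pvec->nr] = (int)*start;
                    pvec->nr++;
                    (*start)++;
            }
            return pvec->nr;
    }

    int main(void)
    {
            struct toy_pagevec pvec;
            unsigned index = 0;

            /* Consumers loop until a lookup returns an empty batch. */
            while (toy_lookup(&pvec, &index))
                    printf("batch of %u starting at page %d\n", pvec.nr, pvec.pages[0]);
            return 0;
    }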

mremap.c
94 struct address_space *mapping = NULL; in move_ptes() local
122 mapping = vma->vm_file->f_mapping; in move_ptes()
123 i_mmap_lock_write(mapping); in move_ptes()
174 if (mapping) in move_ptes()
175 i_mmap_unlock_write(mapping); in move_ptes()

internal.h
45 extern int __do_page_cache_readahead(struct address_space *mapping,
53 struct address_space *mapping, struct file *filp) in ra_submit() argument
55 return __do_page_cache_readahead(mapping, filp, in ra_submit()

swapfile.c
1675 struct address_space *mapping = swap_file->f_mapping; in destroy_swap_extents() local
1678 mapping->a_ops->swap_deactivate(swap_file); in destroy_swap_extents()
1762 struct address_space *mapping = swap_file->f_mapping; in setup_swap_extents() local
1763 struct inode *inode = mapping->host; in setup_swap_extents()
1772 if (mapping->a_ops->swap_activate) { in setup_swap_extents()
1773 ret = mapping->a_ops->swap_activate(sis, swap_file, span); in setup_swap_extents()
1851 struct address_space *mapping; in SYSCALL_DEFINE1() local
1871 mapping = victim->f_mapping; in SYSCALL_DEFINE1()
1875 if (p->swap_file->f_mapping == mapping) { in SYSCALL_DEFINE1()
1967 inode = mapping->host; in SYSCALL_DEFINE1()
[all …]

Kconfig
161 such as direct mapping pages cannot be migrated. So the corresponding
575 compressed RAM pages. zsmalloc uses virtual memory mapping
582 bool "Use page table mapping to access object in zsmalloc"
585 By default, zsmalloc uses a copy-based object mapping method to
587 architecture (ex, ARM) performs VM mapping faster than copying,
589 mapping rather than copying for object mapping.
666 mapping in an O_DIRECT operation, among other things.

madvise.c
191 struct address_space *mapping) in force_shm_swapin_readahead() argument
200 page = find_get_entry(mapping, index); in force_shm_swapin_readahead()
