/mm/
pagewalk.c
    12   const struct mm_walk_ops *ops = walk->ops;  [in walk_pte_range(), local]
    16   err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);  [in walk_pte_range()]
    34   const struct mm_walk_ops *ops = walk->ops;  [in walk_pmd_range(), local]
    42   if (ops->pte_hole)  [in walk_pmd_range()]
    43   err = ops->pte_hole(addr, next, walk);  [in walk_pmd_range()]
    52   if (ops->pmd_entry)  [in walk_pmd_range()]
    53   err = ops->pmd_entry(pmd, addr, next, walk);  [in walk_pmd_range()]
    61   if (!ops->pte_entry)  [in walk_pmd_range()]
    80   const struct mm_walk_ops *ops = walk->ops;  [in walk_pud_range(), local]
    88   if (ops->pte_hole)  [in walk_pud_range()]
    [all …]

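The pagewalk.c hits show the generic walker dispatching to caller-supplied mm_walk_ops callbacks and skipping any hook left NULL (lines 42, 52, 61). A minimal sketch of a walker that counts present PTEs, assuming the walk_page_range(mm, start, end, ops, private) entry point of this kernel era; the count_* names are illustrative:

    #include <linux/mm.h>
    #include <linux/pagewalk.h>

    /* Illustrative pte_entry hook: count present PTEs in the walked range. */
    static int count_pte_entry(pte_t *pte, unsigned long addr,
                               unsigned long next, struct mm_walk *walk)
    {
            unsigned long *count = walk->private;   /* caller-supplied cookie */

            if (pte_present(*pte))
                    (*count)++;
            return 0;                               /* non-zero aborts the walk */
    }

    static const struct mm_walk_ops count_walk_ops = {
            .pte_entry = count_pte_entry,
            /* pmd_entry/pte_hole left NULL: walk_pmd_range() just skips them,
             * as the checks at lines 42, 52 and 61 above show. */
    };

    static unsigned long count_present_ptes(struct mm_struct *mm,
                                            unsigned long start, unsigned long end)
    {
            unsigned long count = 0;

            down_read(&mm->mmap_sem);               /* the walker expects mmap_sem held */
            walk_page_range(mm, start, end, &count_walk_ops, &count);
            up_read(&mm->mmap_sem);
            return count;
    }
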
frontswap.c
    32   #define for_each_frontswap_ops(ops) \  [argument]
    33       for ((ops) = frontswap_ops; (ops); (ops) = (ops)->next)
    113  void frontswap_register_ops(struct frontswap_ops *ops)  [in frontswap_register_ops(), argument]
    132  ops->init(i);  [in frontswap_register_ops()]
    140  ops->next = frontswap_ops;  [in frontswap_register_ops()]
    141  } while (cmpxchg(&frontswap_ops, ops->next, ops) != ops->next);  [in frontswap_register_ops()]
    161  ops->init(i);  [in frontswap_register_ops()]
    163  ops->invalidate_area(i);  [in frontswap_register_ops()]
    193  struct frontswap_ops *ops;  [in __frontswap_init(), local]
    210  for_each_frontswap_ops(ops)  [in __frontswap_init()]
    [all …]

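The interesting frontswap.c hit is the registration path: frontswap_register_ops() pushes a new backend onto a lock-free singly linked list by pointing ops->next at the current head and swinging frontswap_ops with cmpxchg(), retrying if a concurrent registration got there first (lines 140-141); for_each_frontswap_ops() then just follows the next pointers. A userspace C analogue of that push, using C11 atomics in place of the kernel's cmpxchg() (the ops_node/ops_list names are illustrative):

    #include <stdatomic.h>
    #include <stddef.h>

    struct ops_node {
            void (*init)(unsigned type);            /* stand-in for the backend callbacks */
            struct ops_node *next;
    };

    static _Atomic(struct ops_node *) ops_list;     /* analogue of frontswap_ops */

    /* Walk the published list, analogous to for_each_frontswap_ops(). */
    #define for_each_ops(pos) \
            for ((pos) = atomic_load(&ops_list); (pos); (pos) = (pos)->next)

    /* Same shape as frontswap_register_ops(): retry until our head swap wins. */
    static void register_ops(struct ops_node *ops)
    {
            struct ops_node *head = atomic_load(&ops_list);

            do {
                    ops->next = head;               /* current head becomes our successor */
            } while (!atomic_compare_exchange_weak(&ops_list, &head, ops));
    }

cleancache.c further down takes the opposite registration policy: cmpxchg(&cleancache_ops, NULL, ops) only succeeds for the first backend to register.
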
mmu_notifier.c
    59   if (mn->ops->release)  [in __mmu_notifier_release()]
    60   mn->ops->release(mn, mm);  [in __mmu_notifier_release()]
    104  if (mn->ops->clear_flush_young)  [in __mmu_notifier_clear_flush_young()]
    105  young |= mn->ops->clear_flush_young(mn, mm, start, end);  [in __mmu_notifier_clear_flush_young()]
    121  if (mn->ops->clear_young)  [in __mmu_notifier_clear_young()]
    122  young |= mn->ops->clear_young(mn, mm, start, end);  [in __mmu_notifier_clear_young()]
    137  if (mn->ops->test_young) {  [in __mmu_notifier_test_young()]
    138  young = mn->ops->test_young(mn, mm, address);  [in __mmu_notifier_test_young()]
    156  if (mn->ops->change_pte)  [in __mmu_notifier_change_pte()]
    157  mn->ops->change_pte(mn, mm, address, pte);  [in __mmu_notifier_change_pte()]
    [all …]

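Every mmu_notifier.c hit is the same dispatch idiom: walk the notifiers registered against the mm and invoke a hook only if the driver filled it in. On the driver side that means populating just the needed members of a struct mmu_notifier_ops; a sketch with signatures taken from the call sites above (the my_* names are illustrative):

    #include <linux/mmu_notifier.h>

    /* Illustrative device driver mirroring CPU page tables. */
    struct my_mirror {
            struct mmu_notifier mn;
    };

    static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
    {
            /* The address space is going away: tear down device mappings
             * (dispatched from __mmu_notifier_release(), lines 59-60). */
    }

    static int my_clear_flush_young(struct mmu_notifier *mn, struct mm_struct *mm,
                                    unsigned long start, unsigned long end)
    {
            /* Return non-zero if the device referenced the range recently. */
            return 0;
    }

    static const struct mmu_notifier_ops my_mn_ops = {
            .release           = my_release,
            .clear_flush_young = my_clear_flush_young,
            /* clear_young, test_young, change_pte, ... left NULL; the
             * if (mn->ops->...) checks above simply skip them. */
    };

    static int my_mirror_register(struct my_mirror *m, struct mm_struct *mm)
    {
            m->mn.ops = &my_mn_ops;
            return mmu_notifier_register(&m->mn, mm);
    }
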
memremap.c
    30   if (!pgmap->ops || !pgmap->ops->page_free) {  [in devmap_managed_enable_get()]
    81   if (pgmap->ops && pgmap->ops->kill)  [in dev_pagemap_kill()]
    82   pgmap->ops->kill(pgmap);  [in dev_pagemap_kill()]
    89   if (pgmap->ops && pgmap->ops->cleanup) {  [in dev_pagemap_cleanup()]
    90   pgmap->ops->cleanup(pgmap);  [in dev_pagemap_cleanup()]
    177  if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {  [in memremap_pages()]
    199  if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))  [in memremap_pages()]
    209  if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {  [in memremap_pages()]
    452  page->pgmap->ops->page_free(page);  [in __put_devmap_managed_page()]

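The memremap.c hits show memremap_pages() treating most dev_pagemap_ops hooks as optional and checking for the ones each pgmap type requires (device-private memory, for instance, must supply page_free and migrate_to_ram, lines 30 and 177), while __put_devmap_managed_page() invokes page_free when the last reference to a ZONE_DEVICE page drops. A hedged sketch of the provider side, with signatures inferred from the call sites (my_* names illustrative, rest of the pgmap setup omitted):

    #include <linux/memremap.h>

    static void my_page_free(struct page *page)
    {
            /* Last reference dropped: hand the backing device page back to
             * our allocator (called from __put_devmap_managed_page(), line 452). */
    }

    static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
    {
            /* CPU faulted on device-private memory: migrate it back to RAM
             * (called from do_swap_page(), see the memory.c hit below). */
            return VM_FAULT_SIGBUS;                 /* placeholder */
    }

    static const struct dev_pagemap_ops my_pgmap_ops = {
            .page_free      = my_page_free,
            .migrate_to_ram = my_migrate_to_ram,
    };

    static struct dev_pagemap my_pgmap = {
            .type = MEMORY_DEVICE_PRIVATE,
            .ops  = &my_pgmap_ops,          /* memremap_pages() validates these */
    };
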
zpool.c
    24   const struct zpool_ops *ops;  [member]
    156  const struct zpool_ops *ops)  [in zpool_create_pool(), argument]
    183  zpool->pool = driver->create(name, gfp, ops, zpool);  [in zpool_create_pool()]
    184  zpool->ops = ops;  [in zpool_create_pool()]
    185  zpool->evictable = driver->shrink && ops && ops->evict;  [in zpool_create_pool()]

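zpool.c stores the caller's zpool_ops at pool creation and marks the pool evictable only when the backend driver implements shrink and the caller supplied an evict hook (line 185); the zbud.c and z3fold.c hits below are where that evict callback is actually invoked during reclaim. A sketch of the caller side, assuming the zpool_create_pool(type, name, gfp, ops) signature of this era (my_* names illustrative):

    #include <linux/zpool.h>
    #include <linux/gfp.h>

    /* Called back by the allocator when it wants to reclaim a stored object;
     * the handle identifies the entry to write back (e.g. to swap) and free. */
    static int my_evict(struct zpool *pool, unsigned long handle)
    {
            return 0;       /* report success; non-zero tells the allocator eviction failed */
    }

    static const struct zpool_ops my_zpool_ops = {
            .evict = my_evict,
    };

    static struct zpool *my_create_pool(void)
    {
            return zpool_create_pool("zbud", "my_pool", GFP_KERNEL, &my_zpool_ops);
    }

zswap is the in-tree user of this interface; its evict hook writes the compressed page back to the swap device before freeing the handle.
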
zbud.c
    100  const struct zbud_ops *ops;  [member]
    306  struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops)  [in zbud_create_pool(), argument]
    320  pool->ops = ops;  [in zbud_create_pool()]
    509  if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||  [in zbud_reclaim_page()]
    534  ret = pool->ops->evict(pool, first_handle);  [in zbud_reclaim_page()]
    539  ret = pool->ops->evict(pool, last_handle);  [in zbud_reclaim_page()]

z3fold.c
    161   const struct z3fold_ops *ops;  [member]
    766   const struct z3fold_ops *ops)  [in z3fold_create_pool(), argument]
    803   pool->ops = ops;  [in z3fold_create_pool()]
    1122  if (!pool->ops || !pool->ops->evict || retries == 0) {  [in z3fold_reclaim_page()]
    1199  ret = pool->ops->evict(pool, middle_handle);  [in z3fold_reclaim_page()]
    1204  ret = pool->ops->evict(pool, first_handle);  [in z3fold_reclaim_page()]
    1209  ret = pool->ops->evict(pool, last_handle);  [in z3fold_reclaim_page()]

cleancache.c
    51   int cleancache_register_ops(const struct cleancache_ops *ops)  [in cleancache_register_ops(), argument]
    53   if (cmpxchg(&cleancache_ops, NULL, ops))  [in cleancache_register_ops()]

hmm.c
    72   if (mirror->ops->release)  [in hmm_release()]
    73   mirror->ops->release(mirror);  [in hmm_release()]
    126  rc = mirror->ops->sync_cpu_device_pagetables(mirror, nrange);  [in hmm_invalidate_range_start()]
    182  if (!mm || !mirror || !mirror->ops)  [in hmm_mirror_register()]

mmap.c
    3410  const struct vm_operations_struct *ops)  [in __install_special_mapping(), argument]
    3425  vma->vm_ops = ops;  [in __install_special_mapping()]

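The mmap.c hit shows __install_special_mapping() attaching a caller-supplied vm_operations_struct to the freshly created VMA; ordinary drivers install vm_ops the same way from their mmap() file operation. A minimal sketch of such an ops table with a fault handler (the my_* names and the preallocated backing page are illustrative):

    #include <linux/mm.h>
    #include <linux/fs.h>

    static struct page *my_backing_page;    /* allocated elsewhere, illustrative */

    static vm_fault_t my_vm_fault(struct vm_fault *vmf)
    {
            get_page(my_backing_page);      /* hand out a reference ... */
            vmf->page = my_backing_page;    /* ... and let the core map it */
            return 0;
    }

    static const struct vm_operations_struct my_vm_ops = {
            .fault = my_vm_fault,
    };

    static int my_mmap(struct file *file, struct vm_area_struct *vma)
    {
            vma->vm_ops = &my_vm_ops;       /* same assignment as line 3425 above */
            return 0;
    }
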
shmem.c
    3883  fc->ops = &shmem_fs_context_ops;  [in shmem_init_fs_context()]

memory.c
    2791  ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);  [in do_swap_page()]