/mm/  (cross-reference matches for the identifier "old")
swap_cgroup.c
      98  unsigned short old, unsigned short new)     in swap_cgroup_cmpxchg()  (argument)
     109  if (retval == old)                          in swap_cgroup_cmpxchg()
     131  unsigned short old;                         in swap_cgroup_record()  (local)
     139  old = sc->id;                               in swap_cgroup_record()
     141  VM_BUG_ON(sc->id != old);                   in swap_cgroup_record()
     153  return old;                                 in swap_cgroup_record()
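The swap_cgroup_cmpxchg() hits above show the classic compare-and-exchange shape: read the slot, compare it against the caller's expected old value, and install new only on a match. A minimal userspace C sketch of that idiom (slot_cmpxchg is a hypothetical name, and the kernel function does this under a spinlock rather than with C11 atomics):

#include <stdatomic.h>
#include <stdio.h>

/* Install `new` only if the slot still holds `old`.  Returns the value
 * actually found, so the caller can tell whether the swap happened. */
static unsigned short slot_cmpxchg(_Atomic unsigned short *slot,
                                   unsigned short old, unsigned short new)
{
        unsigned short expected = old;

        /* On failure, `expected` is overwritten with the rival value. */
        atomic_compare_exchange_strong(slot, &expected, new);
        return expected;
}

int main(void)
{
        _Atomic unsigned short id = 7;

        if (slot_cmpxchg(&id, 7, 42) == 7)
                printf("swapped, id is now %u\n", (unsigned)id);
        return 0;
}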
filemap.c
     719  errseq_t old = READ_ONCE(file->f_wb_err);   in file_check_and_advance_wb_err()  (local)
     723  if (errseq_check(&mapping->wb_err, old)) {  in file_check_and_advance_wb_err()
     726  old = file->f_wb_err;                       in file_check_and_advance_wb_err()
     729  trace_file_check_and_advance_wb_err(file, old);   in file_check_and_advance_wb_err()
     795  int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)   in replace_page_cache_page()  (argument)
     797  struct address_space *mapping = old->mapping;     in replace_page_cache_page()
     799  pgoff_t offset = old->index;                in replace_page_cache_page()
     803  VM_BUG_ON_PAGE(!PageLocked(old), old);      in replace_page_cache_page()
     811  mem_cgroup_migrate(old, new);               in replace_page_cache_page()
     816  old->mapping = NULL;                        in replace_page_cache_page()
     [all …]
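file_check_and_advance_wb_err() samples the file's cached error cursor once and asks whether the mapping's wb_err sequence has advanced past it. A toy C sketch of that sample-then-advance shape, using a plain counter instead of the kernel's errseq_t (which also packs an errno and a "seen" flag into the value):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned int wb_err;         /* shared error sequence */

/* Has the shared sequence advanced past this reader's cursor?
 * If so, advance the cursor so the error is reported only once. */
static bool check_and_advance(unsigned int *cursor)
{
        unsigned int old = atomic_load(&wb_err);    /* sample once */

        if (old == *cursor)
                return false;
        *cursor = old;
        return true;
}

int main(void)
{
        unsigned int seen = 0;

        atomic_fetch_add(&wb_err, 1);               /* a writeback error */
        printf("%d\n", check_and_advance(&seen));   /* prints 1 */
        printf("%d\n", check_and_advance(&seen));   /* prints 0 */
        return 0;
}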
mempolicy.c
     777  struct mempolicy *old;                      in vma_replace_policy()  (local)
     796  old = vma->vm_policy;                       in vma_replace_policy()
     803  mpol_put(old);                              in vma_replace_policy()
     871  struct mempolicy *new, *old;                in do_set_mempolicy()  (local)
     890  old = current->mempolicy;                   in do_set_mempolicy()
     895  mpol_put(old);                              in do_set_mempolicy()
    1522  nodemask_t *old;                            in kernel_migrate_pages()  (local)
    1529  old = &scratch->mask1;                      in kernel_migrate_pages()
    1532  err = get_nodes(old, old_nodes, maxnode);   in kernel_migrate_pages()
    1587  err = do_migrate_pages(mm, old, new,        in kernel_migrate_pages()
     [all …]
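Both vma_replace_policy() and do_set_mempolicy() follow the same replace-then-put pattern: swap the new policy into the slot first, and only then drop the reference the slot held on the old one. A compact sketch under the assumption that writers are already serialized by the caller (the kernel holds task_lock()/mmap locking here); struct policy and policy_put are illustrative stand-ins for mempolicy and mpol_put:

#include <stdatomic.h>
#include <stdlib.h>

struct policy {
        _Atomic int refcount;
        int mode;
};

static void policy_put(struct policy *p)
{
        /* Drop one reference; free on the last one. */
        if (p && atomic_fetch_sub(&p->refcount, 1) == 1)
                free(p);
}

/* Swap `new` into the slot, then put the reference the slot held on the
 * old policy.  Only after the slot no longer points at `old` is it safe
 * to drop that reference. */
static void replace_policy(struct policy **slot, struct policy *new)
{
        struct policy *old = *slot;

        *slot = new;
        policy_put(old);
}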
page_counter.c
     172  unsigned long old;                          in page_counter_set_max()  (local)
     191  old = xchg(&counter->max, nr_pages);        in page_counter_set_max()
     196  counter->max = old;                         in page_counter_set_max()
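page_counter_set_max() publishes the new limit with xchg() and rolls back to old if usage raced past it; publishing before the recheck closes the window where a concurrent charge could slip between "check" and "set". A simplified C sketch of that publish/recheck/rollback loop (the kernel version also inserts cond_resched() between retries):

#include <stdatomic.h>
#include <errno.h>

struct counter {
        _Atomic long usage;
        _Atomic long max;
};

static int counter_set_max(struct counter *c, long new_max)
{
        for (;;) {
                long usage = atomic_load(&c->usage);
                long old;

                if (usage > new_max)
                        return -EBUSY;

                /* Publish the new limit first ... */
                old = atomic_exchange(&c->max, new_max);

                /* ... then recheck: if usage grew past the snapshot while
                 * the limits were being switched, undo and retry. */
                if (atomic_load(&c->usage) <= usage)
                        return 0;

                atomic_store(&c->max, old);
        }
}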
swap_state.c
     136  void *old;                                  in add_to_swap_cache()  (local)
     154  old = xas_load(&xas);                       in add_to_swap_cache()
     155  if (xa_is_value(old)) {                     in add_to_swap_cache()
     158  *shadowp = old;                             in add_to_swap_cache()
     290  void *old;                                  in clear_shadow_from_swap_cache()  (local)
     299  xas_for_each(&xas, old, end) {              in clear_shadow_from_swap_cache()
     300  if (!xa_is_value(old))                      in clear_shadow_from_swap_cache()
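xa_is_value(old) is what lets the swap cache tell a real page pointer apart from a packed "shadow" entry left behind at eviction: value entries are integers shifted up one bit with the low bit set, so they can never alias an aligned pointer. A freestanding sketch of that tagging trick, mirroring the kernel's xa_mk_value()/xa_to_value():

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* A "value entry" is an integer shifted up with bit 0 set; an aligned
 * pointer always has bit 0 clear, so the two can never be confused. */
static inline void *make_value(unsigned long v)
{
        return (void *)((v << 1) | 1);
}

static inline bool is_value(const void *entry)
{
        return (uintptr_t)entry & 1;
}

static inline unsigned long to_value(const void *entry)
{
        return (uintptr_t)entry >> 1;
}

int main(void)
{
        void *old = make_value(42);     /* a shadow left behind at eviction */

        if (is_value(old))
                printf("shadow entry: %lu\n", to_value(old));
        return 0;
}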
list_lru.c
     394  struct list_lru_memcg *old, *new;           in memcg_update_list_lru_node()  (local)
     398  old = rcu_dereference_protected(nlru->memcg_lrus,   in memcg_update_list_lru_node()
     409  memcpy(&new->lru, &old->lru, old_size * sizeof(void *));   in memcg_update_list_lru_node()
     422  call_rcu(&old->rcu, kvfree_rcu_local);      in memcg_update_list_lru_node()
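memcg_update_list_lru_node() is a textbook RCU copy-and-replace resize: allocate the bigger array, copy the old contents, publish the new pointer, and retire the old array only after readers are done. A C sketch of the shape, with atomic_store_explicit standing in for rcu_assign_pointer() and an explicit caveat where the kernel would use call_rcu(); the struct and function names are stand-ins:

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

struct lru_array {
        size_t size;
        void *slot[];                   /* one list head per memcg */
};

/* RCU-style grow: copy into a fresh allocation, publish the new pointer
 * with release semantics, and only then retire the old copy.  Readers
 * that already loaded the old pointer keep using it safely meanwhile.
 * Assumes an existing (non-NULL) array and a single resizer. */
static int grow(_Atomic(struct lru_array *) *head, size_t new_size)
{
        struct lru_array *old = atomic_load(head);
        struct lru_array *new = malloc(sizeof(*new) + new_size * sizeof(void *));

        if (!new)
                return -1;
        new->size = new_size;
        memset(new->slot, 0, new_size * sizeof(void *));
        memcpy(new->slot, old->slot, old->size * sizeof(void *));

        atomic_store_explicit(head, new, memory_order_release);

        /* The kernel defers this via call_rcu(); a plain free() here is
         * only safe once no reader can still hold the old pointer. */
        free(old);
        return 0;
}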
pgtable-generic.c
     197  pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));   in pmdp_invalidate()  (local)
     199  return old;                                 in pmdp_invalidate()
page-writeback.c
    1086  unsigned long old = wb->write_bandwidth;    in wb_update_write_bandwidth()  (local)
    1112  if (avg > old && old >= (unsigned long)bw)  in wb_update_write_bandwidth()
    1113  avg -= (avg - old) >> 3;                    in wb_update_write_bandwidth()
    1115  if (avg < old && old <= (unsigned long)bw)  in wb_update_write_bandwidth()
    1116  avg += (old - avg) >> 3;                    in wb_update_write_bandwidth()
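The hits at 1112-1116 are the damping step of the write-bandwidth estimator: when the running average avg has moved past the previously published value old, and the fresh sample bw does not justify going further, avg is pulled back toward old by one eighth of the gap. The same step extracted into a standalone helper:

/* Pull `avg` one eighth of the way back toward the last published value
 * `old` whenever the fresh sample `bw` says the move past it was not
 * justified; >> 3 is the cheap integer form of "by 12.5%". */
static unsigned long damp_bandwidth(unsigned long avg, unsigned long old,
                                    unsigned long bw)
{
        if (avg > old && old >= bw)
                avg -= (avg - old) >> 3;
        if (avg < old && old <= bw)
                avg += (old - avg) >> 3;
        return avg;
}

For example, damp_bandwidth(1200, 1000, 900) returns 1175: the average overshot old while the new sample sits below it, so the estimate gives back (1200 - 1000) / 8 = 25 units.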
slub.c
    2218  struct page old;                            in deactivate_slab()  (local)
    2277  old.freelist = page->freelist;              in deactivate_slab()
    2278  old.counters = page->counters;              in deactivate_slab()
    2279  VM_BUG_ON(!old.frozen);                     in deactivate_slab()
    2282  new.counters = old.counters;                in deactivate_slab()
    2285  set_freepointer(s, freelist, old.freelist); in deactivate_slab()
    2288  new.freelist = old.freelist;                in deactivate_slab()
    2334  old.freelist, old.counters,                 in deactivate_slab()
    2373  struct page old;                            in unfreeze_partials()  (local)
    2388  old.freelist = page->freelist;              in unfreeze_partials()
     [all …]
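deactivate_slab() and unfreeze_partials() both run the same loop: snapshot the page's freelist/counters pair into old, derive new from it, and retry the double-word cmpxchg until nobody else has touched the pair in between. A loose C11 sketch of that retry loop; note that an _Atomic two-word struct may be implemented with an internal lock, whereas the kernel's cmpxchg_double() requires genuine hardware support, and the names here are stand-ins:

#include <stdatomic.h>

struct slab_state {
        void *freelist;
        unsigned long counters;   /* the kernel packs inuse/objects/frozen here */
};

/* Snapshot old, derive new from it, retry the two-word CAS until nobody
 * changed the pair in between.  On failure, `old` is refreshed with the
 * current contents, so each retry re-derives `new` from fresh state. */
static void update_pair(_Atomic struct slab_state *s, void *new_head)
{
        struct slab_state old, new;

        old = atomic_load(s);
        do {
                /* A real freelist would also link old.freelist behind
                 * new_head, as set_freepointer() does in slub.c. */
                new.freelist = new_head;
                new.counters = old.counters + 1;
        } while (!atomic_compare_exchange_weak(s, &old, new));
}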
memcontrol.c
     410  struct memcg_shrinker_map *new, *old;       in memcg_expand_one_shrinker_map()  (local)
     416  old = rcu_dereference_protected(            in memcg_expand_one_shrinker_map()
     419  if (!old)                                   in memcg_expand_one_shrinker_map()
     431  call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);   in memcg_expand_one_shrinker_map()
    2308  struct mem_cgroup *old = stock->cached;     in drain_stock()  (local)
    2310  if (!old)                                   in drain_stock()
    2314  page_counter_uncharge(&old->memory, stock->nr_pages);   in drain_stock()
    2316  page_counter_uncharge(&old->memsw, stock->nr_pages);    in drain_stock()
    2320  css_put(&old->css);                         in drain_stock()
    3189  struct obj_cgroup *old = stock->cached_objcg;   in drain_obj_stock()  (local)
     [all …]
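drain_stock() shows the per-CPU "stock" teardown order: return the unused precharge to the cached group's counter first, then drop the reference (css_put()) that pinned the group while the stock pointed at it. A self-contained sketch with toy stand-ins for the cgroup and counter types:

#include <stdatomic.h>
#include <stddef.h>

struct cgroup {
        _Atomic long charged;       /* pages charged to this group */
        _Atomic int  refcount;
};

static void cgroup_put(struct cgroup *cg)
{
        atomic_fetch_sub(&cg->refcount, 1);     /* freeing elided here */
}

struct stock {
        struct cgroup *cached;      /* group the precharge belongs to */
        unsigned int nr_pages;      /* precharged but unused pages    */
};

/* Return the unused precharge to the cached group's counter, then drop
 * the reference that pinned the group while the stock pointed at it. */
static void drain_stock(struct stock *stock)
{
        struct cgroup *old = stock->cached;

        if (!old)
                return;
        if (stock->nr_pages) {
                atomic_fetch_sub(&old->charged, stock->nr_pages);
                stock->nr_pages = 0;
        }
        cgroup_put(old);
        stock->cached = NULL;
}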
mremap.c
     228  int old = atomic_xchg_release(&vma->vm_ref_count, 1);   in unlock_vma_ref_count()  (local)
     234  VM_BUG_ON_VMA(old != -1, vma);              in unlock_vma_ref_count()
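unlock_vma_ref_count() swaps the unlocked value in with release ordering and uses the returned old value as a free sanity check that the count really was in the write-locked state (-1). The same two lines as a userspace sketch:

#include <stdatomic.h>
#include <assert.h>

/* Unlock by swapping the unlocked value (1) in with release ordering.
 * The returned old value doubles as a sanity check that the count was
 * really in the write-locked state (-1) when we got here. */
static void unlock_ref_count(_Atomic int *ref_count)
{
        int old = atomic_exchange_explicit(ref_count, 1,
                                           memory_order_release);

        assert(old == -1);      /* kernel: VM_BUG_ON_VMA(old != -1, vma) */
}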
migrate.c
     184  unsigned long addr, void *old)              in remove_migration_pte()  (argument)
     187  .page = old,                                in remove_migration_pte()
     272  void remove_migration_ptes(struct page *old, struct page *new, bool locked)   in remove_migration_ptes()  (argument)
     276  .arg = old,                                 in remove_migration_ptes()
internal.h
     474  static inline void mlock_migrate_page(struct page *new, struct page *old) { }   in mlock_migrate_page()  (argument)
mmap.c
    1357  static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, str…   in reusable_anon_vma()  (argument)
    1360  struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);   in reusable_anon_vma()
    1362  if (anon_vma && list_is_singular(&old->anon_vma_chain))   in reusable_anon_vma()
shmem.c
     776  void *old;                                  in shmem_free_swap()  (local)
     778  old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);   in shmem_free_swap()
     779  if (old != radswap)                         in shmem_free_swap()
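shmem_free_swap() deletes the swap entry only if the slot still holds the radswap value it expects; a mismatched old means someone else already changed it. A sketch of that erase-if-still-equal step on a bare atomic pointer (xa_cmpxchg_irq() does the same against an XArray slot, with IRQ-safe locking; the function name here is hypothetical):

#include <stdatomic.h>
#include <errno.h>

/* Erase the slot only if it still holds the entry we expect; a failed
 * compare means somebody else already replaced or removed it. */
static int free_if_matches(_Atomic(void *) *slot, void *expected)
{
        void *old = expected;

        if (!atomic_compare_exchange_strong(slot, &old, NULL))
                return -ENOENT;         /* `old` now holds the rival value */
        return 0;
}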