/mm/ |
D | swap_cgroup.c |
     98  unsigned short old, unsigned short new) in swap_cgroup_cmpxchg() argument
    109  if (retval == old) in swap_cgroup_cmpxchg()
    131  unsigned short old; in swap_cgroup_record() local
    139  old = sc->id; in swap_cgroup_record()
    141  VM_BUG_ON(sc->id != old); in swap_cgroup_record()
    153  return old; in swap_cgroup_record()
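
The swap_cgroup.c hits show the compare-and-exchange contract: the caller passes the id it believes is current (old) and the replacement (new), the store happens only if the slot still holds old, and the caller learns the outcome by comparing the returned value against old. Below is a minimal userspace sketch of that contract using C11 atomics; slot_id and slot_cmpxchg are hypothetical stand-ins, not the kernel's names.

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned short slot_id;   /* stands in for sc->id */

    /* Replace slot_id with new only if it still holds old; return the
     * value actually observed, mirroring cmpxchg semantics. */
    static unsigned short slot_cmpxchg(unsigned short old, unsigned short new)
    {
        unsigned short expected = old;

        atomic_compare_exchange_strong(&slot_id, &expected, new);
        return expected;   /* == old on success, the conflicting id otherwise */
    }

    int main(void)
    {
        atomic_store(&slot_id, 7);
        printf("cmpxchg(7 -> 9): saw %u\n", slot_cmpxchg(7, 9));   /* succeeds */
        printf("cmpxchg(7 -> 5): saw %u\n", slot_cmpxchg(7, 5));   /* fails: slot is 9 */
        return 0;
    }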
|
D | filemap.c |
    735  errseq_t old = READ_ONCE(file->f_wb_err); in file_check_and_advance_wb_err() local
    739  if (errseq_check(&mapping->wb_err, old)) { in file_check_and_advance_wb_err()
    742  old = file->f_wb_err; in file_check_and_advance_wb_err()
    745  trace_file_check_and_advance_wb_err(file, old); in file_check_and_advance_wb_err()
    811  int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) in replace_page_cache_page() argument
    813  struct address_space *mapping = old->mapping; in replace_page_cache_page()
    815  pgoff_t offset = old->index; in replace_page_cache_page()
    819  VM_BUG_ON_PAGE(!PageLocked(old), old); in replace_page_cache_page()
    830  old->mapping = NULL; in replace_page_cache_page()
    832  if (!PageHuge(old)) in replace_page_cache_page()
    [all …]
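
The file_check_and_advance_wb_err() hits follow a sample/check/advance shape: each file keeps a private cursor (old) into a shared writeback-error sequence, reports an error only if the sequence has moved past that cursor, then advances the cursor so the same error is reported exactly once per opener. A much-simplified sketch, assuming a bare counter where the real errseq_t packs an error code and a "seen" flag, and omitting the f_lock the kernel takes:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned int wb_err_seq;   /* shared, bumped on each writeback error */

    struct file_cursor {
        unsigned int seen;                    /* last sequence this reader consumed */
    };

    /* Report -EIO once per new error: sample the cursor, check the shared
     * sequence against it, and advance the cursor if it moved. */
    static int check_and_advance(struct file_cursor *f)
    {
        unsigned int old = f->seen;
        unsigned int cur = atomic_load(&wb_err_seq);

        if (cur == old)
            return 0;     /* nothing new since we last looked */
        f->seen = cur;    /* advance the cursor */
        return -5;        /* -EIO: an error happened since old */
    }

    int main(void)
    {
        struct file_cursor f = { .seen = atomic_load(&wb_err_seq) };

        atomic_fetch_add(&wb_err_seq, 1);     /* writeback fails somewhere */
        printf("first check:  %d\n", check_and_advance(&f));   /* -5 */
        printf("second check: %d\n", check_and_advance(&f));   /* 0, already consumed */
        return 0;
    }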
|
D | mempolicy.c |
     702  struct mempolicy *old; in vma_replace_policy() local
     720  old = vma->vm_policy; in vma_replace_policy()
     722  mpol_put(old); in vma_replace_policy()
     796  struct mempolicy *new, *old; in do_set_mempolicy() local
     816  old = current->mempolicy; in do_set_mempolicy()
     821  mpol_put(old); in do_set_mempolicy()
    1468  nodemask_t *old; in kernel_migrate_pages() local
    1475  old = &scratch->mask1; in kernel_migrate_pages()
    1478  err = get_nodes(old, old_nodes, maxnode); in kernel_migrate_pages()
    1533  err = do_migrate_pages(mm, old, new, in kernel_migrate_pages()
    [all …]
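
vma_replace_policy() and do_set_mempolicy() both use the same replace-then-put ordering: install the new policy first, and only then drop the reference on the one it displaced, so there is no window in which the slot points at a policy whose last reference is already gone. A single-threaded sketch of that ordering; struct policy and policy_put() are hypothetical stand-ins for struct mempolicy and mpol_put().

    #include <stdio.h>
    #include <stdlib.h>

    struct policy {
        int refcnt;    /* plain counter: single-threaded sketch */
        int mode;
    };

    static struct policy *policy_new(int mode)
    {
        struct policy *p = malloc(sizeof(*p));
        p->refcnt = 1;
        p->mode = mode;
        return p;
    }

    static void policy_put(struct policy *p)
    {
        if (p && --p->refcnt == 0) {
            printf("freeing policy mode=%d\n", p->mode);
            free(p);
        }
    }

    /* The vma_replace_policy() shape: switch the pointer over first
     * (under the vma lock in the kernel), drop the old reference after. */
    static void replace_policy(struct policy **slot, struct policy *new)
    {
        struct policy *old = *slot;

        *slot = new;
        policy_put(old);   /* old is only freed once nothing points at it */
    }

    int main(void)
    {
        struct policy *slot = policy_new(1);

        replace_policy(&slot, policy_new(2));
        policy_put(slot);
        return 0;
    }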
|
D | page_counter.c |
    176  unsigned long old; in page_counter_set_max() local
    195  old = xchg(&counter->max, nr_pages); in page_counter_set_max()
    200  counter->max = old; in page_counter_set_max()
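
page_counter_set_max() publishes the new limit optimistically with an atomic exchange, then rolls back to the saved old value if the current usage turns out to exceed it. A minimal sketch of that shape with C11 atomics, under hypothetical names and leaving out the retry loop the kernel wraps around this:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned long counter_max;
    static _Atomic unsigned long counter_usage;

    /* Publish the new limit, then check usage: if the limit is already
     * violated, restore the saved old limit and report failure. */
    static int set_max(unsigned long nr_pages)
    {
        unsigned long old = atomic_exchange(&counter_max, nr_pages);

        if (atomic_load(&counter_usage) <= nr_pages)
            return 0;                        /* new limit holds, keep it */
        atomic_store(&counter_max, old);     /* usage too high: roll back */
        return -16;                          /* -EBUSY */
    }

    int main(void)
    {
        atomic_store(&counter_usage, 100);
        printf("set_max(200): %d (max=%lu)\n", set_max(200), atomic_load(&counter_max));
        printf("set_max(50):  %d (max=%lu)\n", set_max(50),  atomic_load(&counter_max));
        return 0;
    }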
|
D | slab_common.c |
    192  struct memcg_cache_array *old; in free_memcg_params() local
    194  old = container_of(rcu, struct memcg_cache_array, rcu); in free_memcg_params()
    195  kvfree(old); in free_memcg_params()
    200  struct memcg_cache_array *old, *new; in update_memcg_params() local
    207  old = rcu_dereference_protected(s->memcg_params.memcg_caches, in update_memcg_params()
    209  if (old) in update_memcg_params()
    210  memcpy(new->entries, old->entries, in update_memcg_params()
    214  if (old) in update_memcg_params()
    215  call_rcu(&old->rcu, free_memcg_params); in update_memcg_params()
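
update_memcg_params() is the classic RCU resize: build a larger array off to the side, copy the old entries in, publish the new pointer, and free the old array only after a grace period via call_rcu(). The list_lru.c and memcontrol.c shrinker-map hits below follow the same shape. Here is a single-threaded sketch of the copy-and-publish sequence, using a C11 release store where the kernel uses rcu_assign_pointer() and a plain free() where the kernel must defer to call_rcu():

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct cache_array {
        size_t size;
        void *entries[];   /* flexible array, like memcg_cache_array */
    };

    static _Atomic(struct cache_array *) caches;

    /* Build a bigger copy, publish it, then dispose of the old array.
     * free() stands in for call_rcu(); with concurrent lockless readers
     * the free must wait for a grace period. */
    static int grow_caches(size_t new_size)
    {
        struct cache_array *old = atomic_load(&caches);
        struct cache_array *new = calloc(1, sizeof(*new) + new_size * sizeof(void *));

        if (!new)
            return -12;                           /* -ENOMEM */
        new->size = new_size;
        if (old)
            memcpy(new->entries, old->entries,    /* carry the old slots over */
                   old->size * sizeof(void *));
        atomic_store_explicit(&caches, new, memory_order_release);
        free(old);                                /* kernel: call_rcu(&old->rcu, ...) */
        return 0;
    }

    int main(void)
    {
        grow_caches(4);
        grow_caches(16);
        printf("size is now %zu\n", atomic_load(&caches)->size);
        return 0;
    }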
|
D | list_lru.c |
    404  struct list_lru_memcg *old, *new; in memcg_update_list_lru_node() local
    408  old = rcu_dereference_protected(nlru->memcg_lrus, in memcg_update_list_lru_node()
    419  memcpy(&new->lru, &old->lru, old_size * sizeof(void *)); in memcg_update_list_lru_node()
    432  call_rcu(&old->rcu, kvfree_rcu); in memcg_update_list_lru_node()
|
D | pgtable-generic.c |
    188  pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mknotpresent(*pmdp)); in pmdp_invalidate() local
    190  return old; in pmdp_invalidate()
|
D | page-writeback.c |
    1089  unsigned long old = wb->write_bandwidth; in wb_update_write_bandwidth() local
    1115  if (avg > old && old >= (unsigned long)bw) in wb_update_write_bandwidth()
    1116  avg -= (avg - old) >> 3; in wb_update_write_bandwidth()
    1118  if (avg < old && old <= (unsigned long)bw) in wb_update_write_bandwidth()
    1119  avg += (old - avg) >> 3; in wb_update_write_bandwidth()
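
The wb_update_write_bandwidth() hits are an exponential moving average with a 1/8 weight: avg is pulled one eighth of the way toward the previous bandwidth (old), but only when old lies between avg and the fresh sample bw, so a single outlier sample cannot drag the long-term average around. The conditional update, lifted into a standalone function with a small worked example (the smooth() wrapper is hypothetical):

    #include <stdio.h>

    /* Move the long-term average one eighth of the way toward the
     * previous value, but only when the previous value lies between
     * the average and the fresh sample. */
    static unsigned long smooth(unsigned long avg, unsigned long old, unsigned long bw)
    {
        if (avg > old && old >= bw)
            avg -= (avg - old) >> 3;
        if (avg < old && old <= bw)
            avg += (old - avg) >> 3;
        return avg;
    }

    int main(void)
    {
        /* avg 1000, last value 800, new sample 700:
         * pull avg down by (1000 - 800) / 8 = 25 */
        printf("%lu\n", smooth(1000, 800, 700));    /* 975 */
        /* new sample bounced back up to 1200: old is not between
         * avg and bw, so avg stays put */
        printf("%lu\n", smooth(1000, 800, 1200));   /* 1000 */
        return 0;
    }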
|
D | slub.c |
    2048  struct page old; in deactivate_slab() local
    2099  old.freelist = page->freelist; in deactivate_slab()
    2100  old.counters = page->counters; in deactivate_slab()
    2101  VM_BUG_ON(!old.frozen); in deactivate_slab()
    2104  new.counters = old.counters; in deactivate_slab()
    2107  set_freepointer(s, freelist, old.freelist); in deactivate_slab()
    2110  new.freelist = old.freelist; in deactivate_slab()
    2154  old.freelist, old.counters, in deactivate_slab()
    2192  struct page old; in unfreeze_partials() local
    2207  old.freelist = page->freelist; in unfreeze_partials()
    [all …]
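
deactivate_slab() and unfreeze_partials() snapshot two fields at once (old.freelist and old.counters), derive new values from the snapshot, and retry from the top if a cmpxchg_double() finds that either field changed in the meantime. A sketch of the same snapshot/build/retry loop, assuming two 32-bit halves packed into one 64-bit atomic so a plain CAS stands in for cmpxchg_double(); slab_state and take_object() are hypothetical:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* High half: freelist head index. Low half: free-object count.
     * The kernel CASes two adjacent words in struct page; packing
     * both halves into one word gives the same all-or-nothing update. */
    static _Atomic uint64_t slab_state;

    #define PACK(head, cnt)  (((uint64_t)(head) << 32) | (cnt))

    static void take_object(void)
    {
        uint64_t old, new;

        do {
            old = atomic_load(&slab_state);    /* snapshot both fields */
            uint32_t head = old >> 32;
            uint32_t cnt  = (uint32_t)old;
            new = PACK(head + 1, cnt - 1);     /* bump head, drop free count */
        } while (!atomic_compare_exchange_weak(&slab_state, &old, new));
    }

    int main(void)
    {
        atomic_store(&slab_state, PACK(0, 8));
        take_object();
        uint64_t s = atomic_load(&slab_state);
        printf("head=%u count=%u\n", (uint32_t)(s >> 32), (uint32_t)s);
        return 0;
    }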
|
D | migrate.c |
    205  unsigned long addr, void *old) in remove_migration_pte() argument
    208  .page = old, in remove_migration_pte()
    289  void remove_migration_ptes(struct page *old, struct page *new, bool locked) in remove_migration_ptes() argument
    293  .arg = old, in remove_migration_ptes()
|
D | internal.h |
    389  static inline void mlock_migrate_page(struct page *new, struct page *old) { } in mlock_migrate_page() argument
|
D | memcontrol.c |
     334  struct memcg_shrinker_map *new, *old; in memcg_expand_one_shrinker_map() local
     340  old = rcu_dereference_protected( in memcg_expand_one_shrinker_map()
     343  if (!old) in memcg_expand_one_shrinker_map()
     355  call_rcu(&old->rcu, memcg_free_shrinker_map_rcu); in memcg_expand_one_shrinker_map()
    2197  struct mem_cgroup *old = stock->cached; in drain_stock() local
    2200  page_counter_uncharge(&old->memory, stock->nr_pages); in drain_stock()
    2202  page_counter_uncharge(&old->memsw, stock->nr_pages); in drain_stock()
    2203  css_put_many(&old->css, stock->nr_pages); in drain_stock()
|
D | mmap.c |
    1283  static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, str… in reusable_anon_vma() argument
    1286  struct anon_vma *anon_vma = READ_ONCE(old->anon_vma); in reusable_anon_vma()
    1288  if (anon_vma && list_is_singular(&old->anon_vma_chain)) in reusable_anon_vma()
|
D | shmem.c |
    686  void *old; in shmem_free_swap() local
    688  old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0); in shmem_free_swap()
    689  if (old != radswap) in shmem_free_swap()
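
shmem_free_swap() clears a page-cache slot only if it still holds the expected swap entry, treating any other value as "someone got there first". A sketch of that conditional clear on a single slot, using a plain pointer CAS where the kernel uses xa_cmpxchg_irq() over the whole xarray; free_slot() is hypothetical:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Clear the slot only if it still holds the entry we expect;
     * report -ENOENT if another value is found there instead. */
    static int free_slot(_Atomic(void *) *slot, void *expected_entry)
    {
        void *old = expected_entry;

        if (!atomic_compare_exchange_strong(slot, &old, NULL))
            return -2;   /* -ENOENT: slot held old, not our entry */
        return 0;        /* entry removed, caller may release it */
    }

    int main(void)
    {
        int swap_cookie;
        _Atomic(void *) slot = &swap_cookie;

        printf("first free:  %d\n", free_slot(&slot, &swap_cookie));   /* 0 */
        printf("second free: %d\n", free_slot(&slot, &swap_cookie));   /* -2 */
        return 0;
    }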
|