/mm/
swap_cgroup.c
     98  unsigned short old, unsigned short new)  in swap_cgroup_cmpxchg() argument
    109  if (retval == old)  in swap_cgroup_cmpxchg()
    131  unsigned short old;  in swap_cgroup_record() local
    139  old = sc->id;  in swap_cgroup_record()
    141  VM_BUG_ON(sc->id != old);  in swap_cgroup_record()
    153  return old;  in swap_cgroup_record()
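The swap_cgroup.c hits outline a compare-and-exchange on a per-entry id plus an unconditional record that returns the previous value. A minimal user-space sketch of that pattern; the table, lock, and function names are illustrative, not the kernel's, and a single mutex stands in for the real locking:

/* Illustrative sketch of the cmpxchg/record pattern suggested above:
 * store a new id only if the current one still matches "old"
 * (id_cmpxchg), or unconditionally swap and return the previous id
 * (id_record).  Types and locking are simplified assumptions. */
#include <pthread.h>
#include <stdio.h>

static unsigned short ids[64];          /* stand-in for a per-entry id table */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned short id_cmpxchg(unsigned int slot,
                                 unsigned short old, unsigned short new)
{
        unsigned short retval;

        pthread_mutex_lock(&lock);
        retval = ids[slot];
        if (retval == old)              /* only replace when unchanged */
                ids[slot] = new;
        pthread_mutex_unlock(&lock);
        return retval;                  /* caller checks retval == old */
}

static unsigned short id_record(unsigned int slot, unsigned short id)
{
        unsigned short old;

        pthread_mutex_lock(&lock);
        old = ids[slot];                /* remember the previous owner */
        ids[slot] = id;
        pthread_mutex_unlock(&lock);
        return old;
}

int main(void)
{
        id_record(3, 7);
        printf("cmpxchg: %u\n", id_cmpxchg(3, 7, 9));   /* succeeds, prints 7 */
        printf("record:  %u\n", id_record(3, 0));       /* prints 9 */
        return 0;
}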
page_counter.c
    138  unsigned long old;  in page_counter_limit() local
    157  old = xchg(&counter->limit, limit);  in page_counter_limit()
    162  counter->limit = old;  in page_counter_limit()
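The page_counter.c hits show a new limit being published with xchg() and the old value written back on failure. A hedged sketch of that publish-then-roll-back idea using C11 atomics; the struct, field names, and return value are assumptions for illustration, not the kernel API:

/* Sketch: publish the new limit atomically, and if current usage
 * already exceeds it, restore the previous limit ("old"). */
#include <stdatomic.h>
#include <stdio.h>
#include <errno.h>

struct counter {
        atomic_ulong usage;
        atomic_ulong limit;
};

static int counter_set_limit(struct counter *c, unsigned long limit)
{
        unsigned long old;

        old = atomic_exchange(&c->limit, limit);   /* like xchg(&counter->limit, limit) */
        if (atomic_load(&c->usage) > limit) {
                atomic_store(&c->limit, old);      /* roll back: counter->limit = old */
                return -EBUSY;
        }
        return 0;
}

int main(void)
{
        struct counter c = { .usage = 10, .limit = 100 };

        printf("%d\n", counter_set_limit(&c, 50));  /* 0: new limit accepted */
        printf("%d\n", counter_set_limit(&c, 5));   /* -EBUSY: rolled back */
        return 0;
}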
slab_common.c
    187  struct memcg_cache_array *old;  in free_memcg_params() local
    189  old = container_of(rcu, struct memcg_cache_array, rcu);  in free_memcg_params()
    190  kvfree(old);  in free_memcg_params()
    195  struct memcg_cache_array *old, *new;  in update_memcg_params() local
    202  old = rcu_dereference_protected(s->memcg_params.memcg_caches,  in update_memcg_params()
    204  if (old)  in update_memcg_params()
    205  memcpy(new->entries, old->entries,  in update_memcg_params()
    209  if (old)  in update_memcg_params()
    210  call_rcu(&old->rcu, free_memcg_params);  in update_memcg_params()
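The slab_common.c hits follow the usual grow-and-replace shape: allocate a larger array, copy the old entries across, publish the new array, and free the old one (the kernel defers that free through call_rcu). A simplified, mutex-protected sketch of the copy-and-swap resize, with illustrative names and without the RCU deferral:

/* Sketch: replace a published entries[] array with a larger copy. */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct entry_array {
        size_t size;
        void *entries[];            /* flexible array, like the kernel's entries[] */
};

static struct entry_array *caches;  /* stand-in for the published array */
static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

static int update_entries(size_t new_size)
{
        struct entry_array *old, *new;

        new = calloc(1, sizeof(*new) + new_size * sizeof(void *));
        if (!new)
                return -1;
        new->size = new_size;

        pthread_mutex_lock(&update_lock);
        old = caches;
        if (old)                     /* carry existing entries over */
                memcpy(new->entries, old->entries,
                       old->size * sizeof(void *));
        caches = new;                /* publish the larger array */
        pthread_mutex_unlock(&update_lock);

        free(old);                   /* kernel code defers this via call_rcu() */
        return 0;
}

int main(void)
{
        update_entries(4);
        caches->entries[0] = "cache0";
        update_entries(8);           /* grows while preserving slot 0 */
        printf("%s\n", (char *)caches->entries[0]);
        free(caches);
        return 0;
}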
mempolicy.c
    698  struct mempolicy *old;  in vma_replace_policy() local
    716  old = vma->vm_policy;  in vma_replace_policy()
    718  mpol_put(old);  in vma_replace_policy()
    792  struct mempolicy *new, *old;  in do_set_mempolicy() local
    812  old = current->mempolicy;  in do_set_mempolicy()
    817  mpol_put(old);  in do_set_mempolicy()
   1417  nodemask_t *old;  in SYSCALL_DEFINE4() local
   1424  old = &scratch->mask1;  in SYSCALL_DEFINE4()
   1427  err = get_nodes(old, old_nodes, maxnode);  in SYSCALL_DEFINE4()
   1491  err = do_migrate_pages(mm, old, new,  in SYSCALL_DEFINE4()
    [all …]
filemap.c
    633  errseq_t old = READ_ONCE(file->f_wb_err);  in file_check_and_advance_wb_err() local
    637  if (errseq_check(&mapping->wb_err, old)) {  in file_check_and_advance_wb_err()
    640  old = file->f_wb_err;  in file_check_and_advance_wb_err()
    643  trace_file_check_and_advance_wb_err(file, old);  in file_check_and_advance_wb_err()
    706  int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)  in replace_page_cache_page() argument
    710  VM_BUG_ON_PAGE(!PageLocked(old), old);  in replace_page_cache_page()
    716  struct address_space *mapping = old->mapping;  in replace_page_cache_page()
    720  pgoff_t offset = old->index;  in replace_page_cache_page()
    728  __delete_from_page_cache(old, NULL);  in replace_page_cache_page()
    740  mem_cgroup_migrate(old, new);  in replace_page_cache_page()
    [all …]
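The first group of filemap.c hits follows a check-and-advance idiom: a per-file snapshot ("old") of the writeback error sequence is compared against the mapping-wide value, the error is reported once when they differ, and the snapshot advances. A toy model of that flow only; it does not reproduce the kernel's errseq_t encoding, and all names here are illustrative:

/* Toy check-and-advance: report a writeback error once per file. */
#include <stdio.h>
#include <errno.h>

struct mapping { unsigned int wb_err; };      /* bumped on each writeback error */
struct file    { struct mapping *mapping; unsigned int f_wb_err; };

static int check_and_advance(struct file *f)
{
        unsigned int old = f->f_wb_err;

        if (f->mapping->wb_err != old) {       /* new error since last check? */
                f->f_wb_err = f->mapping->wb_err;
                return -EIO;                   /* report it exactly once */
        }
        return 0;
}

int main(void)
{
        struct mapping m = { .wb_err = 0 };
        struct file f = { .mapping = &m, .f_wb_err = 0 };

        m.wb_err++;                            /* a writeback error happens */
        printf("%d\n", check_and_advance(&f)); /* -EIO: reported */
        printf("%d\n", check_and_advance(&f)); /* 0: already consumed */
        return 0;
}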
list_lru.c
    345  struct list_lru_memcg *old, *new;  in memcg_update_list_lru_node() local
    349  old = nlru->memcg_lrus;  in memcg_update_list_lru_node()
    359  memcpy(new, old, old_size * sizeof(void *));  in memcg_update_list_lru_node()
    372  kvfree(old);  in memcg_update_list_lru_node()
page-writeback.c
   1090  unsigned long old = wb->write_bandwidth;  in wb_update_write_bandwidth() local
   1116  if (avg > old && old >= (unsigned long)bw)  in wb_update_write_bandwidth()
   1117  avg -= (avg - old) >> 3;  in wb_update_write_bandwidth()
   1119  if (avg < old && old <= (unsigned long)bw)  in wb_update_write_bandwidth()
   1120  avg += (old - avg) >> 3;  in wb_update_write_bandwidth()
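The page-writeback.c lines implement a damped update: when the running average has drifted past both the previous bandwidth ("old") and the freshly measured value, it is nudged one eighth of the way back toward old. A small self-contained illustration of that step with a worked value; the function and variable names are illustrative:

/* Damping step: pull avg 1/8 of the way back toward old when it has
 * overshot past both old and the new measurement bw. */
#include <stdio.h>

static unsigned long damp_avg(unsigned long avg, unsigned long old, unsigned long bw)
{
        if (avg > old && old >= bw)
                avg -= (avg - old) >> 3;    /* overshoot above: step down by 1/8 */
        if (avg < old && old <= bw)
                avg += (old - avg) >> 3;    /* overshoot below: step up by 1/8 */
        return avg;
}

int main(void)
{
        /* avg=200 overshot above old=120 while the measured bw is 100:
         * the result steps down by (200 - 120) / 8 = 10, to 190. */
        printf("%lu\n", damp_avg(200, 120, 100));
        return 0;
}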
slub.c
   2014  struct page old;  in deactivate_slab() local
   2065  old.freelist = page->freelist;  in deactivate_slab()
   2066  old.counters = page->counters;  in deactivate_slab()
   2067  VM_BUG_ON(!old.frozen);  in deactivate_slab()
   2070  new.counters = old.counters;  in deactivate_slab()
   2073  set_freepointer(s, freelist, old.freelist);  in deactivate_slab()
   2076  new.freelist = old.freelist;  in deactivate_slab()
   2131  old.freelist, old.counters,  in deactivate_slab()
   2165  struct page old;  in unfreeze_partials() local
   2180  old.freelist = page->freelist;  in unfreeze_partials()
    [all …]
migrate.c
    203  unsigned long addr, void *old)  in remove_migration_pte() argument
    206  .page = old,  in remove_migration_pte()
    289  void remove_migration_ptes(struct page *old, struct page *new, bool locked)  in remove_migration_ptes() argument
    293  .arg = old,  in remove_migration_ptes()
internal.h
    359  static inline void mlock_migrate_page(struct page *new, struct page *old) { }  in mlock_migrate_page() argument
mmap.c
   1240  static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, str…  in reusable_anon_vma() argument
   1243  struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);  in reusable_anon_vma()
   1245  if (anon_vma && list_is_singular(&old->anon_vma_chain))  in reusable_anon_vma()
memcontrol.c
   1782  struct mem_cgroup *old = stock->cached;  in drain_stock() local
   1785  page_counter_uncharge(&old->memory, stock->nr_pages);  in drain_stock()
   1787  page_counter_uncharge(&old->memsw, stock->nr_pages);  in drain_stock()
   1788  css_put_many(&old->css, stock->nr_pages);  in drain_stock()
shmem.c
    667  void *old;  in shmem_free_swap() local
    670  old = radix_tree_delete_item(&mapping->page_tree, index, radswap);  in shmem_free_swap()
    672  if (old != radswap)  in shmem_free_swap()