/mm/ |
D | page_counter.c |
    52   long new;  in page_counter_cancel() local
    54   new = atomic_long_sub_return(nr_pages, &counter->usage);  in page_counter_cancel()
    55   propagate_protected_usage(counter, new);  in page_counter_cancel()
    57   WARN_ON_ONCE(new < 0);  in page_counter_cancel()
    72   long new;  in page_counter_charge() local
    74   new = atomic_long_add_return(nr_pages, &c->usage);  in page_counter_charge()
    75   propagate_protected_usage(c, new);  in page_counter_charge()
    80   if (new > READ_ONCE(c->watermark))  in page_counter_charge()
    81   WRITE_ONCE(c->watermark, new);  in page_counter_charge()
    101  long new;  in page_counter_try_charge() local
    [all …]
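The page_counter.c hits show the lock-free accounting idiom: charges and cancels are atomic adds/subtracts on counter->usage, and the charge path records a high-watermark with plain READ_ONCE/WRITE_ONCE. A minimal userspace sketch of the same idea, using C11 atomics in place of the kernel's atomic_long_* and READ_ONCE/WRITE_ONCE helpers (all names here are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

/* Toy counter mirroring the usage/watermark pair in struct page_counter. */
struct toy_counter {
	atomic_long usage;      /* pages currently charged            */
	atomic_long watermark;  /* highest usage ever observed (racy) */
};

static void toy_charge(struct toy_counter *c, long nr_pages)
{
	long new = atomic_fetch_add(&c->usage, nr_pages) + nr_pages;

	/*
	 * Like page_counter_charge(): the watermark update is racy on
	 * purpose; a concurrent reader may briefly see a stale value,
	 * but the watermark only ever moves upward over time.
	 */
	if (new > atomic_load_explicit(&c->watermark, memory_order_relaxed))
		atomic_store_explicit(&c->watermark, new, memory_order_relaxed);
}

static void toy_cancel(struct toy_counter *c, long nr_pages)
{
	long new = atomic_fetch_sub(&c->usage, nr_pages) - nr_pages;

	if (new < 0)	/* underflow: a charge/cancel pair went wrong */
		fprintf(stderr, "toy_counter underflow: %ld\n", new);
}

int main(void)
{
	struct toy_counter c = { 0, 0 };

	toy_charge(&c, 4);
	toy_charge(&c, 2);
	toy_cancel(&c, 3);
	printf("usage=%ld watermark=%ld\n",
	       (long)atomic_load(&c.usage), (long)atomic_load(&c.watermark));
	return 0;
}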
|
D | mempolicy.c |
    393  void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)  in mpol_rebind_task() argument
    395  mpol_rebind_policy(tsk->mempolicy, new);  in mpol_rebind_task()
    404  void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)  in mpol_rebind_mm() argument
    411  mpol_rebind_policy(vma->vm_policy, new);  in mpol_rebind_mm()
    778  struct mempolicy *new;  in vma_replace_policy() local
    785  new = mpol_dup(pol);  in vma_replace_policy()
    786  if (IS_ERR(new))  in vma_replace_policy()
    787  return PTR_ERR(new);  in vma_replace_policy()
    791  err = vma->vm_ops->set_policy(vma, new);  in vma_replace_policy()
    801  WRITE_ONCE(vma->vm_policy, new);  in vma_replace_policy()
    [all …]
|
D | ksm.c |
    1557  struct rb_node **new;  in stable_tree_search() local
    1572  new = &root->rb_node;  in stable_tree_search()
    1575  while (*new) {  in stable_tree_search()
    1580  stable_node = rb_entry(*new, struct stable_node, node);  in stable_tree_search()
    1636  parent = *new;  in stable_tree_search()
    1638  new = &parent->rb_left;  in stable_tree_search()
    1640  new = &parent->rb_right;  in stable_tree_search()
    1705  rb_link_node(&page_node->node, parent, new);  in stable_tree_search()
    1810  struct rb_node **new;  in stable_tree_insert() local
    1820  new = &root->rb_node;  in stable_tree_insert()
    [all …]
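stable_tree_search() and stable_tree_insert() both use the standard kernel rbtree idiom: walk the tree through a `struct rb_node **new` link pointer, remember the parent, and finally hand both to rb_link_node(). The same double-pointer walk applies to any binary search tree; a self-contained sketch (a plain unbalanced BST, not the kernel's rbtree API):

#include <stdio.h>
#include <stdlib.h>

struct node {
	long key;
	struct node *left, *right;
};

/*
 * Insert by walking a pointer-to-link, the same shape as the
 * "new = &root->rb_node; while (*new) ..." loop in ksm.c, except that
 * rb_link_node()/rb_insert_color() are replaced by a plain store.
 */
static struct node *bst_insert(struct node **root, long key)
{
	struct node **new = root, *parent = NULL, *n;

	while (*new) {
		parent = *new;
		if (key < parent->key)
			new = &parent->left;
		else if (key > parent->key)
			new = &parent->right;
		else
			return parent;	/* already present */
	}

	n = calloc(1, sizeof(*n));
	if (!n)
		return NULL;
	n->key = key;
	*new = n;		/* link the node into the empty slot */
	(void)parent;		/* a real rbtree would rebalance from here */
	return n;
}

int main(void)
{
	struct node *root = NULL;
	long keys[] = { 5, 2, 8, 2 };

	for (size_t i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
		bst_insert(&root, keys[i]);
	printf("root key: %ld\n", root->key);
	return 0;
}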
|
D | list_lru.c |
    394  struct list_lru_memcg *old, *new;  in memcg_update_list_lru_node() local
    400  new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);  in memcg_update_list_lru_node()
    401  if (!new)  in memcg_update_list_lru_node()
    404  if (__memcg_init_list_lru_node(new, old_size, new_size)) {  in memcg_update_list_lru_node()
    405  kvfree(new);  in memcg_update_list_lru_node()
    409  memcpy(&new->lru, &old->lru, old_size * sizeof(void *));  in memcg_update_list_lru_node()
    419  rcu_assign_pointer(nlru->memcg_lrus, new);  in memcg_update_list_lru_node()
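memcg_update_list_lru_node() grows a per-node array with the classic RCU replace pattern: allocate a larger copy, duplicate the old contents, then publish the new pointer with rcu_assign_pointer() so readers see either the old or the new array, never a half-initialized one. A hedged userspace analogue using a C11 release store in place of rcu_assign_pointer() (the struct and sizes are invented for illustration):

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

struct slot_array {
	size_t size;
	void *slots[];		/* flexible array, like list_lru_memcg.lru[] */
};

/* Readers load this with acquire semantics; writers replace it whole. */
static _Atomic(struct slot_array *) current_slots;

static int grow_slots(size_t old_size, size_t new_size)
{
	struct slot_array *old, *new;

	new = calloc(1, sizeof(*new) + new_size * sizeof(void *));
	if (!new)
		return -1;
	new->size = new_size;

	old = atomic_load_explicit(&current_slots, memory_order_acquire);
	if (old)
		memcpy(new->slots, old->slots, old_size * sizeof(void *));

	/*
	 * The release store plays the role of rcu_assign_pointer(): all the
	 * initialization above is visible before the pointer is.  Freeing
	 * the old array would need a real grace period (kfree_rcu() in the
	 * kernel); this sketch simply leaks it.
	 */
	atomic_store_explicit(&current_slots, new, memory_order_release);
	return 0;
}

int main(void)
{
	return grow_slots(0, 8) || grow_slots(8, 16);
}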
|
D | slub.c |
    1967  struct page new;  in acquire_slab() local
    1978  new.counters = counters;  in acquire_slab()
    1979  *objects = new.objects - new.inuse;  in acquire_slab()
    1981  new.inuse = page->objects;  in acquire_slab()
    1982  new.freelist = NULL;  in acquire_slab()
    1984  new.freelist = freelist;  in acquire_slab()
    1987  VM_BUG_ON(new.frozen);  in acquire_slab()
    1988  new.frozen = 1;  in acquire_slab()
    1992  new.freelist, new.counters,  in acquire_slab()
    2217  struct page new;  in deactivate_slab() local
    [all …]
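acquire_slab() and deactivate_slab() both stage their update in an on-stack `struct page new`: the inuse/objects/frozen bitfields share storage with the single word `counters`, so the whole state can be swapped in with one compare-and-exchange against the page. A rough userspace sketch of that "build a shadow copy, then CAS the packed word" pattern (the field widths and names are made up, and the kernel additionally pairs this word with the freelist pointer via cmpxchg_double):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Packed state word, loosely mirroring the slub counters union. */
union slab_state {
	uint64_t counters;
	struct {
		uint32_t inuse;
		uint16_t objects;
		uint16_t frozen;
	};
};

static _Atomic uint64_t slab_counters;

/* Freeze the slab for a CPU: claim all objects and set frozen, atomically. */
static bool toy_acquire_slab(void)
{
	uint64_t old = atomic_load(&slab_counters);
	union slab_state new;

	do {
		new.counters = old;		/* start from a snapshot   */
		if (new.frozen)
			return false;		/* someone else owns it    */
		new.inuse = new.objects;	/* take every free object  */
		new.frozen = 1;
	} while (!atomic_compare_exchange_weak(&slab_counters,
					       &old, new.counters));
	return true;
}

int main(void)
{
	union slab_state init = { .inuse = 3, .objects = 16, .frozen = 0 };

	atomic_store(&slab_counters, init.counters);
	printf("acquired: %d\n", toy_acquire_slab());
	printf("acquired again: %d\n", toy_acquire_slab());
	return 0;
}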
|
D | migrate.c |
    192  struct page *new;  in remove_migration_pte() local
    199  new = page;  in remove_migration_pte()
    201  new = page - pvmw.page->index +  in remove_migration_pte()
    208  remove_migration_pmd(&pvmw, new);  in remove_migration_pte()
    213  get_page(new);  in remove_migration_pte()
    214  pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));  in remove_migration_pte()
    227  if (unlikely(is_device_private_page(new))) {  in remove_migration_pte()
    228  entry = make_device_private_entry(new, pte_write(pte));  in remove_migration_pte()
    237  if (PageHuge(new)) {  in remove_migration_pte()
    239  pte = arch_make_huge_pte(pte, vma, new, 0);  in remove_migration_pte()
    [all …]
|
D | nommu.c |
    1357  struct vm_area_struct *new;  in split_vma() local
    1373  new = vm_area_dup(vma);  in split_vma()
    1374  if (!new) {  in split_vma()
    1381  new->vm_region = region;  in split_vma()
    1386  region->vm_top = region->vm_end = new->vm_end = addr;  in split_vma()
    1388  region->vm_start = new->vm_start = addr;  in split_vma()
    1389  region->vm_pgoff = new->vm_pgoff += npages;  in split_vma()
    1392  if (new->vm_ops && new->vm_ops->open)  in split_vma()
    1393  new->vm_ops->open(new);  in split_vma()
    1406  add_nommu_region(new->vm_region);  in split_vma()
    [all …]
|
D | mmap.c |
    2864  struct vm_area_struct *new;  in __split_vma() local
    2873  new = vm_area_dup(vma);  in __split_vma()
    2874  if (!new)  in __split_vma()
    2878  new->vm_end = addr;  in __split_vma()
    2880  new->vm_start = addr;  in __split_vma()
    2881  new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);  in __split_vma()
    2884  err = vma_dup_policy(vma, new);  in __split_vma()
    2888  err = anon_vma_clone(new, vma);  in __split_vma()
    2892  if (new->vm_file)  in __split_vma()
    2893  get_file(new->vm_file);  in __split_vma()
    [all …]
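The nommu.c and mmap.c hits are two flavours of the same operation: duplicate the vm_area_struct, then shrink one copy to end at addr and move the other to start there, fixing vm_pgoff by the number of pages that changed sides. Stripped of the kernel's policy/anon_vma/file bookkeeping, the arithmetic looks roughly like this (a toy interval type, not struct vm_area_struct):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12

/* Toy stand-in for the few vm_area_struct fields the split touches. */
struct toy_vma {
	uint64_t vm_start;	/* inclusive, page aligned */
	uint64_t vm_end;	/* exclusive, page aligned */
	uint64_t vm_pgoff;	/* offset into the backing object, in pages */
};

/*
 * Split @vma at @addr.  Like __split_vma(): when @new_below is set the
 * new area becomes the lower half, otherwise the upper half.  Returns
 * the newly allocated piece, or NULL on failure.
 */
static struct toy_vma *toy_split_vma(struct toy_vma *vma, uint64_t addr,
				     int new_below)
{
	struct toy_vma *new;

	if (addr <= vma->vm_start || addr >= vma->vm_end)
		return NULL;

	new = malloc(sizeof(*new));
	if (!new)
		return NULL;
	*new = *vma;				/* vm_area_dup() equivalent */

	if (new_below) {
		new->vm_end = addr;
		vma->vm_start = addr;
		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
	} else {
		new->vm_start = addr;
		new->vm_pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
		vma->vm_end = addr;
	}
	return new;
}

int main(void)
{
	struct toy_vma vma = { 0x1000, 0x9000, 0 };
	struct toy_vma *hi = toy_split_vma(&vma, 0x5000, 0);

	if (!hi)
		return 1;
	printf("low:  [%#lx, %#lx) pgoff %lu\n", (unsigned long)vma.vm_start,
	       (unsigned long)vma.vm_end, (unsigned long)vma.vm_pgoff);
	printf("high: [%#lx, %#lx) pgoff %lu\n", (unsigned long)hi->vm_start,
	       (unsigned long)hi->vm_end, (unsigned long)hi->vm_pgoff);
	free(hi);
	return 0;
}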
|
D | memcontrol.c |
    410   struct memcg_shrinker_map *new, *old;  in memcg_expand_one_shrinker_map() local
    422   new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);  in memcg_expand_one_shrinker_map()
    423   if (!new)  in memcg_expand_one_shrinker_map()
    427   memset(new->map, (int)0xff, old_size);  in memcg_expand_one_shrinker_map()
    428   memset((void *)new->map + old_size, 0, size - old_size);  in memcg_expand_one_shrinker_map()
    430   rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);  in memcg_expand_one_shrinker_map()
    4329  struct mem_cgroup_threshold_ary *new;  in __mem_cgroup_usage_register_event() local
    4356  new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);  in __mem_cgroup_usage_register_event()
    4357  if (!new) {  in __mem_cgroup_usage_register_event()
    4361  new->size = size;  in __mem_cgroup_usage_register_event()
    [all …]
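memcg_expand_one_shrinker_map() uses the same allocate-then-publish scheme as the list_lru resize above, with one twist visible in lines 427-428: rather than copying the old bitmap, it fills the old-sized prefix with 0xff, conservatively treating every existing shrinker as "may have objects", and zeroes only the newly added bits. A small illustration of just that fill pattern (plain memory, RCU publication not shown):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Grow a shrinker-style bitmap from old_size to size bytes. */
static unsigned char *expand_bitmap(size_t old_size, size_t size)
{
	unsigned char *map = malloc(size);

	if (!map)
		return NULL;
	/* Old IDs: assume set, so no shrinker is missed after the switch. */
	memset(map, 0xff, old_size);
	/* New IDs: start clear, they have never been marked. */
	memset(map + old_size, 0, size - old_size);
	return map;
}

int main(void)
{
	unsigned char *map = expand_bitmap(2, 4);

	if (!map)
		return 1;
	printf("%02x %02x %02x %02x\n", map[0], map[1], map[2], map[3]);
	free(map);
	return 0;
}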
|
D | filemap.c |
    795  int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)  in replace_page_cache_page() argument
    804  VM_BUG_ON_PAGE(!PageLocked(new), new);  in replace_page_cache_page()
    805  VM_BUG_ON_PAGE(new->mapping, new);  in replace_page_cache_page()
    807  get_page(new);  in replace_page_cache_page()
    808  new->mapping = mapping;  in replace_page_cache_page()
    809  new->index = offset;  in replace_page_cache_page()
    811  mem_cgroup_migrate(old, new);  in replace_page_cache_page()
    814  xas_store(&xas, new);  in replace_page_cache_page()
    820  if (!PageHuge(new))  in replace_page_cache_page()
    821  __inc_lruvec_page_state(new, NR_FILE_PAGES);  in replace_page_cache_page()
    [all …]
|
D | memory.c |
    472  pgtable_t new = pte_alloc_one(mm);  in __pte_alloc() local
    473  if (!new)  in __pte_alloc()
    494  pmd_populate(mm, pmd, new);  in __pte_alloc()
    495  new = NULL;  in __pte_alloc()
    498  if (new)  in __pte_alloc()
    499  pte_free(mm, new);  in __pte_alloc()
    505  pte_t *new = pte_alloc_one_kernel(&init_mm);  in __pte_alloc_kernel() local
    506  if (!new)  in __pte_alloc_kernel()
    513  pmd_populate_kernel(&init_mm, pmd, new);  in __pte_alloc_kernel()
    514  new = NULL;  in __pte_alloc_kernel()
    [all …]
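__pte_alloc() shows the usual optimistic allocation pattern for page tables: allocate a pte page outside the lock, then, under the lock, install it only if the pmd is still empty; if another thread won the race, `new` stays non-NULL and is freed on the way out. The same shape in portable C, with a mutex standing in for the page-table lock and malloc/free for pte_alloc_one()/pte_free():

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *pte_page;		/* stands in for the pmd entry */

static int toy_pte_alloc(void)
{
	/* Allocate optimistically, before taking the lock. */
	void *new = calloc(1, 4096);

	if (!new)
		return -1;

	pthread_mutex_lock(&table_lock);
	if (!pte_page) {	/* still empty: install our page */
		pte_page = new;
		new = NULL;	/* ownership transferred */
	}
	pthread_mutex_unlock(&table_lock);

	/* Lost the race: someone else populated the entry first. */
	if (new)
		free(new);
	return 0;
}

int main(void)
{
	toy_pte_alloc();
	toy_pte_alloc();	/* second call frees its speculative page */
	printf("installed page at %p\n", pte_page);
	return 0;
}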
|
D | swap_cgroup.c |
    98   unsigned short old, unsigned short new)  in swap_cgroup_cmpxchg() argument
    110  sc->id = new;  in swap_cgroup_cmpxchg()
|
D | huge_memory.c |
    2996  void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)  in remove_migration_pmd() argument
    3009  get_page(new);  in remove_migration_pmd()
    3010  pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));  in remove_migration_pmd()
    3019  if (PageAnon(new))  in remove_migration_pmd()
    3020  page_add_anon_rmap(new, vma, mmun_start, true);  in remove_migration_pmd()
    3022  page_add_file_rmap(new, true);  in remove_migration_pmd()
    3024  if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))  in remove_migration_pmd()
    3025  mlock_vma_page(new);  in remove_migration_pmd()
|
D | internal.h |
    474  static inline void mlock_migrate_page(struct page *new, struct page *old) { }  in mlock_migrate_page() argument
|
D | Kconfig |
    297  deprecated interface virt_to_bus(). All new architectures
    559  This is marked experimental because it is a new feature (as of
    879  This is marked experimental because it is a new feature. Write
|
/mm/damon/ |
D | core.c |
    806  struct damon_region *new;  in damon_split_region_at() local
    808  new = damon_new_region(r->ar.start + sz_r, r->ar.end);  in damon_split_region_at()
    809  if (!new)  in damon_split_region_at()
    812  r->ar.end = new->ar.start;  in damon_split_region_at()
    814  new->age = r->age;  in damon_split_region_at()
    815  new->last_nr_accesses = r->last_nr_accesses;  in damon_split_region_at()
    817  damon_insert_region(new, r, damon_next_region(r), t);  in damon_split_region_at()
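damon_split_region_at() carves one monitoring region into two: a new region covering [start + sz_r, end) is created, the original is truncated to end where the new one begins, the age and access counters are inherited, and the new region is linked right after the old one. Leaving the list surgery aside, the split itself is just this (a toy region type, not struct damon_region):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_region {
	uint64_t start, end;		/* [start, end) address range */
	unsigned int age;
	unsigned int last_nr_accesses;
};

/* Split @r so that it keeps the first @sz bytes; return the remainder. */
static struct toy_region *toy_split_region_at(struct toy_region *r, uint64_t sz)
{
	struct toy_region *new;

	if (sz == 0 || sz >= r->end - r->start)
		return NULL;

	new = malloc(sizeof(*new));
	if (!new)
		return NULL;
	new->start = r->start + sz;
	new->end = r->end;
	r->end = new->start;			/* truncate the original */

	/* The new half inherits the monitoring state, as in core.c. */
	new->age = r->age;
	new->last_nr_accesses = r->last_nr_accesses;
	return new;
}

int main(void)
{
	struct toy_region r = { 0x1000, 0x5000, 3, 7 };
	struct toy_region *rest = toy_split_region_at(&r, 0x1000);

	if (!rest)
		return 1;
	printf("left  [%#lx, %#lx)\n", (unsigned long)r.start, (unsigned long)r.end);
	printf("right [%#lx, %#lx) age=%u\n", (unsigned long)rest->start,
	       (unsigned long)rest->end, rest->age);
	free(rest);
	return 0;
}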
|