/mm/
D | page_counter.c |
     57  long new;                                                  in page_counter_cancel() local
     59  new = atomic_long_sub_return(nr_pages, &counter->usage);   in page_counter_cancel()
     60  propagate_protected_usage(counter, new);                   in page_counter_cancel()
     62  WARN_ON_ONCE(new < 0);                                     in page_counter_cancel()
     77  long new;                                                  in page_counter_charge() local
     79  new = atomic_long_add_return(nr_pages, &c->usage);         in page_counter_charge()
     80  propagate_protected_usage(counter, new);                   in page_counter_charge()
     85  if (new > c->watermark)                                    in page_counter_charge()
     86  c->watermark = new;                                        in page_counter_charge()
    106  long new;                                                  in page_counter_try_charge() local
    [all …]

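The excerpt is the page_counter charge/uncharge pattern: usage moves with one atomic add/sub-return, the result is propagated to parent counters, and a best-effort watermark is nudged without a lock (a racy read-modify-write the kernel tolerates). A minimal userspace sketch of that shape, assuming C11 <stdatomic.h>; struct counter and both helpers are illustrative names, not the kernel API:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct counter {
            atomic_long usage;
            long watermark;         /* best-effort, as in the excerpt */
    };

    static void counter_charge(struct counter *c, long nr_pages)
    {
            long new = atomic_fetch_add(&c->usage, nr_pages) + nr_pages;

            /* Racy on purpose: a slightly stale watermark is tolerated. */
            if (new > c->watermark)
                    c->watermark = new;
    }

    static void counter_cancel(struct counter *c, long nr_pages)
    {
            long new = atomic_fetch_sub(&c->usage, nr_pages) - nr_pages;

            assert(new >= 0);       /* stand-in for WARN_ON_ONCE() */
    }

    int main(void)
    {
            struct counter c = { .watermark = 0 };

            atomic_init(&c.usage, 0);
            counter_charge(&c, 32);
            counter_cancel(&c, 32);
            printf("watermark: %ld\n", c.watermark);        /* prints 32 */
            return 0;
    }
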
D | mempolicy.c |
    367  void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)   in mpol_rebind_task() argument
    369  mpol_rebind_policy(tsk->mempolicy, new);                                in mpol_rebind_task()
    378  void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)              in mpol_rebind_mm() argument
    384  mpol_rebind_policy(vma->vm_policy, new);                                in mpol_rebind_mm()
    703  struct mempolicy *new;                                                  in vma_replace_policy() local
    710  new = mpol_dup(pol);                                                    in vma_replace_policy()
    711  if (IS_ERR(new))                                                        in vma_replace_policy()
    712  return PTR_ERR(new);                                                    in vma_replace_policy()
    715  err = vma->vm_ops->set_policy(vma, new);                                in vma_replace_policy()
    721  vma->vm_policy = new; /* protected by mmap_sem */                       in vma_replace_policy()
    [all …]

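vma_replace_policy() follows a dup/validate/commit flow: duplicate the incoming policy, give the vma's set_policy() hook a chance to reject it, and only then swap it in. A sketch of the same error flow; every type and name below is a hypothetical stand-in, not the mempolicy API:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    struct policy { int mode; };

    struct mapping {
            struct policy *policy;
            int (*set_policy)(struct mapping *m, struct policy *p); /* optional hook */
    };

    static int mapping_replace_policy(struct mapping *m, const struct policy *pol)
    {
            struct policy *new = malloc(sizeof(*new));
            int err;

            if (!new)
                    return -ENOMEM;
            memcpy(new, pol, sizeof(*new));         /* like mpol_dup() */

            if (m->set_policy) {
                    err = m->set_policy(m, new);    /* hook may veto the policy */
                    if (err) {
                            free(new);
                            return err;
                    }
            }

            free(m->policy);                        /* drop the old policy */
            m->policy = new;                        /* commit; kernel: under mmap_sem */
            return 0;
    }

Committing only after the hook succeeds is what keeps the vma's old policy intact on every error path, as the IS_ERR/PTR_ERR lines above do.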
D | ksm.c |
    1555  struct rb_node **new;                                      in stable_tree_search() local
    1570  new = &root->rb_node;                                      in stable_tree_search()
    1573  while (*new) {                                             in stable_tree_search()
    1578  stable_node = rb_entry(*new, struct stable_node, node);    in stable_tree_search()
    1634  parent = *new;                                             in stable_tree_search()
    1636  new = &parent->rb_left;                                    in stable_tree_search()
    1638  new = &parent->rb_right;                                   in stable_tree_search()
    1703  rb_link_node(&page_node->node, parent, new);               in stable_tree_search()
    1808  struct rb_node **new;                                      in stable_tree_insert() local
    1818  new = &root->rb_node;                                      in stable_tree_insert()
    [all …]

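Both stable-tree walks use the canonical kernel rbtree idiom: descend through a struct rb_node **new that always points at the link to follow, remember the parent, then hook the new node into the empty slot. The same idiom on a plain (unbalanced) binary search tree, self-contained and without the rebalancing step:

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
            long key;
            struct node *left, *right;
    };

    static void tree_insert(struct node **root, struct node *n)
    {
            struct node **new = root, *parent = NULL;

            /* Walk down: 'new' always points at the link we would follow. */
            while (*new) {
                    parent = *new;
                    new = (n->key < parent->key) ? &parent->left : &parent->right;
            }

            /* '*new' is the empty link under 'parent'; an rbtree would now
             * call rb_link_node(node, parent, new) and rebalance. */
            n->left = n->right = NULL;
            *new = n;
    }

    int main(void)
    {
            struct node *root = NULL;
            struct node a = { .key = 2 }, b = { .key = 1 }, c = { .key = 3 };

            tree_insert(&root, &a);
            tree_insert(&root, &b);
            tree_insert(&root, &c);
            printf("root %ld, left %ld, right %ld\n",
                   root->key, root->left->key, root->right->key);  /* 2, 1, 3 */
            return 0;
    }

The double pointer removes the usual special case for an empty tree: on the first iteration *new is the root link itself.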
D | list_lru.c |
    404  struct list_lru_memcg *old, *new;                                        in memcg_update_list_lru_node() local
    410  new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);    in memcg_update_list_lru_node()
    411  if (!new)                                                                in memcg_update_list_lru_node()
    414  if (__memcg_init_list_lru_node(new, old_size, new_size)) {               in memcg_update_list_lru_node()
    415  kvfree(new);                                                             in memcg_update_list_lru_node()
    419  memcpy(&new->lru, &old->lru, old_size * sizeof(void *));                 in memcg_update_list_lru_node()
    429  rcu_assign_pointer(nlru->memcg_lrus, new);                               in memcg_update_list_lru_node()

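memcg_update_list_lru_node() grows an RCU-protected array by building a bigger copy offline and publishing it with rcu_assign_pointer(), so readers always see either the old table or a fully initialised new one. A sketch of the copy-then-publish step, mimicking rcu_assign_pointer() with a C11 release store; struct table is invented, and grace-period handling plus freeing of the old table are deliberately omitted:

    #include <stdatomic.h>
    #include <stdlib.h>
    #include <string.h>

    struct table {
            size_t size;
            void *slot[];           /* flexible array, like list_lru_memcg */
    };

    static int table_grow(struct table *_Atomic *ptr, size_t old_size, size_t new_size)
    {
            struct table *old = atomic_load_explicit(ptr, memory_order_relaxed);
            struct table *new = malloc(sizeof(*new) + new_size * sizeof(void *));

            if (!new)
                    return -1;

            new->size = new_size;
            memcpy(new->slot, old->slot, old_size * sizeof(void *));
            memset(new->slot + old_size, 0, (new_size - old_size) * sizeof(void *));

            /* Release store: readers see a fully initialised table or the old one. */
            atomic_store_explicit(ptr, new, memory_order_release);
            /* Real code waits for an RCU grace period before freeing 'old'. */
            return 0;
    }
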
D | slub.c |
    1795  struct page new;                      in acquire_slab() local
    1806  new.counters = counters;              in acquire_slab()
    1807  *objects = new.objects - new.inuse;   in acquire_slab()
    1809  new.inuse = page->objects;            in acquire_slab()
    1810  new.freelist = NULL;                  in acquire_slab()
    1812  new.freelist = freelist;              in acquire_slab()
    1815  VM_BUG_ON(new.frozen);                in acquire_slab()
    1816  new.frozen = 1;                       in acquire_slab()
    1820  new.freelist, new.counters,           in acquire_slab()
    2047  struct page new;                      in deactivate_slab() local
    [all …]

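acquire_slab() prepares a scratch struct page new on the stack and commits it with a double-word cmpxchg of freelist and counters together. A single 64-bit counters word can show the same build-a-copy-then-cmpxchg loop, assuming C11 atomics; the kernel needs cmpxchg_double precisely because freelist and counters must flip as one unit, which this sketch does not capture:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    union counters {
            uint64_t raw;
            struct {
                    uint32_t inuse;
                    uint32_t frozen;
            };
    };

    static bool freeze_slab(_Atomic uint64_t *counters, uint32_t objects)
    {
            union counters old, new;

            old.raw = atomic_load(counters);
            do {
                    new = old;                      /* start from a snapshot */
                    if (new.frozen)
                            return false;           /* someone else froze it */
                    new.inuse = objects;            /* claim every object */
                    new.frozen = 1;
            } while (!atomic_compare_exchange_weak(counters, &old.raw, new.raw));

            return true;
    }
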
D | migrate.c |
    213  struct page *new;                                              in remove_migration_pte() local
    220  new = page;                                                    in remove_migration_pte()
    222  new = page - pvmw.page->index +                                in remove_migration_pte()
    229  remove_migration_pmd(&pvmw, new);                              in remove_migration_pte()
    234  get_page(new);                                                 in remove_migration_pte()
    235  pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));    in remove_migration_pte()
    246  if (unlikely(is_zone_device_page(new))) {                      in remove_migration_pte()
    247  if (is_device_private_page(new)) {                             in remove_migration_pte()
    248  entry = make_device_private_entry(new, pte_write(pte));       in remove_migration_pte()
    254  if (PageHuge(new)) {                                           in remove_migration_pte()
    [all …]

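The interesting line is 222: for a THP the pte being restored may belong to a tail page, so the target is computed by offsetting from the head page with linear_page_index(). The index arithmetic in isolation, with hypothetical names and a toy page array:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* File page index covering 'addr' in a mapping that starts at
     * 'vm_start' and maps file pages from 'vm_pgoff' upward. */
    static uint64_t linear_index(uint64_t addr, uint64_t vm_start, uint64_t vm_pgoff)
    {
            return ((addr - vm_start) >> PAGE_SHIFT) + vm_pgoff;
    }

    int main(void)
    {
            struct { uint64_t index; } pages[8];    /* pretend compound page */
            uint64_t vm_start = 0x400000, vm_pgoff = 16;
            uint64_t addr = 0x403000;
            size_t sub;

            pages[0].index = 16;                    /* head page's file index */

            /* subpage = head + (linear index of addr - head's index) */
            sub = linear_index(addr, vm_start, vm_pgoff) - pages[0].index;
            printf("subpage %zu\n", sub);           /* prints 3 */
            return 0;
    }
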
D | mmap.c |
    2672  struct vm_area_struct *new;                                 in __split_vma() local
    2681  new = vm_area_dup(vma);                                     in __split_vma()
    2682  if (!new)                                                   in __split_vma()
    2686  new->vm_end = addr;                                         in __split_vma()
    2688  new->vm_start = addr;                                       in __split_vma()
    2689  new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);    in __split_vma()
    2692  err = vma_dup_policy(vma, new);                             in __split_vma()
    2696  err = anon_vma_clone(new, vma);                             in __split_vma()
    2700  if (new->vm_file)                                           in __split_vma()
    2701  get_file(new->vm_file);                                     in __split_vma()
    [all …]

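__split_vma() duplicates the vma, then trims the copy and the original so they meet at addr, shifting vm_pgoff of whichever half no longer starts where the file mapping began. The arithmetic on a bare interval type, with no policy, anon_vma, or file reference handling; struct region and region_split() are invented for the sketch:

    #include <stdint.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12

    struct region {
            uint64_t start, end;    /* [start, end), page aligned */
            uint64_t pgoff;         /* file offset in pages */
    };

    static struct region *region_split(struct region *r, uint64_t addr, int new_below)
    {
            struct region *new = malloc(sizeof(*new));

            if (!new)
                    return NULL;
            *new = *r;                              /* like vm_area_dup() */

            if (new_below) {
                    new->end = addr;                /* new covers the lower part */
                    r->pgoff += (addr - r->start) >> PAGE_SHIFT;
                    r->start = addr;
            } else {
                    new->start = addr;              /* new covers the upper part */
                    new->pgoff += (addr - r->start) >> PAGE_SHIFT;
                    r->end = addr;
            }
            return new;
    }

Only the half whose start address moved needs its pgoff shifted; the other half still begins at the same file offset, which is exactly what line 2689 expresses.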
D | nommu.c |
    1382  struct vm_area_struct *new;                                in split_vma() local
    1398  new = vm_area_dup(vma);                                    in split_vma()
    1399  if (!new) {                                                in split_vma()
    1406  new->vm_region = region;                                   in split_vma()
    1411  region->vm_top = region->vm_end = new->vm_end = addr;      in split_vma()
    1413  region->vm_start = new->vm_start = addr;                   in split_vma()
    1414  region->vm_pgoff = new->vm_pgoff += npages;                in split_vma()
    1417  if (new->vm_ops && new->vm_ops->open)                      in split_vma()
    1418  new->vm_ops->open(new);                                    in split_vma()
    1431  add_nommu_region(new->vm_region);                          in split_vma()
    [all …]

D | memory.c |
    427  pgtable_t new = pte_alloc_one(mm);             in __pte_alloc() local
    428  if (!new)                                      in __pte_alloc()
    449  pmd_populate(mm, pmd, new);                    in __pte_alloc()
    450  new = NULL;                                    in __pte_alloc()
    453  if (new)                                       in __pte_alloc()
    454  pte_free(mm, new);                             in __pte_alloc()
    460  pte_t *new = pte_alloc_one_kernel(&init_mm);   in __pte_alloc_kernel() local
    461  if (!new)                                      in __pte_alloc_kernel()
    468  pmd_populate_kernel(&init_mm, pmd, new);       in __pte_alloc_kernel()
    469  new = NULL;                                    in __pte_alloc_kernel()
    [all …]

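__pte_alloc() is the classic optimistic-allocation pattern: allocate the page-table page before taking the lock, install it only if the slot is still empty, and free the spare copy if another thread won the race (the "if (new) pte_free()" tail above). The same pattern in userspace, with a mutex standing in for the page-table lock; install_table() is an invented name:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int install_table(void **slot, size_t size)
    {
            void *new = calloc(1, size);    /* allocate before locking */

            if (!new)
                    return -1;

            pthread_mutex_lock(&lock);
            if (!*slot) {                   /* still empty: publish ours */
                    *slot = new;
                    new = NULL;
            }                               /* else: we lost the race */
            pthread_mutex_unlock(&lock);

            free(new);                      /* no-op if we installed it */
            return 0;                       /* success either way: a table exists */
    }

Allocating outside the lock keeps the critical section down to a pointer test and store, at the cost of an occasional wasted allocation.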
D | filemap.c |
    811  int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)   in replace_page_cache_page() argument
    820  VM_BUG_ON_PAGE(!PageLocked(new), new);                                            in replace_page_cache_page()
    821  VM_BUG_ON_PAGE(new->mapping, new);                                                in replace_page_cache_page()
    823  get_page(new);                                                                    in replace_page_cache_page()
    824  new->mapping = mapping;                                                           in replace_page_cache_page()
    825  new->index = offset;                                                              in replace_page_cache_page()
    828  xas_store(&xas, new);                                                             in replace_page_cache_page()
    833  __dec_node_page_state(new, NR_FILE_PAGES);                                        in replace_page_cache_page()
    834  if (!PageHuge(new))                                                               in replace_page_cache_page()
    835  __inc_node_page_state(new, NR_FILE_PAGES);                                        in replace_page_cache_page()
    [all …]

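replace_page_cache_page() pins the new page, swaps it into the mapping's xarray slot under the lock, and hands the old page back for release. The bare exchange on a refcounted slot; the lock, types, and helpers below are stand-ins rather than the pagecache API, and release-at-zero logic is omitted:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stddef.h>

    struct buf { atomic_int refcount; };

    static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;

    static void buf_get(struct buf *b) { atomic_fetch_add(&b->refcount, 1); }
    static void buf_put(struct buf *b) { atomic_fetch_sub(&b->refcount, 1); }

    static void slot_replace(struct buf **slot, struct buf *new)
    {
            struct buf *old;

            buf_get(new);                   /* like get_page(new) */

            pthread_mutex_lock(&slot_lock); /* like xas_lock_irqsave() */
            old = *slot;
            *slot = new;                    /* like xas_store(&xas, new) */
            pthread_mutex_unlock(&slot_lock);

            if (old)
                    buf_put(old);           /* caller-side put_page(old) */
    }
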
D | memcontrol.c |
     334  struct memcg_shrinker_map *new, *old;                          in memcg_expand_one_shrinker_map() local
     346  new = kvmalloc(sizeof(*new) + size, GFP_KERNEL);               in memcg_expand_one_shrinker_map()
     347  if (!new)                                                      in memcg_expand_one_shrinker_map()
     351  memset(new->map, (int)0xff, old_size);                         in memcg_expand_one_shrinker_map()
     352  memset((void *)new->map + old_size, 0, size - old_size);       in memcg_expand_one_shrinker_map()
     354  rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);   in memcg_expand_one_shrinker_map()
    4057  struct mem_cgroup_threshold_ary *new;                          in __mem_cgroup_usage_register_event() local
    4084  new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);    in __mem_cgroup_usage_register_event()
    4085  if (!new) {                                                    in __mem_cgroup_usage_register_event()
    4089  new->size = size;                                              in __mem_cgroup_usage_register_event()
    [all …]

D | swap_cgroup.c |
     98  unsigned short old, unsigned short new)   in swap_cgroup_cmpxchg() argument
    110  sc->id = new;                             in swap_cgroup_cmpxchg()

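swap_cgroup_cmpxchg() is a compare-and-exchange on a 16-bit owner id done under a spinlock rather than with an atomic, since the ids live packed in a map. Equivalent stand-alone logic with a mutex in the spinlock's place; names here are illustrative, not the swap_cgroup API:

    #include <pthread.h>

    static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

    static unsigned short id_cmpxchg(unsigned short *slot,
                                     unsigned short old, unsigned short new)
    {
            unsigned short retval;

            pthread_mutex_lock(&map_lock);
            retval = *slot;
            if (retval == old)
                    *slot = new;            /* swap only if unchanged */
            pthread_mutex_unlock(&map_lock);
            return retval;                  /* caller compares with 'old' */
    }
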
D | slab_common.c |
    200  struct memcg_cache_array *old, *new;                       in update_memcg_params() local
    202  new = kvzalloc(sizeof(struct memcg_cache_array) +          in update_memcg_params()
    204  if (!new)                                                  in update_memcg_params()
    210  memcpy(new->entries, old->entries,                         in update_memcg_params()
    213  rcu_assign_pointer(s->memcg_params.memcg_caches, new);     in update_memcg_params()

D | huge_memory.c |
    3056  void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)   in remove_migration_pmd() argument
    3069  get_page(new);                                                                   in remove_migration_pmd()
    3070  pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));                           in remove_migration_pmd()
    3077  if (PageAnon(new))                                                               in remove_migration_pmd()
    3078  page_add_anon_rmap(new, vma, mmun_start, true);                                  in remove_migration_pmd()
    3080  page_add_file_rmap(new, true);                                                   in remove_migration_pmd()
    3082  if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))                          in remove_migration_pmd()
    3083  mlock_vma_page(new);                                                             in remove_migration_pmd()

D | internal.h |
    389  static inline void mlock_migrate_page(struct page *new, struct page *old) { }   in mlock_migrate_page() argument

D | Kconfig |
    280  deprecated interface virt_to_bus(). All new architectures
    538  This is marked experimental because it is a new feature (as of
    722  This is marked experimental because it is a new feature. Write