/mm/
D | mempolicy.c |
  437  void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,    in mpol_rebind_task() argument
  440          mpol_rebind_policy(tsk->mempolicy, new, step);    in mpol_rebind_task()
  449  void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)    in mpol_rebind_mm() argument
  455          mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);    in mpol_rebind_mm()
  714          struct mempolicy *new;    in vma_replace_policy() local
  721          new = mpol_dup(pol);    in vma_replace_policy()
  722          if (IS_ERR(new))    in vma_replace_policy()
  723                  return PTR_ERR(new);    in vma_replace_policy()
  726          err = vma->vm_ops->set_policy(vma, new);    in vma_replace_policy()
  732          vma->vm_policy = new; /* protected by mmap_sem */    in vma_replace_policy()
  [all …]
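The vma_replace_policy() excerpt shows the kernel's dup-then-install idiom: mpol_dup() returns either a valid policy or an errno encoded into the pointer itself, which the caller unpacks with IS_ERR() and PTR_ERR(). Below is a minimal userspace sketch of that encoded-error convention; ERR_PTR/IS_ERR/PTR_ERR mirror the kernel macros, while struct policy and dup_policy() are hypothetical stand-ins for struct mempolicy and mpol_dup().

/*
 * Sketch of the pointer-encoded-error idiom used by mpol_dup() above:
 * an errno lives in the top MAX_ERRNO values of the pointer range, so
 * one return value carries both the object and the error path.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct policy { int mode; };	/* stand-in for struct mempolicy */

static struct policy *dup_policy(const struct policy *old)
{
	struct policy *new = malloc(sizeof(*new));

	if (!new)
		return ERR_PTR(-ENOMEM);	/* error travels in the pointer */
	memcpy(new, old, sizeof(*new));
	return new;
}

int main(void)
{
	struct policy old = { .mode = 1 };
	struct policy *new = dup_policy(&old);

	if (IS_ERR(new))	/* the same check vma_replace_policy() makes */
		return (int)-PTR_ERR(new);
	printf("duplicated mode %d\n", new->mode);
	free(new);
	return 0;
}

The point of the encoding is that a single return value covers both outcomes, so the error path needs no out-parameter.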
D | ksm.c |
  1156         struct rb_node **new;    in stable_tree_search() local
  1171         new = &root->rb_node;    in stable_tree_search()
  1174         while (*new) {    in stable_tree_search()
  1179                 stable_node = rb_entry(*new, struct stable_node, node);    in stable_tree_search()
  1187                 parent = *new;    in stable_tree_search()
  1189                         new = &parent->rb_left;    in stable_tree_search()
  1191                         new = &parent->rb_right;    in stable_tree_search()
  1225         rb_link_node(&page_node->node, parent, new);    in stable_tree_search()
  1257         struct rb_node **new;    in stable_tree_insert() local
  1264         new = &root->rb_node;    in stable_tree_insert()
  [all …]
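stable_tree_search() and stable_tree_insert() both walk the stable tree with a struct rb_node **new cursor: the pointer always refers to the child slot the node would occupy, so when the loop ends rb_link_node() can splice the node in without a second traversal. A sketch of the same pointer-to-pointer descent on a plain (unbalanced) binary search tree, with hypothetical names; the kernel version additionally tracks the parent and rebalances through the rbtree API.

/*
 * Userspace sketch of the slot-cursor descent ksm.c uses with
 * rb_link_node(): "link" always points at the child pointer we would
 * fill, so insertion needs no re-walk after the search.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	long key;
	struct node *left, *right;
};

static struct node *tree_insert(struct node **root, long key)
{
	struct node **link = root;	/* plays the role of 'new' above */
	struct node *n;

	while (*link) {			/* descend until an empty slot */
		if (key < (*link)->key)
			link = &(*link)->left;
		else if (key > (*link)->key)
			link = &(*link)->right;
		else
			return *link;	/* already present: a stable-tree hit */
	}

	n = calloc(1, sizeof(*n));
	if (!n)
		return NULL;
	n->key = key;
	*link = n;	/* splice into the remembered slot, as rb_link_node() does */
	return n;
}

int main(void)
{
	struct node *root = NULL;
	long keys[] = { 42, 7, 99, 7 };

	for (int i = 0; i < 4; i++)
		tree_insert(&root, keys[i]);
	printf("root key: %ld\n", root->key);
	return 0;
}

Tracking the slot rather than the node is what lets stable_tree_search() double as both lookup and insertion point in a single walk.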
D | migrate.c |
  106  static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,    in remove_migration_pte() argument
  115          if (unlikely(PageHuge(new))) {    in remove_migration_pte()
  146          get_page(new);    in remove_migration_pte()
  147          pte = pte_mkold(mk_pte(new, vma->vm_page_prot));    in remove_migration_pte()
  156          if (PageHuge(new)) {    in remove_migration_pte()
  158                  pte = arch_make_huge_pte(pte, vma, new, 0);    in remove_migration_pte()
  161          flush_dcache_page(new);    in remove_migration_pte()
  164          if (PageHuge(new)) {    in remove_migration_pte()
  165                  if (PageAnon(new))    in remove_migration_pte()
  166                          hugepage_add_anon_rmap(new, vma, addr);    in remove_migration_pte()
  [all …]
D | slub.c |
  1572         struct page new;    in acquire_slab() local
  1583         new.counters = counters;    in acquire_slab()
  1584         *objects = new.objects - new.inuse;    in acquire_slab()
  1586                 new.inuse = page->objects;    in acquire_slab()
  1587                 new.freelist = NULL;    in acquire_slab()
  1589                 new.freelist = freelist;    in acquire_slab()
  1592         VM_BUG_ON(new.frozen);    in acquire_slab()
  1593         new.frozen = 1;    in acquire_slab()
  1597                         new.freelist, new.counters,    in acquire_slab()
  1822         struct page new;    in deactivate_slab() local
  [all …]
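acquire_slab() prepares a scratch struct page new holding the desired freelist and counters, then publishes the whole update with one cmpxchg_double, so concurrent CPUs observe either the old slab state or the fully frozen one. Below is a C11-atomics sketch of that snapshot/modify/compare-exchange loop; it assumes a single 32-bit counters word with a hypothetical bit layout rather than the kernel's paired freelist+counters exchange.

/*
 * Snapshot-and-cmpxchg pattern from acquire_slab(), shrunk to one
 * atomic word: bits 0-15 hold 'inuse', bit 16 holds 'frozen', and the
 * update to both is all-or-nothing.
 */
#include <stdatomic.h>
#include <stdio.h>

struct slab_counters {
	_Atomic unsigned int counters;
};

static int try_freeze(struct slab_counters *s, unsigned int objects)
{
	unsigned int old, new;

	do {
		old = atomic_load(&s->counters);
		if (old & (1u << 16))
			return 0;	/* already frozen, as VM_BUG_ON(new.frozen) guards */
		new = (old & ~0xffffu) | objects;	/* new.inuse = page->objects */
		new |= 1u << 16;			/* new.frozen = 1 */
	} while (!atomic_compare_exchange_weak(&s->counters, &old, new));

	return 1;	/* published atomically, like the cmpxchg_double */
}

int main(void)
{
	struct slab_counters s = { 5 };

	printf("froze: %d, counters now %#x\n",
	       try_freeze(&s, 16), atomic_load(&s.counters));
	return 0;
}

Working on a local copy named new and committing it in one shot is what keeps acquire_slab() lock-free against concurrent allocators.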
D | mmap.c |
  2501         struct vm_area_struct *new;    in __split_vma() local
  2508         new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);    in __split_vma()
  2509         if (!new)    in __split_vma()
  2513         *new = *vma;    in __split_vma()
  2515         INIT_LIST_HEAD(&new->anon_vma_chain);    in __split_vma()
  2518                 new->vm_end = addr;    in __split_vma()
  2520                 new->vm_start = addr;    in __split_vma()
  2521                 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);    in __split_vma()
  2524         err = vma_dup_policy(vma, new);    in __split_vma()
  2528         err = anon_vma_clone(new, vma);    in __split_vma()
  [all …]
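__split_vma() clones the whole VMA and then trims the copy: the piece above the split point keeps its file backing coherent by advancing vm_pgoff by the number of pages between the original vm_start and addr. A toy model of just that interval and offset arithmetic follows; struct toy_vma is invented, and the real function additionally duplicates the NUMA policy, clones the anon_vma chain, and relinks the rbtree.

/*
 * Toy model of the __split_vma() arithmetic: splitting a mapping
 * [vm_start, vm_end) at 'addr'. PAGE_SHIFT of 12 is assumed.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct toy_vma {
	unsigned long vm_start, vm_end;	/* [start, end), page aligned */
	unsigned long vm_pgoff;		/* offset into the backing file, in pages */
};

/* Make 'new' the piece below addr (new_below) or the piece above it. */
static void split_vma(struct toy_vma *vma, struct toy_vma *new,
		      unsigned long addr, int new_below)
{
	*new = *vma;			/* like '*new = *vma;' above */
	if (new_below) {
		new->vm_end = addr;
		vma->vm_start = addr;
		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
	} else {
		new->vm_start = addr;
		new->vm_pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
		vma->vm_end = addr;
	}
}

int main(void)
{
	struct toy_vma vma = { 0x1000, 0x5000, 10 }, new;

	split_vma(&vma, &new, 0x3000, 0);
	assert(new.vm_start == 0x3000 && new.vm_pgoff == 12);
	printf("low [%#lx,%#lx) pgoff %lu, high [%#lx,%#lx) pgoff %lu\n",
	       vma.vm_start, vma.vm_end, vma.vm_pgoff,
	       new.vm_start, new.vm_end, new.vm_pgoff);
	return 0;
}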
D | nommu.c |
  1549         struct vm_area_struct *new;    in split_vma() local
  1567         new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);    in split_vma()
  1568         if (!new) {    in split_vma()
  1574         *new = *vma;    in split_vma()
  1576         new->vm_region = region;    in split_vma()
  1581                 region->vm_top = region->vm_end = new->vm_end = addr;    in split_vma()
  1583                 region->vm_start = new->vm_start = addr;    in split_vma()
  1584                 region->vm_pgoff = new->vm_pgoff += npages;    in split_vma()
  1587         if (new->vm_ops && new->vm_ops->open)    in split_vma()
  1588                 new->vm_ops->open(new);    in split_vma()
  [all …]
D | filemap.c |
  463  int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)    in replace_page_cache_page() argument
  468          VM_BUG_ON_PAGE(!PageLocked(new), new);    in replace_page_cache_page()
  469          VM_BUG_ON_PAGE(new->mapping, new);    in replace_page_cache_page()
  479          page_cache_get(new);    in replace_page_cache_page()
  480          new->mapping = mapping;    in replace_page_cache_page()
  481          new->index = offset;    in replace_page_cache_page()
  485          error = radix_tree_insert(&mapping->page_tree, offset, new);    in replace_page_cache_page()
  488          __inc_zone_page_state(new, NR_FILE_PAGES);    in replace_page_cache_page()
  489          if (PageSwapBacked(new))    in replace_page_cache_page()
  490                  __inc_zone_page_state(new, NR_SHMEM);    in replace_page_cache_page()
  [all …]
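replace_page_cache_page() gives the new page the old one's identity (mapping and index) plus a reference before swapping the radix-tree slot under the tree lock, then moves the NR_FILE_PAGES/NR_SHMEM accounting across. A sketch of that choreography against a toy array-backed "page cache"; every name and type here is a stand-in, not a kernel structure.

/*
 * Toy page-cache slot replacement: the new page acquires identity and
 * a reference before the swap, the old page's cache reference is
 * dropped after, and the slot itself changes under a lock.
 */
#include <pthread.h>
#include <stdio.h>

#define SLOTS 16

struct toy_page {
	int refcount;
	long index;	/* offset in the file, like new->index above */
};

static struct toy_page *cache[SLOTS];
static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

static void replace_cache_page(struct toy_page *old, struct toy_page *new)
{
	long offset = old->index;

	new->refcount++;	/* page_cache_get(new) */
	new->index = offset;	/* new takes over old's identity */

	pthread_mutex_lock(&tree_lock);
	cache[offset] = new;	/* delete old slot + insert new in one step */
	old->refcount--;	/* the cache's reference to old is dropped */
	pthread_mutex_unlock(&tree_lock);
}

int main(void)
{
	struct toy_page old = { .refcount = 2, .index = 3 };
	struct toy_page new = { .refcount = 1, .index = -1 };

	cache[old.index] = &old;
	replace_cache_page(&old, &new);
	printf("slot 3 index %ld, old ref %d, new ref %d\n",
	       cache[3]->index, old.refcount, new.refcount);
	return 0;
}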
D | memory.c |
  565          pgtable_t new = pte_alloc_one(mm, address);    in __pte_alloc() local
  567          if (!new)    in __pte_alloc()
  589                  pmd_populate(mm, pmd, new);    in __pte_alloc()
  590                  new = NULL;    in __pte_alloc()
  594          if (new)    in __pte_alloc()
  595                  pte_free(mm, new);    in __pte_alloc()
  603          pte_t *new = pte_alloc_one_kernel(&init_mm, address);    in __pte_alloc_kernel() local
  604          if (!new)    in __pte_alloc_kernel()
  611                  pmd_populate_kernel(&init_mm, pmd, new);    in __pte_alloc_kernel()
  612                  new = NULL;    in __pte_alloc_kernel()
  [all …]
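__pte_alloc() and __pte_alloc_kernel() share a race-tolerant shape: allocate the page table while unlocked (allocation may sleep), recheck under the lock whether another thread already populated the pmd, and either publish the table and NULL the local so the common exit skips the free, or drop the now-redundant allocation. A userspace sketch of that pattern with a pthread mutex and hypothetical names:

/*
 * The __pte_alloc() shape in userspace: allocate without the lock,
 * recheck under it, and let a single cleanup path free only a losing
 * allocation (new == NULL means ownership was transferred).
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static int *shared_table;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static int table_alloc(void)
{
	int *new = calloc(1024, sizeof(*new));	/* may sleep: done unlocked */

	if (!new)
		return -1;

	pthread_mutex_lock(&table_lock);
	if (!shared_table) {		/* recheck: did someone beat us here? */
		shared_table = new;	/* publish, like pmd_populate() */
		new = NULL;		/* ownership transferred */
	}
	pthread_mutex_unlock(&table_lock);

	free(new);	/* no-op if we won the race, like the 'if (new) pte_free()' tail */
	return 0;
}

int main(void)
{
	table_alloc();
	table_alloc();	/* the second call quietly frees its allocation */
	printf("table at %p\n", (void *)shared_table);
	return 0;
}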
D | memcontrol.c |
  4598         struct mem_cgroup_threshold_ary *new;    in __mem_cgroup_usage_register_event() local
  4624         new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),    in __mem_cgroup_usage_register_event()
  4626         if (!new) {    in __mem_cgroup_usage_register_event()
  4630         new->size = size;    in __mem_cgroup_usage_register_event()
  4634                 memcpy(new->entries, thresholds->primary->entries, (size - 1) *    in __mem_cgroup_usage_register_event()
  4639         new->entries[size - 1].eventfd = eventfd;    in __mem_cgroup_usage_register_event()
  4640         new->entries[size - 1].threshold = threshold;    in __mem_cgroup_usage_register_event()
  4643         sort(new->entries, size, sizeof(struct mem_cgroup_threshold),    in __mem_cgroup_usage_register_event()
  4647         new->current_threshold = -1;    in __mem_cgroup_usage_register_event()
  4649                 if (new->entries[i].threshold <= usage) {    in __mem_cgroup_usage_register_event()
  [all …]
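__mem_cgroup_usage_register_event() grows the threshold array by allocating a copy one entry larger (the entries live in a flexible array member after the header), copying the old entries, appending the new threshold, and re-sorting. A userspace sketch of that grow-by-copy pattern, with illustrative types and qsort() standing in for the kernel's sort():

/*
 * Grow-by-copy for a header + flexible-array structure: build a new
 * array one slot larger, copy, append, then keep it sorted so later
 * scans (current_threshold above) can stop early.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct threshold { unsigned long value; };

struct threshold_ary {
	int size;
	struct threshold entries[];	/* flexible array member, as in the kernel */
};

static int cmp_threshold(const void *a, const void *b)
{
	unsigned long x = ((const struct threshold *)a)->value;
	unsigned long y = ((const struct threshold *)b)->value;

	return (x > y) - (x < y);
}

static struct threshold_ary *add_threshold(struct threshold_ary *old,
					   unsigned long value)
{
	int size = (old ? old->size : 0) + 1;
	struct threshold_ary *new =
		malloc(sizeof(*new) + size * sizeof(struct threshold));

	if (!new)
		return NULL;
	new->size = size;
	if (old)	/* copy the existing entries, like the memcpy above */
		memcpy(new->entries, old->entries,
		       (size - 1) * sizeof(struct threshold));
	new->entries[size - 1].value = value;
	qsort(new->entries, size, sizeof(struct threshold), cmp_threshold);
	return new;
}

int main(void)
{
	struct threshold_ary *t = add_threshold(NULL, 300);

	t = add_threshold(t, 100);	/* leaks the old array; fine for a demo */
	printf("%d thresholds, first %lu\n", t->size, t->entries[0].value);
	return 0;
}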
D | percpu.c |
  445          int *old = NULL, *new = NULL;    in pcpu_extend_area_map() local
  446          size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);    in pcpu_extend_area_map()
  451          new = pcpu_mem_zalloc(new_size);    in pcpu_extend_area_map()
  452          if (!new)    in pcpu_extend_area_map()
  464          memcpy(new, old, old_size);    in pcpu_extend_area_map()
  467          chunk->map = new;    in pcpu_extend_area_map()
  468          new = NULL;    in pcpu_extend_area_map()
  478          pcpu_mem_free(new, new_size);    in pcpu_extend_area_map()
D | page_cgroup.c |
  411                                          unsigned short old, unsigned short new)    in swap_cgroup_cmpxchg() argument
  423                  sc->id = new;    in swap_cgroup_cmpxchg()
D | internal.h |
  278  static inline void mlock_migrate_page(struct page *new, struct page *old) { }    in mlock_migrate_page() argument
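The internal.h hit is the empty-stub arm of a compile-time conditional: when the relevant support is configured out, callers still build against a do-nothing static inline instead of sprouting #ifdefs at every call site. A sketch of the idiom with a hypothetical CONFIG_TOY switch:

/*
 * Empty-stub idiom: both build configurations expose the same
 * function, so call sites stay clean and the stub compiles to nothing.
 */
#include <stdio.h>

#ifdef CONFIG_TOY
static inline void toy_migrate(int *new, int *old) { *new = *old; }
#else
static inline void toy_migrate(int *new, int *old) { }	/* feature compiled out */
#endif

int main(void)
{
	int a = 1, b = 2;

	toy_migrate(&a, &b);
	printf("a = %d\n", a);	/* 1 without CONFIG_TOY, 2 with it */
	return 0;
}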
D | Kconfig |
  322            deprecated interface virt_to_bus(). All new architectures
  556            This is marked experimental because it is a new feature (as of