/mm/
interval_tree.c
    34  struct vm_area_struct *parent;   in vma_interval_tree_insert_after() (local)
    40  parent = prev;   in vma_interval_tree_insert_after()
    43  parent = rb_entry(prev->shared.linear.rb.rb_right,   in vma_interval_tree_insert_after()
    45  if (parent->shared.linear.rb_subtree_last < last)   in vma_interval_tree_insert_after()
    46  parent->shared.linear.rb_subtree_last = last;   in vma_interval_tree_insert_after()
    47  while (parent->shared.linear.rb.rb_left) {   in vma_interval_tree_insert_after()
    48  parent = rb_entry(parent->shared.linear.rb.rb_left,   in vma_interval_tree_insert_after()
    50  if (parent->shared.linear.rb_subtree_last < last)   in vma_interval_tree_insert_after()
    51  parent->shared.linear.rb_subtree_last = last;   in vma_interval_tree_insert_after()
    53  link = &parent->shared.linear.rb.rb_left;   in vma_interval_tree_insert_after()
    [all …]
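The hits above are vma_interval_tree_insert_after() keeping the augmented field shared.linear.rb_subtree_last, the largest interval endpoint anywhere below a node, up to date along the path it descends to the insertion point. Below is a minimal sketch of that bookkeeping in its simplest form, as a plain insert from the root on a simplified, non-balancing binary tree; the struct and function names are illustrative only, and the red-black rebalancing the kernel gets from lib/rbtree is omitted.

/* Hypothetical, simplified interval-tree node; not the kernel's vm_area_struct. */
struct itree_node {
	unsigned long start, last;	/* the interval [start, last] */
	unsigned long subtree_last;	/* max ->last of any node in this subtree */
	struct itree_node *left, *right;
};

static void itree_insert(struct itree_node **root, struct itree_node *new)
{
	struct itree_node **link = root;

	new->subtree_last = new->last;
	new->left = new->right = NULL;

	while (*link) {
		struct itree_node *parent = *link;

		/* Push the new endpoint into every node on the way down,
		 * the same update the listing shows being applied to
		 * shared.linear.rb_subtree_last. */
		if (parent->subtree_last < new->last)
			parent->subtree_last = new->last;

		if (new->start <= parent->start)
			link = &parent->left;
		else
			link = &parent->right;
	}
	*link = new;
}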
hugetlb_cgroup.c
    55  return hugetlb_cgroup_from_css(h_cg->css.parent);   in parent_hugetlb_cgroup()
    115  struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);   in hugetlb_cgroup_move_parent() (local)
    127  if (!parent) {   in hugetlb_cgroup_move_parent()
    128  parent = root_h_cgroup;   in hugetlb_cgroup_move_parent()
    130  res_counter_charge_nofail(&parent->hugepage[idx],   in hugetlb_cgroup_move_parent()
    134  res_counter_uncharge_until(counter, counter->parent, csize);   in hugetlb_cgroup_move_parent()
    136  set_hugetlb_cgroup(page, parent);   in hugetlb_cgroup_move_parent()
ksm.c
    1157  struct rb_node *parent;   in stable_tree_search() (local)
    1172  parent = NULL;   in stable_tree_search()
    1187  parent = *new;   in stable_tree_search()
    1189  new = &parent->rb_left;   in stable_tree_search()
    1191  new = &parent->rb_right;   in stable_tree_search()
    1225  rb_link_node(&page_node->node, parent, new);   in stable_tree_search()
    1258  struct rb_node *parent = NULL;   in stable_tree_insert() (local)
    1279  parent = *new;   in stable_tree_insert()
    1281  new = &parent->rb_left;   in stable_tree_insert()
    1283  new = &parent->rb_right;   in stable_tree_insert()
    [all …]
nommu.c
    588  struct rb_node **p, *parent;   in add_nommu_region() (local)
    592  parent = NULL;   in add_nommu_region()
    595  parent = *p;   in add_nommu_region()
    596  pregion = rb_entry(parent, struct vm_region, vm_rb);   in add_nommu_region()
    607  rb_link_node(&region->vm_rb, parent, p);   in add_nommu_region()
    710  struct rb_node **p, *parent, *rb_prev;   in add_vma_to_mm() (local)
    733  parent = rb_prev = NULL;   in add_vma_to_mm()
    736  parent = *p;   in add_vma_to_mm()
    737  pvma = rb_entry(parent, struct vm_area_struct, vm_rb);   in add_vma_to_mm()
    744  rb_prev = parent;   in add_vma_to_mm()
    [all …]
memcontrol.c
    712  struct rb_node *parent = NULL;   in __mem_cgroup_insert_exceeded() (local)
    722  parent = *p;   in __mem_cgroup_insert_exceeded()
    723  mz_node = rb_entry(parent, struct mem_cgroup_per_zone,   in __mem_cgroup_insert_exceeded()
    734  rb_link_node(&mz->tree_node, parent, p);   in __mem_cgroup_insert_exceeded()
    1518  if (mem_cgroup_disabled() || !memcg->css.parent)   in mem_cgroup_swappiness()
    2636  res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);   in __mem_cgroup_cancel_local_charge()
    2639  memcg->memsw.parent, bytes);   in __mem_cgroup_cancel_local_charge()
    3459  struct mem_cgroup *parent;   in mem_cgroup_move_parent() (local)
    3474  parent = parent_mem_cgroup(child);   in mem_cgroup_move_parent()
    3478  if (!parent)   in mem_cgroup_move_parent()
    [all …]
kmemleak.c
    522  struct kmemleak_object *object, *parent;   in create_object() (local)
    574  parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);   in create_object()
    575  if (ptr + size <= parent->pointer)   in create_object()
    576  link = &parent->rb_node.rb_left;   in create_object()
    577  else if (parent->pointer + parent->size <= ptr)   in create_object()
    578  link = &parent->rb_node.rb_right;   in create_object()
    584  object = parent;   in create_object()
zswap.c
    263  struct rb_node **link = &root->rb_node, *parent = NULL;   in zswap_rb_insert() (local)
    267  parent = *link;   in zswap_rb_insert()
    268  myentry = rb_entry(parent, struct zswap_entry, rbnode);   in zswap_rb_insert()
    278  rb_link_node(&entry->rbnode, parent, link);   in zswap_rb_insert()
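zswap_rb_insert() above is the most compact instance of the insertion walk that also appears in ksm.c, nommu.c, memcontrol.c, vmalloc.c and mempolicy.c: descend from the root while remembering the last node visited (parent) and the child slot that will receive the new node (link), then splice the node in with rb_link_node() and rebalance with rb_insert_color(). A sketch of that idiom follows, using a made-up entry type keyed by an offset; my_entry and my_rb_insert are illustrative names, not kernel symbols.

#include <linux/rbtree.h>

struct my_entry {
	unsigned long offset;		/* search key */
	struct rb_node rbnode;		/* embedded rbtree linkage */
};

static void my_rb_insert(struct rb_root *root, struct my_entry *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct my_entry *entry;

	/* Walk down; parent trails one step behind *link. */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct my_entry, rbnode);
		if (new->offset < entry->offset)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}

	/* Attach the new node where the walk stopped, then recolour. */
	rb_link_node(&new->rbnode, parent, link);
	rb_insert_color(&new->rbnode, root);
}

The real zswap_rb_insert() additionally rejects duplicate offsets and hands the existing entry back to the caller; that error path is left out of the sketch.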
slab.c
    257  static void kmem_cache_node_init(struct kmem_cache_node *parent)   in kmem_cache_node_init() (argument)
    259  INIT_LIST_HEAD(&parent->slabs_full);   in kmem_cache_node_init()
    260  INIT_LIST_HEAD(&parent->slabs_partial);   in kmem_cache_node_init()
    261  INIT_LIST_HEAD(&parent->slabs_free);   in kmem_cache_node_init()
    262  parent->shared = NULL;   in kmem_cache_node_init()
    263  parent->alien = NULL;   in kmem_cache_node_init()
    264  parent->colour_next = 0;   in kmem_cache_node_init()
    265  spin_lock_init(&parent->list_lock);   in kmem_cache_node_init()
    266  parent->free_objects = 0;   in kmem_cache_node_init()
    267  parent->free_touched = 0;   in kmem_cache_node_init()
rmap.c
    76  anon_vma->parent = anon_vma;   in anon_vma_alloc()
    344  anon_vma->parent = pvma->anon_vma;   in anon_vma_fork()
    355  anon_vma->parent->degree++;   in anon_vma_fork()
    387  anon_vma->parent->degree--;   in unlink_anon_vmas()
backing-dev.c
    323  int bdi_register(struct backing_dev_info *bdi, struct device *parent,   in bdi_register() (argument)
    333  dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);   in bdi_register()
vmalloc.c
    309  struct rb_node *parent = NULL;   in __insert_vmap_area() (local)
    315  parent = *p;   in __insert_vmap_area()
    316  tmp_va = rb_entry(parent, struct vmap_area, rb_node);   in __insert_vmap_area()
    325  rb_link_node(&va->rb_node, parent, p);   in __insert_vmap_area()
mempolicy.c
    2204  struct rb_node *parent = NULL;   in sp_insert() (local)
    2208  parent = *p;   in sp_insert()
    2209  nd = rb_entry(parent, struct sp_node, nd);   in sp_insert()
    2217  rb_link_node(&new->nd, parent, p);   in sp_insert()
hugetlb.c
    1950  static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,   in hugetlb_sysfs_add_hstate() (argument)
    1957  hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);   in hugetlb_sysfs_add_hstate()
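hugetlb_sysfs_add_hstate() creates a per-hstate directory under an existing sysfs parent with kobject_create_and_add() and then populates it with an attribute group. A minimal sketch of that pattern, assuming a caller-supplied attribute group; add_child_dir and my_attr_group are illustrative names, not kernel symbols.

#include <linux/kobject.h>
#include <linux/sysfs.h>

static struct kobject *add_child_dir(const char *name, struct kobject *parent,
				     const struct attribute_group *my_attr_group)
{
	struct kobject *kobj;

	/* Creates and registers /sys/.../<parent>/<name>. */
	kobj = kobject_create_and_add(name, parent);
	if (!kobj)
		return NULL;

	/* Populate the directory; on failure drop the reference,
	 * which removes the directory again. */
	if (sysfs_create_group(kobj, my_attr_group)) {
		kobject_put(kobj);
		return NULL;
	}
	return kobj;
}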
shmem.c
    2708  struct inode *parent)   in shmem_encode_fh() (argument)