/mm/
D | interval_tree.c
      30  struct rb_root_cached *root)  in vma_interval_tree_insert_after() argument
      57  rb_insert_augmented(&node->shared.rb, &root->rb_root,  in vma_interval_tree_insert_after()
      76  struct rb_root_cached *root)  in INTERVAL_TREE_DEFINE()
      82  __anon_vma_interval_tree_insert(node, root);  in INTERVAL_TREE_DEFINE()
      86  struct rb_root_cached *root)  in anon_vma_interval_tree_remove() argument
      88  __anon_vma_interval_tree_remove(node, root);  in anon_vma_interval_tree_remove()
      92  anon_vma_interval_tree_iter_first(struct rb_root_cached *root,  in anon_vma_interval_tree_iter_first() argument
      95  return __anon_vma_interval_tree_iter_first(root, first, last);  in anon_vma_interval_tree_iter_first()
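The interval_tree.c hits above are the vma interval tree: INTERVAL_TREE_DEFINE() generates the insert/remove helpers and the iter_first()/iter_next() query pair over an rb_root_cached. A minimal sketch of driving those iterators, assuming the caller already holds the relevant i_mmap lock (walk_overlaps and the pr_info output are illustrative, not kernel code):

    #include <linux/mm.h>
    #include <linux/printk.h>

    /*
     * Sketch: visit every VMA whose file range overlaps [first, last],
     * using the iterators generated by INTERVAL_TREE_DEFINE().
     * Locking (i_mmap_rwsem) is the caller's job and omitted here.
     */
    static void walk_overlaps(struct rb_root_cached *root,
                              pgoff_t first, pgoff_t last)
    {
        struct vm_area_struct *vma;

        for (vma = vma_interval_tree_iter_first(root, first, last);
             vma;
             vma = vma_interval_tree_iter_next(vma, first, last))
            pr_info("overlap: %#lx-%#lx\n", vma->vm_start, vma->vm_end);
    }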
D | rmap.c
     103  anon_vma->root = anon_vma;  in anon_vma_alloc()
     131  if (rwsem_is_locked(&anon_vma->root->rwsem)) {  in anon_vma_free()
     243  static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)  in lock_anon_vma_root() argument
     245  struct anon_vma *new_root = anon_vma->root;  in lock_anon_vma_root()
     246  if (new_root != root) {  in lock_anon_vma_root()
     247  if (WARN_ON_ONCE(root))  in lock_anon_vma_root()
     248  up_write(&root->rwsem);  in lock_anon_vma_root()
     249  root = new_root;  in lock_anon_vma_root()
     250  down_write(&root->rwsem);  in lock_anon_vma_root()
     252  return root;  in lock_anon_vma_root()
    [all …]
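lock_anon_vma_root() above is a lock-batching helper: all anon_vmas forked from one hierarchy share a root whose rwsem covers the whole tree, so a caller walking many anon_vma_chains locks the root once and only switches locks when it reaches an anon_vma with a different root. A commented restatement of the idiom (the name lock_root and the dropped WARN_ON_ONCE are my simplifications):

    #include <linux/rmap.h>
    #include <linux/rwsem.h>

    /*
     * Sketch: "held" is the root currently write-locked (or NULL).
     * Switch locks only when the next anon_vma hangs off a
     * different root; otherwise reuse the lock we already own.
     */
    static struct anon_vma *lock_root(struct anon_vma *held,
                                      struct anon_vma *anon_vma)
    {
        struct anon_vma *new_root = anon_vma->root;

        if (new_root != held) {
            if (held)                       /* drop the old root */
                up_write(&held->rwsem);
            held = new_root;
            down_write(&held->rwsem);       /* take the new root */
        }
        return held;                        /* now covers anon_vma */
    }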
D | ksm.c
     588  struct rb_root *root)  in alloc_stable_node_chain() argument
     606  rb_replace_node(&dup->node, &chain->node, root);  in alloc_stable_node_chain()
     621  struct rb_root *root)  in free_stable_node_chain() argument
     623  rb_erase(&chain->node, root);  in free_stable_node_chain()
     909  struct rb_root *root)  in remove_stable_node_chain() argument
     929  free_stable_node_chain(stable_node, root);  in remove_stable_node_chain()
    1351  struct rb_root *root,  in stable_node_dup() argument
    1424  root);  in stable_node_dup()
    1469  struct rb_root *root)  in stable_node_dup_any() argument
    1474  free_stable_node_chain(stable_node, root);  in stable_node_dup_any()
    [all …]
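In alloc_stable_node_chain(), the chain head takes over the dup's exact tree position via rb_replace_node(), avoiding the compare-and-rebalance cost of an erase/insert pair; free_stable_node_chain() undoes it with a plain rb_erase(). A sketch of the replace-in-place idiom (swap_in_place is an illustrative name):

    #include <linux/rbtree.h>

    /*
     * Sketch: "new" inherits "old"'s slot, parent and colour.
     * Only valid when new would sort to the same position as old,
     * since no comparisons or rebalancing are performed.
     */
    static void swap_in_place(struct rb_node *old, struct rb_node *new,
                              struct rb_root *root)
    {
        rb_replace_node(old, new, root);
        RB_CLEAR_NODE(old);     /* old is no longer in any tree */
    }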
D | cleancache.c
     306  struct dentry *root = debugfs_create_dir("cleancache", NULL);  in init_cleancache() local
     308  debugfs_create_u64("succ_gets", 0444, root, &cleancache_succ_gets);  in init_cleancache()
     309  debugfs_create_u64("failed_gets", 0444, root, &cleancache_failed_gets);  in init_cleancache()
     310  debugfs_create_u64("puts", 0444, root, &cleancache_puts);  in init_cleancache()
     311  debugfs_create_u64("invalidates", 0444, root, &cleancache_invalidates);  in init_cleancache()
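init_cleancache() (and init_frontswap() below, which follows the same pattern) publishes its statistics as read-only u64 files in a fresh debugfs directory. A self-contained sketch of the pattern; the mystats/hits/misses names are made up:

    #include <linux/debugfs.h>
    #include <linux/module.h>

    static u64 my_hits;     /* counters to expose; illustrative */
    static u64 my_misses;

    static int __init my_stats_init(void)
    {
        struct dentry *root = debugfs_create_dir("mystats", NULL);

        /* 0444: read-only for everyone */
        debugfs_create_u64("hits", 0444, root, &my_hits);
        debugfs_create_u64("misses", 0444, root, &my_misses);
        return 0;
    }
    late_initcall(my_stats_init);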
D | frontswap.c
     489  struct dentry *root = debugfs_create_dir("frontswap", NULL);  in init_frontswap() local
     490  if (root == NULL)  in init_frontswap()
     492  debugfs_create_u64("loads", 0444, root, &frontswap_loads);  in init_frontswap()
     493  debugfs_create_u64("succ_stores", 0444, root, &frontswap_succ_stores);  in init_frontswap()
     494  debugfs_create_u64("failed_stores", 0444, root,  in init_frontswap()
     496  debugfs_create_u64("invalidates", 0444, root, &frontswap_invalidates);  in init_frontswap()
D | mmap.c
     336  struct rb_root *root = &mm->mm_rb;  in browse_rb() local
     341  for (nd = rb_first(root); nd; nd = rb_next(nd)) {  in browse_rb()
     382  static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)  in validate_mm_rb() argument
     386  for (nd = rb_first(root); nd; nd = rb_next(nd)) {  in validate_mm_rb()
     435  #define validate_mm_rb(root, ignore) do { } while (0)  argument
     458  struct rb_root *root)  in vma_rb_insert() argument
     461  validate_mm_rb(root, NULL);  in vma_rb_insert()
     463  rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);  in vma_rb_insert()
     466  static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)  in __vma_rb_erase() argument
     473  rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);  in __vma_rb_erase()
    [all …]
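The mmap.c hits are the legacy augmented VMA rbtree: inserts and erases go through rb_insert_augmented()/rb_erase_augmented() with vma_gap_callbacks so the cached per-subtree gap stays coherent, and validate_mm_rb() compiles to a no-op unless CONFIG_DEBUG_VM_RB is set. A sketch of declaring such callbacks with RB_DECLARE_CALLBACKS_MAX(), using an illustrative gap_node instead of the real vm_area_struct:

    #include <linux/rbtree_augmented.h>

    /* Illustrative node: each subtree caches its largest gap. */
    struct gap_node {
        struct rb_node rb;
        unsigned long gap;        /* this node's own gap */
        unsigned long max_gap;    /* maximum over the subtree */
    };

    static inline unsigned long gap_compute(struct gap_node *node)
    {
        return node->gap;
    }

    /* Generates gap_callbacks, which propagate max_gap on rotations. */
    RB_DECLARE_CALLBACKS_MAX(static, gap_callbacks,
                             struct gap_node, rb, unsigned long,
                             max_gap, gap_compute)

    static void gap_insert(struct gap_node *node, struct rb_root *root,
                           struct rb_node **link, struct rb_node *parent)
    {
        rb_link_node(&node->rb, parent, link);
        rb_insert_augmented(&node->rb, root, &gap_callbacks);
    }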
D | vmalloc.c
     864  struct rb_root *root, struct rb_node *from,  in find_va_links() argument
     870  if (root) {  in find_va_links()
     871  link = &root->rb_node;  in find_va_links()
     930  link_va(struct vmap_area *va, struct rb_root *root,  in link_va() argument
     945  if (root == &free_vmap_area_root) {  in link_va()
     958  root, &free_vmap_area_rb_augment_cb);  in link_va()
     961  rb_insert_color(&va->rb_node, root);  in link_va()
     969  unlink_va(struct vmap_area *va, struct rb_root *root)  in unlink_va() argument
     974  if (root == &free_vmap_area_root)  in unlink_va()
     976  root, &free_vmap_area_rb_augment_cb);  in unlink_va()
    [all …]
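find_va_links() is the standard descend-to-leaf search: it yields the rb_node ** slot (and its parent) where a new vmap_area belongs, which link_va() then wires in, applying the augmented callbacks only for trees rooted at free_vmap_area_root. A generic sketch of that two-step pattern for a tree keyed by a start address (struct area and the helper names are illustrative):

    #include <linux/rbtree.h>

    struct area {
        struct rb_node rb;
        unsigned long start;      /* sort key */
    };

    /* Descend from the root to the empty slot where "start" belongs. */
    static struct rb_node **area_find_link(struct rb_root *root,
                                           unsigned long start,
                                           struct rb_node **parent)
    {
        struct rb_node **link = &root->rb_node;

        *parent = NULL;
        while (*link) {
            struct area *a = rb_entry(*link, struct area, rb);

            *parent = *link;
            link = start < a->start ? &(*link)->rb_left
                                    : &(*link)->rb_right;
        }
        return link;
    }

    static void area_link(struct area *a, struct rb_root *root)
    {
        struct rb_node *parent;
        struct rb_node **link = area_find_link(root, a->start, &parent);

        rb_link_node(&a->rb, parent, link);   /* splice in at the leaf */
        rb_insert_color(&a->rb, root);        /* recolour/rebalance */
    }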
D | zswap.c
     294  static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)  in zswap_rb_search() argument
     296  struct rb_node *node = root->rb_node;  in zswap_rb_search()
     315  static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,  in zswap_rb_insert() argument
     318  struct rb_node **link = &root->rb_node, *parent = NULL;  in zswap_rb_insert()
     334  rb_insert_color(&entry->rbnode, root);  in zswap_rb_insert()
     338  static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)  in zswap_rb_erase() argument
     341  rb_erase(&entry->rbnode, root);  in zswap_rb_erase()
     385  static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,  in zswap_entry_find_get() argument
     390  entry = zswap_rb_search(root, offset);  in zswap_entry_find_get()
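zswap_rb_search() is the textbook rbtree lookup, descending by the page-offset key; zswap_rb_insert() is the matching descend-then-link insert. A sketch mirroring the lookup's shape with an illustrative entry type:

    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct entry {
        struct rb_node rbnode;
        pgoff_t offset;           /* key, as in zswap */
    };

    static struct entry *entry_search(struct rb_root *root, pgoff_t offset)
    {
        struct rb_node *node = root->rb_node;

        while (node) {
            struct entry *e = rb_entry(node, struct entry, rbnode);

            if (offset < e->offset)
                node = node->rb_left;
            else if (offset > e->offset)
                node = node->rb_right;
            else
                return e;         /* exact key match */
        }
        return NULL;
    }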
D | sparse.c
      84  unsigned long root = SECTION_NR_TO_ROOT(section_nr);  in sparse_index_init() local
      94  if (mem_section[root])  in sparse_index_init()
     101  mem_section[root] = section;  in sparse_index_init()
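With CONFIG_SPARSEMEM_EXTREME, mem_section is a two-level table: sparse_index_init() above allocates a page-sized root slot on demand, and a section number splits into a root index plus an offset within that root. A sketch of the resulting lookup, mirroring __nr_to_section() from <linux/mmzone.h> (the NULL return for a missing root is my addition):

    #include <linux/mmzone.h>

    /* Sketch: two-level section lookup (CONFIG_SPARSEMEM_EXTREME). */
    static struct mem_section *section_lookup(unsigned long nr)
    {
        unsigned long root = SECTION_NR_TO_ROOT(nr);

        if (!mem_section[root])   /* root page not allocated yet */
            return NULL;
        return &mem_section[root][nr & SECTION_ROOT_MASK];
    }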
D | mempolicy.c
    2282  struct rb_node *n = sp->root.rb_node;  in sp_lookup()
    2315  struct rb_node **p = &sp->root.rb_node;  in sp_insert()
    2330  rb_insert_color(&new->nd, &sp->root);  in sp_insert()
    2342  if (!sp->root.rb_node)  in mpol_shared_policy_lookup()
    2470  rb_erase(&n->nd, &sp->root);  in sp_delete()
    2584  sp->root = RB_ROOT; /* empty tree == default mempolicy */  in mpol_shared_policy_init()
    2649  if (!p->root.rb_node)  in mpol_free_shared_policy()
    2652  next = rb_first(&p->root);  in mpol_free_shared_policy()
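sp_lookup() searches the shared-policy tree, whose nodes carry [start, end) ranges; the descent tests the query against both bounds and stops at the first intersection, after which the real function backtracks with rb_prev() to the leftmost overlap. A sketch of the overlap descent with an illustrative range_node:

    #include <linux/rbtree.h>

    struct range_node {
        struct rb_node nd;
        unsigned long start, end; /* covers [start, end) */
    };

    /* Find any node whose range intersects the query [start, end). */
    static struct range_node *range_lookup(struct rb_root *root,
                                           unsigned long start,
                                           unsigned long end)
    {
        struct rb_node *n = root->rb_node;

        while (n) {
            struct range_node *p = rb_entry(n, struct range_node, nd);

            if (start >= p->end)          /* p lies entirely below us */
                n = n->rb_right;
            else if (end <= p->start)     /* p lies entirely above us */
                n = n->rb_left;
            else
                return p;                 /* ranges intersect */
        }
        return NULL;
    }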
D | memcontrol.c
     230  #define for_each_mem_cgroup_tree(iter, root) \  argument
     231  for (iter = mem_cgroup_iter(root, NULL, NULL); \
     233  iter = mem_cgroup_iter(root, iter, NULL))
    1040  struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,  in mem_cgroup_iter() argument
    1052  if (!root)  in mem_cgroup_iter()
    1053  root = root_mem_cgroup;  in mem_cgroup_iter()
    1063  mz = root->nodeinfo[reclaim->pgdat->node_id];  in mem_cgroup_iter()
    1089  css = css_next_descendant_pre(css, &root->css);  in mem_cgroup_iter()
    1109  if (css == &root->css)  in mem_cgroup_iter()
    1137  if (prev && prev != root)  in mem_cgroup_iter()
    [all …]
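mem_cgroup_iter() and its for_each_mem_cgroup_tree() wrapper do a pre-order walk of the memcg hierarchy via css_next_descendant_pre(), treating a NULL root as root_mem_cgroup and, for reclaim, resuming from a per-node cursor. A stripped-down sketch of the underlying css walk (walk_subtree and the printout are illustrative; the real iterator adds refcounting and the reclaim cursor):

    #include <linux/cgroup.h>
    #include <linux/printk.h>
    #include <linux/rcupdate.h>

    /* Sketch: pre-order walk of a css subtree; root is visited first. */
    static void walk_subtree(struct cgroup_subsys_state *root)
    {
        struct cgroup_subsys_state *css = NULL;

        rcu_read_lock();          /* css_next_descendant_pre() needs RCU */
        while ((css = css_next_descendant_pre(css, root)))
            pr_info("css id %d\n", css->id);
        rcu_read_unlock();
    }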
D | memblock.c
    2143  struct dentry *root = debugfs_create_dir("memblock", NULL);  in memblock_init_debugfs() local
    2145  debugfs_create_file("memory", 0444, root,  in memblock_init_debugfs()
    2147  debugfs_create_file("reserved", 0444, root,  in memblock_init_debugfs()
    2150  debugfs_create_file("physmem", 0444, root, &physmem,  in memblock_init_debugfs()
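memblock_init_debugfs() differs from the u64-counter examples above: each file is backed by a seq_file show routine passed to debugfs_create_file(). A sketch using DEFINE_SHOW_ATTRIBUTE(), with made-up mystate names:

    #include <linux/debugfs.h>
    #include <linux/module.h>
    #include <linux/seq_file.h>

    /* Show callback: renders the file contents on each read. */
    static int mystate_show(struct seq_file *m, void *v)
    {
        seq_puts(m, "example state\n");   /* illustrative content */
        return 0;
    }
    DEFINE_SHOW_ATTRIBUTE(mystate);       /* generates mystate_fops */

    static int __init mystate_debugfs_init(void)
    {
        struct dentry *root = debugfs_create_dir("mystate", NULL);

        debugfs_create_file("state", 0444, root, NULL, &mystate_fops);
        return 0;
    }
    late_initcall(mystate_debugfs_init);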
D | shmem.c
    3554  struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);  in shmem_reconfigure()
    3616  static int shmem_show_options(struct seq_file *seq, struct dentry *root)  in shmem_show_options() argument
    3618  struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);  in shmem_show_options()
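In shmem_show_options() the VFS hands over the filesystem's root dentry, so the private superblock info is reached through root->d_sb; shmem_reconfigure() reaches the same superblock via fc->root during remount. A sketch of a show_options method for a hypothetical my_sb_info hung off s_fs_info:

    #include <linux/fs.h>
    #include <linux/seq_file.h>

    struct my_sb_info {
        unsigned long max_blocks; /* illustrative mount option */
    };

    /* .show_options in super_operations: emit non-default options. */
    static int my_show_options(struct seq_file *seq, struct dentry *root)
    {
        struct my_sb_info *sbi = root->d_sb->s_fs_info;

        if (sbi->max_blocks)
            seq_printf(seq, ",size=%lu", sbi->max_blocks);
        return 0;
    }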
D | memory.c
    3541  static inline void unmap_mapping_range_tree(struct rb_root_cached *root,  in unmap_mapping_range_tree() argument
    3547  vma_interval_tree_foreach(vma, root,  in unmap_mapping_range_tree()
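unmap_mapping_range_tree() drives the same vma interval tree shown in the interval_tree.c entry, through the foreach macro, which expands to the iter_first()/iter_next() pair sketched earlier. A minimal shape, with the actual zap logic elided:

    #include <linux/mm.h>
    #include <linux/printk.h>

    /* Sketch: foreach form of the interval-tree walk. */
    static void zap_overlaps(struct rb_root_cached *root,
                             pgoff_t first, pgoff_t last)
    {
        struct vm_area_struct *vma;

        vma_interval_tree_foreach(vma, root, first, last)
            pr_info("would unmap %#lx-%#lx\n",
                    vma->vm_start, vma->vm_end);
    }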
D | Kconfig
     310  root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
/mm/damon/
D | dbgfs.c
     704  struct dentry *root, **new_dirs, *new_dir;  in dbgfs_mk_context() local
     722  root = dbgfs_dirs[0];  in dbgfs_mk_context()
     723  if (!root)  in dbgfs_mk_context()
     726  new_dir = debugfs_create_dir(name, root);  in dbgfs_mk_context()
     790  struct dentry *root, *dir, **new_dirs;  in dbgfs_rm_context() local
     799  root = dbgfs_dirs[0];  in dbgfs_rm_context()
     800  if (!root)  in dbgfs_rm_context()
     803  dir = debugfs_lookup(name, root);  in dbgfs_rm_context()
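dbgfs_mk_context() creates each DAMON context as a subdirectory under the root dir it keeps in dbgfs_dirs[0], and dbgfs_rm_context() finds the child again by name with debugfs_lookup() before tearing it down. A sketch of that create/lookup/remove shape (the ctx_dir_* names are illustrative; note that debugfs_lookup() returns a reference that must be dropped):

    #include <linux/dcache.h>
    #include <linux/debugfs.h>

    /* Sketch: make a named context directory under an existing root. */
    static struct dentry *ctx_dir_create(struct dentry *root,
                                         const char *name)
    {
        return debugfs_create_dir(name, root);
    }

    /* Sketch: find the directory by name again and tear it down. */
    static void ctx_dir_remove(struct dentry *root, const char *name)
    {
        struct dentry *dir = debugfs_lookup(name, root);

        if (!dir)
            return;
        debugfs_remove(dir);      /* removes dir and its children */
        dput(dir);                /* drop the debugfs_lookup() ref */
    }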