/mm/
rmap.c
     81  anon_vma->root = anon_vma;  in anon_vma_alloc()
    109  if (rwsem_is_locked(&anon_vma->root->rwsem)) {  in anon_vma_free()
    222  static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)  in lock_anon_vma_root() argument
    224  struct anon_vma *new_root = anon_vma->root;  in lock_anon_vma_root()
    225  if (new_root != root) {  in lock_anon_vma_root()
    226  if (WARN_ON_ONCE(root))  in lock_anon_vma_root()
    227  up_write(&root->rwsem);  in lock_anon_vma_root()
    228  root = new_root;  in lock_anon_vma_root()
    229  down_write(&root->rwsem);  in lock_anon_vma_root()
    231  return root;  in lock_anon_vma_root()
    [all …]
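The lock_anon_vma_root() matches above show a lock-retargeting idiom: the caller passes in whichever root's write lock it already holds, and the helper drops it and takes a different root's rwsem only when the anon_vma hangs off another tree (the WARN_ON_ONCE fires if that happens mid-batch, since callers expect one shared root per batch). A minimal userspace sketch of the same shape using pthread rwlocks — all demo_* names are hypothetical, not kernel API:

```c
#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins for anon_vma and its shared root lock. */
struct demo_root { pthread_rwlock_t rwsem; };
struct demo_node { struct demo_root *root; };

/*
 * Take (or keep) the write lock on the root covering @n. If a
 * different root is already held, drop it first -- the same
 * up_write()/down_write() dance as lock_anon_vma_root().
 */
static struct demo_root *lock_demo_root(struct demo_root *held,
                                        struct demo_node *n)
{
    struct demo_root *new_root = n->root;

    if (new_root != held) {
        if (held)
            pthread_rwlock_unlock(&held->rwsem);
        held = new_root;
        pthread_rwlock_wrlock(&held->rwsem);
    }
    return held;
}

int main(void)
{
    struct demo_root r = { .rwsem = PTHREAD_RWLOCK_INITIALIZER };
    struct demo_node a = { .root = &r }, b = { .root = &r };
    struct demo_root *held = NULL;

    held = lock_demo_root(held, &a);     /* takes r.rwsem */
    held = lock_demo_root(held, &b);     /* same root: no-op */
    pthread_rwlock_unlock(&held->rwsem); /* caller unlocks once per batch */
    printf("locked and unlocked one shared root\n");
    return 0;
}
```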
interval_tree.c
     31  struct rb_root *root)  in vma_interval_tree_insert_after() argument
     58  rb_insert_augmented(&node->shared.linear.rb, root,  in vma_interval_tree_insert_after()
     77  struct rb_root *root)  in INTERVAL_TREE_DEFINE()
     83  __anon_vma_interval_tree_insert(node, root);  in INTERVAL_TREE_DEFINE()
     87  struct rb_root *root)  in anon_vma_interval_tree_remove() argument
     89  __anon_vma_interval_tree_remove(node, root);  in anon_vma_interval_tree_remove()
     93  anon_vma_interval_tree_iter_first(struct rb_root *root,  in anon_vma_interval_tree_iter_first() argument
     96  return __anon_vma_interval_tree_iter_first(root, first, last);  in anon_vma_interval_tree_iter_first()
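The anon_vma_interval_tree_* functions here are thin wrappers over code generated by the INTERVAL_TREE_DEFINE() macro from <linux/interval_tree_generic.h>, which is why several matches attribute to "in INTERVAL_TREE_DEFINE()". A sketch of an instantiation, kernel-internal and shown for shape only — the demo_* node type and prefix are ours, while the real instantiation in mm/interval_tree.c keys anon_vma_chain nodes by pgoff:

```c
#include <linux/rbtree.h>
#include <linux/interval_tree_generic.h>

/* Illustrative interval node over a closed range [start, last]. */
struct demo_interval {
    struct rb_node rb;
    unsigned long start, last;
    unsigned long __subtree_last;  /* maintained by the generated code */
};

static inline unsigned long demo_start(struct demo_interval *n) { return n->start; }
static inline unsigned long demo_last(struct demo_interval *n)  { return n->last; }

/*
 * Emits demo_it_insert(), demo_it_remove(), demo_it_iter_first()
 * and demo_it_iter_next() -- the same way this file emits the
 * __anon_vma_interval_tree_* helpers that the wrappers above call.
 */
INTERVAL_TREE_DEFINE(struct demo_interval, rb, unsigned long, __subtree_last,
                     demo_start, demo_last, static inline, demo_it)
```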
frontswap.c
    444  struct dentry *root = debugfs_create_dir("frontswap", NULL);  in init_frontswap() local
    445  if (root == NULL)  in init_frontswap()
    447  debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads);  in init_frontswap()
    448  debugfs_create_u64("succ_stores", S_IRUGO, root, &frontswap_succ_stores);  in init_frontswap()
    449  debugfs_create_u64("failed_stores", S_IRUGO, root,  in init_frontswap()
    452  root, &frontswap_invalidates);  in init_frontswap()
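frontswap.c here, and the cleancache.c and memblock.c entries below, all use the same debugfs idiom: create a directory under the debugfs root (the NULL parent), then hang read-only counter files off it. A minimal module sketch of that pattern — the "demo" names are ours, the debugfs calls are the real API:

```c
#include <linux/module.h>
#include <linux/debugfs.h>

static u64 demo_hits;           /* counter exported read-only to userspace */
static struct dentry *demo_root;

static int __init demo_init(void)
{
    /* creates /sys/kernel/debug/demo/ (NULL parent == debugfs root) */
    demo_root = debugfs_create_dir("demo", NULL);
    if (!demo_root)
        return -ENOMEM;
    /* S_IRUGO: world-readable, like the frontswap/cleancache counters */
    debugfs_create_u64("hits", S_IRUGO, demo_root, &demo_hits);
    return 0;
}

static void __exit demo_exit(void)
{
    debugfs_remove_recursive(demo_root);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```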
cleancache.c
    393  struct dentry *root = debugfs_create_dir("cleancache", NULL);  in init_cleancache() local
    394  if (root == NULL)  in init_cleancache()
    396  debugfs_create_u64("succ_gets", S_IRUGO, root, &cleancache_succ_gets);  in init_cleancache()
    398  root, &cleancache_failed_gets);  in init_cleancache()
    399  debugfs_create_u64("puts", S_IRUGO, root, &cleancache_puts);  in init_cleancache()
    401  root, &cleancache_invalidates);  in init_cleancache()
sparse.c
     82  unsigned long root = SECTION_NR_TO_ROOT(section_nr);  in sparse_index_init() local
     85  if (mem_section[root])  in sparse_index_init()
     92  mem_section[root] = section;  in sparse_index_init()
    111  struct mem_section* root;  in __section_nr() local
    114  root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);  in __section_nr()
    115  if (!root)  in __section_nr()
    118  if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))  in __section_nr()
    124  return (root_nr * SECTIONS_PER_ROOT) + (ms - root);  in __section_nr()
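sparse.c splits section numbers across a two-level table: SECTION_NR_TO_ROOT() divides by SECTIONS_PER_ROOT to pick a root slot, and __section_nr() inverts the mapping from a mem_section pointer via root_nr * SECTIONS_PER_ROOT + (ms - root). A standalone sketch of that arithmetic — the SECTIONS_PER_ROOT value here is illustrative; the real one is derived from PAGE_SIZE and sizeof(struct mem_section):

```c
#include <stdio.h>

#define SECTIONS_PER_ROOT 256UL  /* illustrative, not the real config value */

int main(void)
{
    unsigned long section_nr = 1000;

    /* forward: which root slot holds this section? */
    unsigned long root  = section_nr / SECTIONS_PER_ROOT;  /* SECTION_NR_TO_ROOT() */
    unsigned long index = section_nr % SECTIONS_PER_ROOT;  /* offset inside the root */

    /* inverse, as __section_nr() recovers it from pointer arithmetic */
    unsigned long back = root * SECTIONS_PER_ROOT + index;

    printf("section %lu -> root %lu, index %lu -> section %lu\n",
           section_nr, root, index, back);
    return 0;
}
```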
zswap.c
    239  static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)  in zswap_rb_search() argument
    241  struct rb_node *node = root->rb_node;  in zswap_rb_search()
    260  static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,  in zswap_rb_insert() argument
    263  struct rb_node **link = &root->rb_node, *parent = NULL;  in zswap_rb_insert()
    279  rb_insert_color(&entry->rbnode, root);  in zswap_rb_insert()
    283  static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)  in zswap_rb_erase() argument
    286  rb_erase(&entry->rbnode, root);  in zswap_rb_erase()
    325  static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,  in zswap_entry_find_get() argument
    330  entry = zswap_rb_search(root, offset);  in zswap_entry_find_get()
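zswap_rb_search() and zswap_rb_insert() are the canonical kernel rbtree pattern: descend from root->rb_node comparing keys, use rb_entry() to get back the containing struct, and on insert link the new node into the found slot before rebalancing. A kernel-style sketch with an illustrative entry type — note the real zswap_rb_insert() also reports the duplicate back through a third parameter, which this simplified version omits:

```c
#include <linux/rbtree.h>
#include <linux/errno.h>

/* Illustrative entry keyed by offset, like struct zswap_entry. */
struct demo_entry {
    struct rb_node rbnode;
    unsigned long offset;
};

/* Standard lookup: descend left/right by key. */
static struct demo_entry *demo_rb_search(struct rb_root *root,
                                         unsigned long offset)
{
    struct rb_node *node = root->rb_node;

    while (node) {
        struct demo_entry *e = rb_entry(node, struct demo_entry, rbnode);

        if (offset < e->offset)
            node = node->rb_left;
        else if (offset > e->offset)
            node = node->rb_right;
        else
            return e;
    }
    return NULL;
}

/* Standard insert: find the link slot, then link and rebalance. */
static int demo_rb_insert(struct rb_root *root, struct demo_entry *entry)
{
    struct rb_node **link = &root->rb_node, *parent = NULL;

    while (*link) {
        struct demo_entry *e = rb_entry(*link, struct demo_entry, rbnode);

        parent = *link;
        if (entry->offset < e->offset)
            link = &(*link)->rb_left;
        else if (entry->offset > e->offset)
            link = &(*link)->rb_right;
        else
            return -EEXIST;  /* duplicate key */
    }
    rb_link_node(&entry->rbnode, parent, link);
    rb_insert_color(&entry->rbnode, root);
    return 0;
}
```

The same descent shape recurs in the mmap.c, ksm.c, and mempolicy.c entries below; they differ only in key type and in what hangs off each node.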
mmap.c
    406  static int browse_rb(struct rb_root *root)  in browse_rb() argument
    412  for (nd = rb_first(root); nd; nd = rb_next(nd)) {  in browse_rb()
    451  static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)  in validate_mm_rb() argument
    455  for (nd = rb_first(root); nd; nd = rb_next(nd)) {  in validate_mm_rb()
    504  #define validate_mm_rb(root, ignore) do { } while (0)  argument
    526  struct rb_root *root)  in vma_rb_insert() argument
    529  validate_mm_rb(root, NULL);  in vma_rb_insert()
    531  rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);  in vma_rb_insert()
    534  static void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)  in vma_rb_erase() argument
    540  validate_mm_rb(root, vma);  in vma_rb_erase()
    [all …]
ksm.c
   1155  struct rb_root *root;  in stable_tree_search() local
   1169  root = root_stable_tree + nid;  in stable_tree_search()
   1171  new = &root->rb_node;  in stable_tree_search()
   1226  rb_insert_color(&page_node->node, root);  in stable_tree_search()
   1234  rb_replace_node(&stable_node->node, &page_node->node, root);  in stable_tree_search()
   1237  rb_erase(&stable_node->node, root);  in stable_tree_search()
   1256  struct rb_root *root;  in stable_tree_insert() local
   1263  root = root_stable_tree + nid;  in stable_tree_insert()
   1264  new = &root->rb_node;  in stable_tree_insert()
   1303  rb_insert_color(&stable_node->node, root);  in stable_tree_insert()
   [all …]
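Before descending, stable_tree_search() and stable_tree_insert() pick a tree by NUMA node: root_stable_tree is an array of rb_root, and `root = root_stable_tree + nid` selects the node-local tree so stable pages stay on their own node. A minimal sketch of that layout — the array size and names here are illustrative; ksm sizes the real array by the configured node count:

```c
#include <linux/rbtree.h>

#define DEMO_MAX_NODES 4  /* illustrative; not the real MAX_NUMNODES */

/* One tree root per NUMA node; zero-initialized rb_root == empty tree. */
static struct rb_root demo_roots[DEMO_MAX_NODES];

/* Same pointer arithmetic as `root = root_stable_tree + nid`. */
static struct rb_root *demo_tree_for_node(int nid)
{
    return demo_roots + nid;
}
```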
memcontrol.c
   1071  static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,  in __mem_cgroup_iter_next() argument
   1078  next_css = css_next_descendant_pre(prev_css, &root->css);  in __mem_cgroup_iter_next()
   1098  if (next_css == &root->css)  in __mem_cgroup_iter_next()
   1119  static void mem_cgroup_iter_invalidate(struct mem_cgroup *root)  in mem_cgroup_iter_invalidate() argument
   1126  atomic_inc(&root->dead_count);  in mem_cgroup_iter_invalidate()
   1131  struct mem_cgroup *root,  in mem_cgroup_iter_load() argument
   1143  *sequence = atomic_read(&root->dead_count);  in mem_cgroup_iter_load()
   1154  if (position && position != root &&  in mem_cgroup_iter_load()
   1164  struct mem_cgroup *root,  in mem_cgroup_iter_update() argument
   1168  if (last_visited && last_visited != root)  in mem_cgroup_iter_update()
   [all …]
memblock.c
   1581  struct dentry *root = debugfs_create_dir("memblock", NULL);  in memblock_init_debugfs() local
   1582  if (!root)  in memblock_init_debugfs()
   1584  debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);  in memblock_init_debugfs()
   1585  debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);  in memblock_init_debugfs()
   1587  debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);  in memblock_init_debugfs()
mempolicy.c
   2172  struct rb_node *n = sp->root.rb_node;  in sp_lookup()
   2203  struct rb_node **p = &sp->root.rb_node;  in sp_insert()
   2218  rb_insert_color(&new->nd, &sp->root);  in sp_insert()
   2230  if (!sp->root.rb_node)  in mpol_shared_policy_lookup()
   2338  rb_erase(&n->nd, &sp->root);  in sp_delete()
   2451  sp->root = RB_ROOT; /* empty tree == default mempolicy */  in mpol_shared_policy_init()
   2516  if (!p->root.rb_node)  in mpol_free_shared_policy()
   2519  next = rb_first(&p->root);  in mpol_free_shared_policy()
vmscan.c
   2306  struct mem_cgroup *root = sc->target_mem_cgroup;  in shrink_zone() local
   2316  memcg = mem_cgroup_iter(root, NULL, &reclaim);  in shrink_zone()
   2338  mem_cgroup_iter_break(root, memcg);  in shrink_zone()
   2341  memcg = mem_cgroup_iter(root, memcg, &reclaim);  in shrink_zone()
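shrink_zone() is the main consumer of the memcg iterator shown in the memcontrol.c entry above: start at the target hierarchy root, advance with mem_cgroup_iter(), and on early exit release the reference the iterator holds via mem_cgroup_iter_break(). A hedged sketch of that protocol — demo_walk_hierarchy and done_enough are ours, the iterator calls are the API of this era:

```c
#include <linux/memcontrol.h>

/*
 * Sketch of the shrink_zone() iteration protocol over the memcg
 * hierarchy rooted at @root. "done_enough" stands in for the real
 * reclaim-progress check and is not a kernel symbol.
 */
static void demo_walk_hierarchy(struct mem_cgroup *root,
                                struct mem_cgroup_reclaim_cookie *reclaim)
{
    struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, reclaim);

    do {
        bool done_enough = false;  /* placeholder for real policy */

        /* ... shrink this memcg's LRU lists here ... */

        if (done_enough) {
            /* bail out early: drop the iterator's reference */
            mem_cgroup_iter_break(root, memcg);
            break;
        }
        memcg = mem_cgroup_iter(root, memcg, reclaim);
    } while (memcg);
}
```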
slab.c
   3744  struct kmem_cache *root = memcg_root_cache(cachep);  in enable_cpucache() local
   3745  limit = root->limit;  in enable_cpucache()
   3746  shared = root->shared;  in enable_cpucache()
   3747  batchcount = root->batchcount;  in enable_cpucache()
shmem.c
   2891  static int shmem_show_options(struct seq_file *seq, struct dentry *root)  in shmem_show_options() argument
   2893  struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);  in shmem_show_options()
memory.c
   2307  static inline void unmap_mapping_range_tree(struct rb_root *root,  in unmap_mapping_range_tree() argument
   2313  vma_interval_tree_foreach(vma, root,  in unmap_mapping_range_tree()
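unmap_mapping_range_tree() visits every vma whose file range overlaps the target window by iterating the interval tree with vma_interval_tree_foreach(). A minimal sketch of that traversal — the demo_* wrapper is ours, while the macro is the real one from include/linux/mm.h:

```c
#include <linux/mm.h>

/* Visit every vma overlapping the pgoff range [first, last]. */
static void demo_visit_overlaps(struct rb_root *root,
                                pgoff_t first, pgoff_t last)
{
    struct vm_area_struct *vma;

    vma_interval_tree_foreach(vma, root, first, last) {
        /* ... e.g. unmap the overlapping portion of @vma ... */
    }
}
```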
Kconfig
    341  root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).