/mm/
interval_tree.c
    29  void vma_interval_tree_insert_after(struct vm_area_struct *node,  in vma_interval_tree_insert_after() argument
    35  unsigned long last = vma_last_pgoff(node);  in vma_interval_tree_insert_after()
    37  VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node);  in vma_interval_tree_insert_after()
    56  node->shared.linear.rb_subtree_last = last;  in vma_interval_tree_insert_after()
    57  rb_link_node(&node->shared.linear.rb, &parent->shared.linear.rb, link);  in vma_interval_tree_insert_after()
    58  rb_insert_augmented(&node->shared.linear.rb, root,  in vma_interval_tree_insert_after()
    76  void anon_vma_interval_tree_insert(struct anon_vma_chain *node,  in INTERVAL_TREE_DEFINE()
    80  node->cached_vma_start = avc_start_pgoff(node);  in INTERVAL_TREE_DEFINE()
    81  node->cached_vma_last = avc_last_pgoff(node);  in INTERVAL_TREE_DEFINE()
    83  __anon_vma_interval_tree_insert(node, root);  in INTERVAL_TREE_DEFINE()
    [all …]

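The fragments above come from the augmented rb-tree used for VMA interval lookups: each node caches the largest interval endpoint of its subtree in rb_subtree_last, and an insert keeps that field current before linking the new node. A minimal, unbalanced user-space sketch of the same invariant (the struct itnode and it_insert() names are hypothetical, not the kernel's INTERVAL_TREE_DEFINE() machinery):

#include <stddef.h>

struct itnode {
    unsigned long start, last;      /* interval [start, last] */
    unsigned long subtree_last;     /* max 'last' anywhere in this subtree */
    struct itnode *left, *right;
};

/* Plain BST insert keyed on 'start'; rebalancing is omitted for brevity. */
static void it_insert(struct itnode **root, struct itnode *node)
{
    struct itnode **link = root;

    node->subtree_last = node->last;
    node->left = node->right = NULL;

    while (*link) {
        struct itnode *parent = *link;

        /* keep the augmented maximum correct on the way down */
        if (node->last > parent->subtree_last)
            parent->subtree_last = node->last;
        link = node->start < parent->start ? &parent->left : &parent->right;
    }
    *link = node;
}

In the real code, rb_insert_augmented() additionally rebalances the red-black tree while keeping rb_subtree_last correct; the sketch skips that part.
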
sparse-vmemmap.c
    38  static void * __init_refok __earlyonly_bootmem_alloc(int node,  in __earlyonly_bootmem_alloc() argument
    44  BOOTMEM_ALLOC_ACCESSIBLE, node);  in __earlyonly_bootmem_alloc()
    50  void * __meminit vmemmap_alloc_block(unsigned long size, int node)  in vmemmap_alloc_block() argument
    56  if (node_state(node, N_HIGH_MEMORY))  in vmemmap_alloc_block()
    58  node, GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,  in vmemmap_alloc_block()
    68  return __earlyonly_bootmem_alloc(node, size, size,  in vmemmap_alloc_block()
    73  void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)  in vmemmap_alloc_block_buf() argument
    78  return vmemmap_alloc_block(size, node);  in vmemmap_alloc_block_buf()
    83  return vmemmap_alloc_block(size, node);  in vmemmap_alloc_block_buf()
    90  void __meminit vmemmap_verify(pte_t *pte, int node,  in vmemmap_verify() argument
    [all …]

slab.c
   248  int node, struct list_head *list);
   612  int node;  in init_reap_node() local
   614  node = next_node(cpu_to_mem(cpu), node_online_map);  in init_reap_node()
   615  if (node == MAX_NUMNODES)  in init_reap_node()
   616  node = first_node(node_online_map);  in init_reap_node()
   618  per_cpu(slab_reap_node, cpu) = node;  in init_reap_node()
   623  int node = __this_cpu_read(slab_reap_node);  in next_reap_node() local
   625  node = next_node(node, node_online_map);  in next_reap_node()
   626  if (unlikely(node >= MAX_NUMNODES))  in next_reap_node()
   627  node = first_node(node_online_map);  in next_reap_node()
   [all …]

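init_reap_node() and next_reap_node() above walk the online-node map round-robin, wrapping back to the first online node when they run off the end. A user-space sketch of that wrap-around step, with a plain bool array standing in for node_online_map (all names here are hypothetical):

#include <stdbool.h>

#define NR_NODES 8

static bool node_online[NR_NODES];   /* stand-in for node_online_map */

/* Advance to the next online node, wrapping around.
 * Assumes at least one node is marked online. */
static int next_reap_node(int node)
{
    do {
        node = (node + 1) % NR_NODES;   /* wrap instead of the MAX_NUMNODES check */
    } while (!node_online[node]);
    return node;
}

The kernel version keeps the current position in the per-CPU variable slab_reap_node and steps with next_node()/first_node() rather than a modulo wrap.
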
workingset.c
   309  struct radix_tree_node *node;  in shadow_lru_isolate() local
   325  node = container_of(item, struct radix_tree_node, private_list);  in shadow_lru_isolate()
   326  mapping = node->private_data;  in shadow_lru_isolate()
   344  BUG_ON(!node->count);  in shadow_lru_isolate()
   345  BUG_ON(node->count & RADIX_TREE_COUNT_MASK);  in shadow_lru_isolate()
   348  if (node->slots[i]) {  in shadow_lru_isolate()
   349  BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));  in shadow_lru_isolate()
   350  node->slots[i] = NULL;  in shadow_lru_isolate()
   351  BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT));  in shadow_lru_isolate()
   352  node->count -= 1U << RADIX_TREE_COUNT_SHIFT;  in shadow_lru_isolate()
   [all …]

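shadow_lru_isolate() above relies on node->count packing two counters: real pages in the bits below RADIX_TREE_COUNT_SHIFT and shadow (exceptional) entries above it, so dropping one shadow entry subtracts 1 << RADIX_TREE_COUNT_SHIFT. A toy illustration of such a packed counter, using a hypothetical struct shadow_node and made-up constants rather than the kernel's radix_tree_node:

#include <assert.h>
#include <stddef.h>

#define SLOTS_PER_NODE  64
#define COUNT_SHIFT     6                          /* stand-in for RADIX_TREE_COUNT_SHIFT */
#define COUNT_MASK      ((1U << COUNT_SHIFT) - 1)  /* low bits: real pages */

struct shadow_node {
    void         *slots[SLOTS_PER_NODE];
    unsigned int  count;   /* pages in the low bits, shadow entries above COUNT_SHIFT */
};

/* Drop every remaining shadow entry from a node that holds no real pages. */
static void evict_shadows(struct shadow_node *node)
{
    assert(!(node->count & COUNT_MASK));    /* no real pages may remain */

    for (int i = 0; i < SLOTS_PER_NODE; i++) {
        if (!node->slots[i])
            continue;
        node->slots[i] = NULL;              /* every remaining entry is a shadow */
        node->count -= 1U << COUNT_SHIFT;
    }
}
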
slob.c
   190  static void *slob_new_pages(gfp_t gfp, int order, int node)  in slob_new_pages() argument
   195  if (node != NUMA_NO_NODE)  in slob_new_pages()
   196  page = alloc_pages_exact_node(node, gfp, order);  in slob_new_pages()
   268  static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)  in slob_alloc() argument
   291  if (node != NUMA_NO_NODE && page_to_nid(sp) != node)  in slob_alloc()
   316  b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);  in slob_alloc()
   427  __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)  in __do_kmalloc_node() argument
   441  m = slob_alloc(size + align, gfp, align, node);  in __do_kmalloc_node()
   449  size, size + align, gfp, node);  in __do_kmalloc_node()
   455  ret = slob_new_pages(gfp, order, node);  in __do_kmalloc_node()
   [all …]

list_lru.c
    16  struct list_lru_node *nlru = &lru->node[nid];  in list_lru_add()
    35  struct list_lru_node *nlru = &lru->node[nid];  in list_lru_del()
    55  struct list_lru_node *nlru = &lru->node[nid];  in list_lru_count_node()
    71  struct list_lru_node *nlru = &lru->node[nid];  in list_lru_walk_node()
   130  size_t size = sizeof(*lru->node) * nr_node_ids;  in list_lru_init_key()
   132  lru->node = kzalloc(size, GFP_KERNEL);  in list_lru_init_key()
   133  if (!lru->node)  in list_lru_init_key()
   138  spin_lock_init(&lru->node[i].lock);  in list_lru_init_key()
   140  lockdep_set_class(&lru->node[i].lock, key);  in list_lru_init_key()
   141  INIT_LIST_HEAD(&lru->node[i].list);  in list_lru_init_key()
   [all …]

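list_lru_init_key() above allocates one list_lru_node per possible NUMA node and initialises each bucket's lock and list head; the add/del/count/walk paths then index into that array by node id. A self-contained user-space sketch of the same setup, with pthread mutexes and hypothetical my_lru types standing in for the kernel ones:

#include <pthread.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

/* Hypothetical per-node LRU bucket, mirroring struct list_lru_node. */
struct my_lru_node {
    pthread_mutex_t  lock;
    struct list_head list;
};

struct my_lru {
    struct my_lru_node *node;   /* one bucket per NUMA node */
};

/* Allocate and initialise one bucket per node, like list_lru_init_key(). */
static int my_lru_init(struct my_lru *lru, int nr_node_ids)
{
    lru->node = calloc(nr_node_ids, sizeof(*lru->node));
    if (!lru->node)
        return -1;
    for (int i = 0; i < nr_node_ids; i++) {
        pthread_mutex_init(&lru->node[i].lock, NULL);
        INIT_LIST_HEAD(&lru->node[i].list);
    }
    return 0;
}
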
slub.c
   996  static inline unsigned long slabs_node(struct kmem_cache *s, int node)  in slabs_node() argument
   998  struct kmem_cache_node *n = get_node(s, node);  in slabs_node()
  1008  static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)  in inc_slabs_node() argument
  1010  struct kmem_cache_node *n = get_node(s, node);  in inc_slabs_node()
  1023  static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)  in dec_slabs_node() argument
  1025  struct kmem_cache_node *n = get_node(s, node);  in dec_slabs_node()
  1248  static inline unsigned long slabs_node(struct kmem_cache *s, int node)  in slabs_node() argument
  1252  static inline void inc_slabs_node(struct kmem_cache *s, int node,  in inc_slabs_node() argument
  1254  static inline void dec_slabs_node(struct kmem_cache *s, int node,  in dec_slabs_node() argument
  1317  gfp_t flags, int node, struct kmem_cache_order_objects oo)  in alloc_slab_page() argument
  [all …]

migrate.c
  1184  int node;  member
  1193  while (pm->node != MAX_NUMNODES && pm->page != p)  in new_page_node()
  1196  if (pm->node == MAX_NUMNODES)  in new_page_node()
  1203  pm->node);  in new_page_node()
  1205  return alloc_pages_exact_node(pm->node,  in new_page_node()
  1228  for (pp = pm; pp->node != MAX_NUMNODES; pp++) {  in do_move_page_to_node_array()
  1254  if (err == pp->node)  in do_move_page_to_node_array()
  1339  int node;  in do_pages_move() local
  1346  if (get_user(node, nodes + j + chunk_start))  in do_pages_move()
  1350  if (node < 0 || node >= MAX_NUMNODES)  in do_pages_move()
  [all …]

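new_page_node() above scans a caller-built array of {page, node} pairs, terminated by an entry whose node is MAX_NUMNODES, to find the destination node for a given page. A short sketch of that sentinel-terminated lookup, with hypothetical names and void * in place of struct page *:

#define MAX_NUMNODES 64   /* doubles as the end-of-array sentinel, as in the fragments above */

/* Hypothetical mirror of the page-to-destination-node mapping built by do_pages_move(). */
struct page_to_node {
    void *page;
    int   node;
};

/* Find the destination node recorded for @page; -1 if it is not in the array. */
static int dest_node_for_page(const struct page_to_node *pm, const void *page)
{
    while (pm->node != MAX_NUMNODES && pm->page != page)
        pm++;                     /* list ends at the node == MAX_NUMNODES entry */
    return pm->node == MAX_NUMNODES ? -1 : pm->node;
}
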
ksm.c
   136  struct rb_node node; /* when node of stable tree */  member
   173  struct rb_node node; /* when node of unstable tree */  member
   513  rb_erase(&stable_node->node,  in remove_node_from_stable_tree()
   649  rb_erase(&rmap_item->node,  in remove_rmap_item_from_tree()
   751  struct stable_node, node);  in remove_all_stable_nodes()
  1179  stable_node = rb_entry(*new, struct stable_node, node);  in stable_tree_search()
  1225  rb_link_node(&page_node->node, parent, new);  in stable_tree_search()
  1226  rb_insert_color(&page_node->node, root);  in stable_tree_search()
  1234  rb_replace_node(&stable_node->node, &page_node->node, root);  in stable_tree_search()
  1237  rb_erase(&stable_node->node, root);  in stable_tree_search()
  [all …]

hugetlb.c
   670  #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \  argument
   673  ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
   676  #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \  argument
   679  ((node = hstate_next_node_to_free(hs, mask)) || 1); \
   799  int nr_nodes, node;  in alloc_fresh_gigantic_page() local
   801  for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {  in alloc_fresh_gigantic_page()
   802  page = alloc_fresh_gigantic_page_node(h, node);  in alloc_fresh_gigantic_page()
  1031  int nr_nodes, node;  in alloc_fresh_huge_page() local
  1034  for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {  in alloc_fresh_huge_page()
  1035  page = alloc_fresh_huge_page_node(h, node);  in alloc_fresh_huge_page()
  [all …]

vmalloc.c
   347  int node, gfp_t gfp_mask)  in alloc_vmap_area() argument
   360  gfp_mask & GFP_RECLAIM_MASK, node);  in alloc_vmap_area()
   801  int node, err;  in new_vmap_block() local
   803  node = numa_node_id();  in new_vmap_block()
   806  gfp_mask & GFP_RECLAIM_MASK, node);  in new_vmap_block()
   812  node, gfp_mask);  in new_vmap_block()
  1096  void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)  in vm_map_ram() argument
  1110  VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);  in vm_map_ram()
  1311  unsigned long end, int node, gfp_t gfp_mask, const void *caller)  in __get_vm_area_node() argument
  1324  area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);  in __get_vm_area_node()
  [all …]

vmpressure.c
   136  struct list_head node;  member
   150  list_for_each_entry(ev, &vmpr->events, node) {  in vmpressure_event()
   319  list_add(&ev->node, &vmpr->events);  in vmpressure_register_event()
   343  list_for_each_entry(ev, &vmpr->events, node) {  in vmpressure_unregister_event()
   346  list_del(&ev->node);  in vmpressure_unregister_event()

truncate.c
    29  struct radix_tree_node *node;  in clear_exceptional_entry() local
    42  if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))  in clear_exceptional_entry()
    48  if (!node)  in clear_exceptional_entry()
    50  workingset_node_shadows_dec(node);  in clear_exceptional_entry()
    58  if (!workingset_node_shadows(node) &&  in clear_exceptional_entry()
    59  !list_empty(&node->private_list))  in clear_exceptional_entry()
    60  list_lru_del(&workingset_shadow_nodes, &node->private_list);  in clear_exceptional_entry()
    61  __radix_tree_delete_node(&mapping->page_tree, node);  in clear_exceptional_entry()

filemap.c
   115  struct radix_tree_node *node;  in page_cache_tree_delete() local
   123  __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);  in page_cache_tree_delete()
   137  if (!node) {  in page_cache_tree_delete()
   148  if (test_bit(offset, node->tags[tag]))  in page_cache_tree_delete()
   154  workingset_node_pages_dec(node);  in page_cache_tree_delete()
   156  workingset_node_shadows_inc(node);  in page_cache_tree_delete()
   158  if (__radix_tree_delete_node(&mapping->page_tree, node))  in page_cache_tree_delete()
   168  if (!workingset_node_pages(node) &&  in page_cache_tree_delete()
   169  list_empty(&node->private_list)) {  in page_cache_tree_delete()
   170  node->private_data = mapping;  in page_cache_tree_delete()
   [all …]

memory_hotplug.c
   248  int node = pgdat->node_id;  in register_page_bootmem_info_node() local
   256  get_page_bootmem(node, page, NODE_INFO);  in register_page_bootmem_info_node()
   267  get_page_bootmem(node, page, NODE_INFO);  in register_page_bootmem_info_node()
   282  if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))  in register_page_bootmem_info_node()
   950  static void node_states_set_node(int node, struct memory_notify *arg)  in node_states_set_node() argument
   953  node_set_state(node, N_NORMAL_MEMORY);  in node_states_set_node()
   956  node_set_state(node, N_HIGH_MEMORY);  in node_states_set_node()
   958  node_set_state(node, N_MEMORY);  in node_states_set_node()
  1657  static void node_states_clear_node(int node, struct memory_notify *arg)  in node_states_clear_node() argument
  1660  node_clear_state(node, N_NORMAL_MEMORY);  in node_states_clear_node()
  [all …]

mempolicy.c
   129  int node;  in get_task_policy() local
   134  node = numa_node_id();  in get_task_policy()
   135  if (node != NUMA_NO_NODE) {  in get_task_policy()
   136  pol = &preferred_node_policy[node];  in get_task_policy()
   371  int node = first_node(pol->w.user_nodemask);  in mpol_rebind_preferred() local
   373  if (node_isset(node, *nodes)) {  in mpol_rebind_preferred()
   374  pol->v.preferred_node = node;  in mpol_rebind_preferred()
   984  static struct page *new_node_page(struct page *page, unsigned long node, int **x)  in new_node_page() argument
   988  node);  in new_node_page()
   990  return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);  in new_node_page()
   [all …]

page_alloc.c
  1916  return local_zone->node == zone->node;  in zone_local()
  3544  static int find_next_best_node(int node, nodemask_t *used_node_mask)  in find_next_best_node() argument
  3552  if (!node_isset(node, *used_node_mask)) {  in find_next_best_node()
  3553  node_set(node, *used_node_mask);  in find_next_best_node()
  3554  return node;  in find_next_best_node()
  3564  val = node_distance(node, n);  in find_next_best_node()
  3567  val += (n < node);  in find_next_best_node()
  3596  static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)  in build_zonelists_in_node_order() argument
  3604  j = build_zonelists_node(NODE_DATA(node), zonelist, j);  in build_zonelists_in_node_order()
  3633  int pos, j, node;  in build_zonelists_in_zone_order() local
  [all …]

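find_next_best_node() above builds the zonelist fallback order by first taking the local node and then repeatedly picking the not-yet-used node with the smallest node_distance() from it (the kernel also applies small penalties and tie-breaking that this sketch omits). A simplified user-space version over a hypothetical static distance table:

#include <stdbool.h>
#include <limits.h>

#define NR_NODES 4

/* Hypothetical symmetric NUMA distance table, standing in for node_distance(). */
static const int distance[NR_NODES][NR_NODES] = {
    { 10, 20, 30, 30 },
    { 20, 10, 30, 30 },
    { 30, 30, 10, 20 },
    { 30, 30, 20, 10 },
};

/* Pick the closest node to @node that has not been used yet, or -1 when done. */
static int find_next_best_node(int node, bool used[NR_NODES])
{
    int best = -1, best_val = INT_MAX;

    if (!used[node]) {               /* the local node always comes first */
        used[node] = true;
        return node;
    }
    for (int n = 0; n < NR_NODES; n++) {
        if (used[n])
            continue;
        if (distance[node][n] < best_val) {
            best_val = distance[node][n];
            best = n;
        }
    }
    if (best >= 0)
        used[best] = true;
    return best;
}

Calling this in a loop until it returns -1 yields the nodes in increasing distance from the local one, which is the order build_zonelists_in_node_order() consumes.
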
quicklist.c
    29  int node = numa_node_id();  in max_pages() local
    30  struct zone *zones = NODE_DATA(node)->node_zones;  in max_pages()
    44  num_cpus_on_node = cpumask_weight(cpumask_of_node(node));  in max_pages()

page_isolation.c
   302  int node = next_online_node(page_to_nid(page));  in alloc_migrate_target() local
   303  if (node == MAX_NUMNODES)  in alloc_migrate_target()
   304  node = first_online_node;  in alloc_migrate_target()
   306  node);  in alloc_migrate_target()

zswap.c
   241  struct rb_node *node = root->rb_node;  in zswap_rb_search() local
   244  while (node) {  in zswap_rb_search()
   245  entry = rb_entry(node, struct zswap_entry, rbnode);  in zswap_rb_search()
   247  node = node->rb_left;  in zswap_rb_search()
   249  node = node->rb_right;  in zswap_rb_search()

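zswap_rb_search() above is a plain binary-search-tree descent keyed on the swap offset. The same loop over a simplified node type (a hypothetical struct my_entry with direct child pointers, instead of struct rb_node plus rb_entry()):

#include <stddef.h>

/* Simplified stand-in for struct zswap_entry, keyed by swap offset. */
struct my_entry {
    unsigned long    offset;
    struct my_entry *left, *right;
};

/* Descend the tree comparing offsets, mirroring zswap_rb_search(). */
static struct my_entry *my_rb_search(struct my_entry *node, unsigned long offset)
{
    while (node) {
        if (offset < node->offset)
            node = node->left;
        else if (offset > node->offset)
            node = node->right;
        else
            return node;    /* exact match */
    }
    return NULL;
}
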
memcontrol.c
  1863  int node;  in mem_cgroup_select_victim_node() local
  1866  node = memcg->last_scanned_node;  in mem_cgroup_select_victim_node()
  1868  node = next_node(node, memcg->scan_nodes);  in mem_cgroup_select_victim_node()
  1869  if (node == MAX_NUMNODES)  in mem_cgroup_select_victim_node()
  1870  node = first_node(memcg->scan_nodes);  in mem_cgroup_select_victim_node()
  1877  if (unlikely(node == MAX_NUMNODES))  in mem_cgroup_select_victim_node()
  1878  node = numa_node_id();  in mem_cgroup_select_victim_node()
  1880  memcg->last_scanned_node = node;  in mem_cgroup_select_victim_node()
  1881  return node;  in mem_cgroup_select_victim_node()
  3807  int node, int zid, enum lru_list lru)  in mem_cgroup_force_empty_list() argument
  [all …]

vmstat.c
   585  if (z->node == ((flags & __GFP_OTHER_NODE) ?  in zone_statistics()
   586  preferred_zone->node : numa_node_id()))  in zone_statistics()
   693  loff_t node = *pos;  in frag_start() local
   695  pgdat && node;  in frag_start()
   697  --node;  in frag_start()
  1381  static void vmstat_cpu_dead(int node)  in vmstat_cpu_dead() argument
  1387  if (cpu_to_node(cpu) == node)  in vmstat_cpu_dead()
  1390  node_clear_state(node, N_CPU);  in vmstat_cpu_dead()

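vmstat_cpu_dead() above checks whether the node that just lost a CPU still has any online CPU mapped to it, and clears the node's N_CPU state only when none remain. A user-space mock of that check, where plain arrays stand in for cpu_online_mask, cpu_to_node() and the node-state bitmap (all names hypothetical):

#include <stdbool.h>

#define NR_CPUS  16
#define NR_NODES 4

static bool cpu_online[NR_CPUS];       /* stand-in for cpu_online_mask      */
static int  cpu_to_node_map[NR_CPUS];  /* stand-in for cpu_to_node()        */
static bool node_has_cpu[NR_NODES];    /* stand-in for the N_CPU node state */

/* After a CPU goes away: if its node has no online CPUs left, clear N_CPU. */
static void vmstat_cpu_dead(int node)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        if (cpu_online[cpu] && cpu_to_node_map[cpu] == node)
            return;                    /* the node still has a live CPU */
    node_has_cpu[node] = false;
}
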
slab.h
   345  static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)  in get_node() argument
   347  return s->node[node];  in get_node()

kmemcheck.c
     8  void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)  in kmemcheck_alloc_shadow() argument
    20  shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);  in kmemcheck_alloc_shadow()

compaction.c
  1540  int compaction_register_node(struct node *node)  in compaction_register_node() argument
  1542  return device_create_file(&node->dev, &dev_attr_compact);  in compaction_register_node()
  1545  void compaction_unregister_node(struct node *node)  in compaction_unregister_node() argument
  1547  return device_remove_file(&node->dev, &dev_attr_compact);  in compaction_unregister_node()