/mm/ |
D | interval_tree.c |
    28   void vma_interval_tree_insert_after(struct vm_area_struct *node,    in vma_interval_tree_insert_after() argument
    34   unsigned long last = vma_last_pgoff(node);    in vma_interval_tree_insert_after()
    36   VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node);    in vma_interval_tree_insert_after()
    55   node->shared.rb_subtree_last = last;    in vma_interval_tree_insert_after()
    56   rb_link_node(&node->shared.rb, &parent->shared.rb, link);    in vma_interval_tree_insert_after()
    57   rb_insert_augmented(&node->shared.rb, &root->rb_root,    in vma_interval_tree_insert_after()
    75   void anon_vma_interval_tree_insert(struct anon_vma_chain *node,    in INTERVAL_TREE_DEFINE()
    79   node->cached_vma_start = avc_start_pgoff(node);    in INTERVAL_TREE_DEFINE()
    80   node->cached_vma_last = avc_last_pgoff(node);    in INTERVAL_TREE_DEFINE()
    82   __anon_vma_interval_tree_insert(node, root);    in INTERVAL_TREE_DEFINE()
    [all …]
|
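The interval_tree.c hits above come from code generated by INTERVAL_TREE_DEFINE(). A minimal sketch of how that macro is typically instantiated for a hypothetical interval type (the names my_interval and my_tree are invented for illustration, not from the file above):

    #include <linux/rbtree.h>
    #include <linux/interval_tree_generic.h>

    /* Hypothetical interval keyed by [start, last] (inclusive). */
    struct my_interval {
            struct rb_node rb;
            unsigned long start;
            unsigned long last;
            unsigned long __subtree_last;   /* maintained by the generated code */
    };

    static inline unsigned long my_start(struct my_interval *n) { return n->start; }
    static inline unsigned long my_last(struct my_interval *n)  { return n->last; }

    /*
     * Generates my_tree_insert(), my_tree_remove(), my_tree_iter_first()
     * and my_tree_iter_next(), all operating on a struct rb_root_cached.
     */
    INTERVAL_TREE_DEFINE(struct my_interval, rb, unsigned long, __subtree_last,
                         my_start, my_last, static, my_tree)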
D | sparse-vmemmap.c |
    40   static void * __ref __earlyonly_bootmem_alloc(int node,    in __earlyonly_bootmem_alloc() argument
    46   MEMBLOCK_ALLOC_ACCESSIBLE, node);    in __earlyonly_bootmem_alloc()
    49   void * __meminit vmemmap_alloc_block(unsigned long size, int node)    in vmemmap_alloc_block() argument
    58   page = alloc_pages_node(node, gfp_mask, order);    in vmemmap_alloc_block()
    69   return __earlyonly_bootmem_alloc(node, size, size,    in vmemmap_alloc_block()
    74   void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)    in vmemmap_alloc_block_buf() argument
    79   ptr = vmemmap_alloc_block(size, node);    in vmemmap_alloc_block_buf()
    132  void __meminit vmemmap_verify(pte_t *pte, int node,    in vmemmap_verify() argument
    138  if (node_distance(actual_node, node) > LOCAL_DISTANCE)    in vmemmap_verify()
    143  pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)    in vmemmap_pte_populate() argument
    [all …]
|
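vmemmap_alloc_block() above prefers node-local pages once the page allocator is up and falls back to memblock during early boot. A simplified sketch of that pattern, with a hypothetical helper name (not the real function):

    #include <linux/gfp.h>
    #include <linux/memblock.h>
    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <asm/dma.h>

    /* Sketch only: allocate 'size' bytes as close to 'node' as possible. */
    static void *alloc_block_on_node(unsigned long size, int node)
    {
            if (slab_is_available()) {
                    int order = get_order(size);
                    struct page *page;

                    page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, order);
                    return page ? page_address(page) : NULL;
            }

            /* Early boot: the buddy allocator is not up yet, use memblock. */
            return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
                                          MEMBLOCK_ALLOC_ACCESSIBLE, node);
    }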
D | workingset.c |
    368  void workingset_update_node(struct xa_node *node)    in workingset_update_node() argument
    380  if (node->count && node->count == node->nr_values) {    in workingset_update_node()
    381  if (list_empty(&node->private_list)) {    in workingset_update_node()
    382  list_lru_add(&shadow_nodes, &node->private_list);    in workingset_update_node()
    383  __inc_lruvec_slab_state(node, WORKINGSET_NODES);    in workingset_update_node()
    386  if (!list_empty(&node->private_list)) {    in workingset_update_node()
    387  list_lru_del(&shadow_nodes, &node->private_list);    in workingset_update_node()
    388  __dec_lruvec_slab_state(node, WORKINGSET_NODES);    in workingset_update_node()
    454  struct xa_node *node = container_of(item, struct xa_node, private_list);    in shadow_lru_isolate() local
    455  XA_STATE(xas, node->array, 0);    in shadow_lru_isolate()
    [all …]
|
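workingset_update_node() keeps an xarray node on the shadow-nodes LRU only while the node holds nothing but shadow entries. A reduced sketch of that idea with a hypothetical node type (assuming the two-argument list_lru_add()/list_lru_del() of this kernel series):

    #include <linux/list.h>
    #include <linux/list_lru.h>

    /* Hypothetical node: 'count' entries total, 'nr_values' of them shadow values. */
    struct my_node {
            struct list_head private_list;
            unsigned int count;
            unsigned int nr_values;
    };

    static void my_update_shadow_tracking(struct list_lru *shadow_lru,
                                          struct my_node *node)
    {
            if (node->count && node->count == node->nr_values) {
                    /* Only shadow entries left: make the node reclaimable. */
                    if (list_empty(&node->private_list))
                            list_lru_add(shadow_lru, &node->private_list);
            } else {
                    /* Real entries present (or node empty): stop tracking it. */
                    if (!list_empty(&node->private_list))
                            list_lru_del(shadow_lru, &node->private_list);
            }
    }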
D | slab.c |
    212  int node, struct list_head *list);
    492  int node = __this_cpu_read(slab_reap_node);    in next_reap_node() local
    494  node = next_node_in(node, node_online_map);    in next_reap_node()
    495  __this_cpu_write(slab_reap_node, node);    in next_reap_node()
    532  static struct array_cache *alloc_arraycache(int node, int entries,    in alloc_arraycache() argument
    538  ac = kmalloc_node(memsize, gfp, node);    in alloc_arraycache()
    596  static inline struct alien_cache **alloc_alien_cache(int node,    in alloc_alien_cache() argument
    633  static struct alien_cache *__alloc_alien_cache(int node, int entries,    in __alloc_alien_cache() argument
    639  alc = kmalloc_node(memsize, gfp, node);    in __alloc_alien_cache()
    648  static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)    in alloc_alien_cache() argument
    [all …]
|
D | list_lru.c |
    128  struct list_lru_node *nlru = &lru->node[nid];    in list_lru_add()
    152  struct list_lru_node *nlru = &lru->node[nid];    in list_lru_del()
    187  struct list_lru_node *nlru = &lru->node[nid];    in list_lru_count_one()
    204  nlru = &lru->node[nid];    in list_lru_count_node()
    272  struct list_lru_node *nlru = &lru->node[nid];    in list_lru_walk_one()
    288  struct list_lru_node *nlru = &lru->node[nid];    in list_lru_walk_one_irq()
    309  struct list_lru_node *nlru = &lru->node[nid];    in list_lru_walk_node()
    458  if (memcg_init_list_lru_node(&lru->node[i]))    in memcg_init_list_lru()
    464  if (!lru->node[i].memcg_lrus)    in memcg_init_list_lru()
    466  memcg_destroy_list_lru_node(&lru->node[i]);    in memcg_init_list_lru()
    [all …]
|
D | slob.c |
    191  static void *slob_new_pages(gfp_t gfp, int order, int node)    in slob_new_pages() argument
    196  if (node != NUMA_NO_NODE)    in slob_new_pages()
    197  page = __alloc_pages_node(node, gfp, order);    in slob_new_pages()
    301  static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,    in slob_alloc() argument
    326  if (node != NUMA_NO_NODE && page_to_nid(sp) != node)    in slob_alloc()
    358  b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);    in slob_alloc()
    469  __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)    in __do_kmalloc_node() argument
    493  m = slob_alloc(size + minalign, gfp, align, node, minalign);    in __do_kmalloc_node()
    501  size, size + minalign, gfp, node);    in __do_kmalloc_node()
    507  ret = slob_new_pages(gfp, order, node);    in __do_kmalloc_node()
    [all …]
|
D | slub.c |
    1031  static inline unsigned long slabs_node(struct kmem_cache *s, int node)    in slabs_node() argument
    1033  struct kmem_cache_node *n = get_node(s, node);    in slabs_node()
    1043  static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)    in inc_slabs_node() argument
    1045  struct kmem_cache_node *n = get_node(s, node);    in inc_slabs_node()
    1058  static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)    in dec_slabs_node() argument
    1060  struct kmem_cache_node *n = get_node(s, node);    in dec_slabs_node()
    1373  static inline unsigned long slabs_node(struct kmem_cache *s, int node)    in slabs_node() argument
    1377  static inline void inc_slabs_node(struct kmem_cache *s, int node,    in inc_slabs_node() argument
    1379  static inline void dec_slabs_node(struct kmem_cache *s, int node,    in dec_slabs_node() argument
    1489  gfp_t flags, int node, struct kmem_cache_order_objects oo)    in alloc_slab_page() argument
    [all …]
|
D | vmalloc.c |
    380  get_subtree_max_size(struct rb_node *node)    in get_subtree_max_size() argument
    384  va = rb_entry_safe(node, struct vmap_area, rb_node);    in get_subtree_max_size()
    559  struct rb_node *node;    in augment_tree_propagate_check() local
    568  node = n;    in augment_tree_propagate_check()
    570  while (node) {    in augment_tree_propagate_check()
    571  va = rb_entry(node, struct vmap_area, rb_node);    in augment_tree_propagate_check()
    573  if (get_subtree_max_size(node->rb_left) == size) {    in augment_tree_propagate_check()
    574  node = node->rb_left;    in augment_tree_propagate_check()
    581  node = node->rb_right;    in augment_tree_propagate_check()
    626  struct rb_node *node = &va->rb_node;    in augment_tree_propagate_from() local
    [all …]
|
D | zswap.c |
    127  struct hlist_node node;    member
    269  struct rb_node *node = root->rb_node;    in zswap_rb_search() local
    272  while (node) {    in zswap_rb_search()
    273  entry = rb_entry(node, struct zswap_entry, rbnode);    in zswap_rb_search()
    275  node = node->rb_left;    in zswap_rb_search()
    277  node = node->rb_right;    in zswap_rb_search()
    398  static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)    in zswap_cpu_comp_prepare() argument
    400  struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);    in zswap_cpu_comp_prepare()
    416  static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)    in zswap_cpu_comp_dead() argument
    418  struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);    in zswap_cpu_comp_dead()
    [all …]
|
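zswap_rb_search() above is the standard rbtree lookup loop. The same walk, written out for a hypothetical entry keyed by a page offset (sketch only, types invented for illustration):

    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct my_entry {
            struct rb_node rbnode;
            pgoff_t offset;
    };

    /* Plain rbtree lookup: descend left or right until the key matches. */
    static struct my_entry *my_rb_search(struct rb_root *root, pgoff_t offset)
    {
            struct rb_node *node = root->rb_node;
            struct my_entry *entry;

            while (node) {
                    entry = rb_entry(node, struct my_entry, rbnode);
                    if (entry->offset > offset)
                            node = node->rb_left;
                    else if (entry->offset < offset)
                            node = node->rb_right;
                    else
                            return entry;
            }
            return NULL;
    }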
D | hugetlb.c |
    901   int node = NUMA_NO_NODE;    in dequeue_huge_page_nodemask() local
    916   if (zone_to_nid(zone) == node)    in dequeue_huge_page_nodemask()
    918   node = zone_to_nid(zone);    in dequeue_huge_page_nodemask()
    920   page = dequeue_huge_page_node_exact(h, node);    in dequeue_huge_page_nodemask()
    1037  #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \    argument
    1040  ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
    1043  #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \    argument
    1046  ((node = hstate_next_node_to_free(hs, mask)) || 1); \
    1336  struct llist_node *node;    in free_hpage_workfn() local
    1339  node = llist_del_all(&hpage_freelist);    in free_hpage_workfn()
    [all …]
|
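The for_each_node_mask_to_alloc()/..._to_free() macros above round-robin across the allowed nodes so huge page allocations and frees stay balanced. A stripped-down sketch of the same wrap-around step (the cursor variable and helper name are hypothetical):

    #include <linux/nodemask.h>

    /*
     * Advance a round-robin cursor over 'allowed'. next_node_in() wraps
     * around and only returns MAX_NUMNODES when the mask is empty.
     */
    static int my_next_node_to_alloc(int *cursor, const nodemask_t *allowed)
    {
            int node = next_node_in(*cursor, *allowed);

            if (node < MAX_NUMNODES)
                    *cursor = node;
            return node;
    }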
D | ksm.c |
    157   struct rb_node node;    /* when node of stable tree */    member
    207   struct rb_node node;    /* when node of unstable tree */    member
    371   rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));    in stable_node_dup_del()
    609   rb_replace_node(&dup->node, &chain->node, root);    in alloc_stable_node_chain()
    626   rb_erase(&chain->node, root);    in free_stable_node_chain()
    810   rb_erase(&rmap_item->node,    in remove_rmap_item_from_tree()
    947   struct stable_node, node);    in remove_all_stable_nodes()
    1428  rb_replace_node(&stable_node->node, &found->node,    in stable_node_dup()
    1578  stable_node = rb_entry(*new, struct stable_node, node);    in stable_tree_search()
    1703  rb_link_node(&page_node->node, parent, new);    in stable_tree_search()
    [all …]
|
D | memory_hotplug.c |
    228   int node = pgdat->node_id;    in register_page_bootmem_info_node() local
    235   get_page_bootmem(node, page, NODE_INFO);    in register_page_bootmem_info_node()
    248   if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))    in register_page_bootmem_info_node()
    657   static void node_states_set_node(int node, struct memory_notify *arg)    in node_states_set_node() argument
    660   node_set_state(node, N_NORMAL_MEMORY);    in node_states_set_node()
    663   node_set_state(node, N_HIGH_MEMORY);    in node_states_set_node()
    666   node_set_state(node, N_MEMORY);    in node_states_set_node()
    1469  static void node_states_clear_node(int node, struct memory_notify *arg)    in node_states_clear_node() argument
    1472  node_clear_state(node, N_NORMAL_MEMORY);    in node_states_clear_node()
    1475  node_clear_state(node, N_HIGH_MEMORY);    in node_states_clear_node()
    [all …]
|
D | khugepaged.c |
    781   khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)    in khugepaged_alloc_page() argument
    785   *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);    in khugepaged_alloc_page()
    845   khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)    in khugepaged_alloc_page() argument
    949   int node, int referenced)    in collapse_huge_page() argument
    974   new_page = khugepaged_alloc_page(hpage, gfp, node);    in collapse_huge_page()
    1126  int node = NUMA_NO_NODE, unmapped = 0;    in khugepaged_scan_pmd() local
    1184  node = page_to_nid(page);    in khugepaged_scan_pmd()
    1185  if (khugepaged_scan_abort(node)) {    in khugepaged_scan_pmd()
    1189  khugepaged_node_load[node]++;    in khugepaged_scan_pmd()
    1230  node = khugepaged_find_target_node();    in khugepaged_scan_pmd()
    [all …]
|
D | vmpressure.c |
    155  struct list_head node;    member
    166  list_for_each_entry(ev, &vmpr->events, node) {    in vmpressure_event()
    406  list_add(&ev->node, &vmpr->events);    in vmpressure_register_event()
    432  list_for_each_entry(ev, &vmpr->events, node) {    in vmpressure_unregister_event()
    435  list_del(&ev->node);    in vmpressure_unregister_event()
|
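vmpressure keeps its registered listeners on a plain list and walks the list on each pressure event. A minimal sketch of that walk with a hypothetical listener type (locking omitted):

    #include <linux/list.h>
    #include <linux/types.h>

    struct my_listener {
            struct list_head node;
            int threshold;
    };

    /* Walk all registered listeners; report whether any threshold was reached. */
    static bool my_notify_listeners(struct list_head *listeners, int level)
    {
            struct my_listener *ev;
            bool signalled = false;

            list_for_each_entry(ev, listeners, node) {
                    if (level >= ev->threshold)
                            signalled = true;
            }
            return signalled;
    }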
D | mempolicy.c |
    133   int node;    in get_task_policy() local
    138   node = numa_node_id();    in get_task_policy()
    139   if (node != NUMA_NO_NODE) {    in get_task_policy()
    140   pol = &preferred_node_policy[node];    in get_task_policy()
    324   int node = first_node(pol->w.user_nodemask);    in mpol_rebind_preferred() local
    326   if (node_isset(node, *nodes)) {    in mpol_rebind_preferred()
    327   pol->v.preferred_node = node;    in mpol_rebind_preferred()
    1001  struct page *alloc_new_node_page(struct page *page, unsigned long node)    in alloc_new_node_page() argument
    1005  node);    in alloc_new_node_page()
    1009  thp = alloc_pages_node(node,    in alloc_new_node_page()
    [all …]
|
D | cma_debug.c |
    19   struct hlist_node node;    member
    74   hlist_add_head(&mem->node, &cma->mem_head);    in cma_add_to_cma_mem_list()
    84   mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);    in cma_get_entry_from_list()
    85   hlist_del_init(&mem->node);    in cma_get_entry_from_list()
|
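cma_debug.c strings its debug allocations on an hlist and pops them one at a time. A sketch of that pop with a hypothetical record type (the spinlock used by the real code is omitted):

    #include <linux/list.h>

    struct my_record {
            struct hlist_node node;
            unsigned long pfn;
    };

    /* Detach and return the first record, or NULL if the list is empty. */
    static struct my_record *my_pop_record(struct hlist_head *head)
    {
            struct my_record *rec = NULL;

            if (!hlist_empty(head)) {
                    rec = hlist_entry(head->first, struct my_record, node);
                    hlist_del_init(&rec->node);
            }
            return rec;
    }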
D | vmstat.c |
    959   unsigned long sum_zone_node_page_state(int node,    in sum_zone_node_page_state() argument
    962   struct zone *zones = NODE_DATA(node)->node_zones;    in sum_zone_node_page_state()
    976   unsigned long sum_zone_numa_state(int node,    in sum_zone_numa_state() argument
    979   struct zone *zones = NODE_DATA(node)->node_zones;    in sum_zone_numa_state()
    1306  loff_t node = *pos;    in frag_start() local
    1309  pgdat && node;    in frag_start()
    1311  --node;    in frag_start()
    1928  int node;    in init_cpu_node_state() local
    1930  for_each_online_node(node) {    in init_cpu_node_state()
    1931  if (cpumask_weight(cpumask_of_node(node)) > 0)    in init_cpu_node_state()
    [all …]
|
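sum_zone_node_page_state() above folds a per-zone counter over every zone of one node. A simplified version of that loop (a sketch, not the exported helper):

    #include <linux/mmzone.h>
    #include <linux/vmstat.h>

    /* Sum one zone counter across all zones belonging to 'node'. */
    static unsigned long my_sum_zone_state(int node, enum zone_stat_item item)
    {
            struct zone *zones = NODE_DATA(node)->node_zones;
            unsigned long count = 0;
            int i;

            for (i = 0; i < MAX_NR_ZONES; i++)
                    count += zone_page_state(&zones[i], item);

            return count;
    }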
D | migrate.c |
    1502  struct list_head *pagelist, int node)    in do_move_pages_to_node() argument
    1509  err = migrate_pages(pagelist, alloc_new_node_page, NULL, node,    in do_move_pages_to_node()
    1526  int node, struct list_head *pagelist, bool migrate_all)    in add_page_for_migration() argument
    1552  if (page_to_nid(page) == node)    in add_page_for_migration()
    1610  int node;    in do_pages_move() local
    1615  if (get_user(node, nodes + i))    in do_pages_move()
    1620  if (node < 0 || node >= MAX_NUMNODES)    in do_pages_move()
    1622  if (!node_state(node, N_MEMORY))    in do_pages_move()
    1626  if (!node_isset(node, task_nodes))    in do_pages_move()
    1630  current_node = node;    in do_pages_move()
    [all …]
|
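do_pages_move() above validates each user-supplied target node before queueing pages for migration. Those checks, condensed into a hypothetical helper (the error codes here are illustrative, not necessarily the ones the syscall returns):

    #include <linux/errno.h>
    #include <linux/nodemask.h>

    /* Reject out-of-range, memoryless, or policy-forbidden target nodes. */
    static int my_check_target_node(int node, const nodemask_t *task_nodes)
    {
            if (node < 0 || node >= MAX_NUMNODES)
                    return -EINVAL;
            if (!node_state(node, N_MEMORY))
                    return -ENOENT;
            if (!node_isset(node, *task_nodes))
                    return -EACCES;
            return 0;
    }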
D | backing-dev.c |
    408  struct rb_node **node, *parent;    in wb_congested_get_create() local
    413  node = &bdi->cgwb_congested_tree.rb_node;    in wb_congested_get_create()
    416  while (*node != NULL) {    in wb_congested_get_create()
    417  parent = *node;    in wb_congested_get_create()
    421  node = &parent->rb_left;    in wb_congested_get_create()
    423  node = &parent->rb_right;    in wb_congested_get_create()
    431  rb_link_node(&congested->rb_node, parent, node);    in wb_congested_get_create()
|
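wb_congested_get_create() above walks the rbtree with a struct rb_node ** cursor and then links the new node at the slot it stopped on. The same insert idiom for a hypothetical integer-keyed entry:

    #include <linux/rbtree.h>

    struct my_entry {
            struct rb_node rb_node;
            int key;
    };

    /* Find the insertion slot, then link and rebalance. */
    static void my_rb_insert(struct rb_root *root, struct my_entry *new)
    {
            struct rb_node **node = &root->rb_node, *parent = NULL;

            while (*node) {
                    struct my_entry *cur = rb_entry(*node, struct my_entry, rb_node);

                    parent = *node;
                    if (new->key < cur->key)
                            node = &parent->rb_left;
                    else
                            node = &parent->rb_right;
            }
            rb_link_node(&new->rb_node, parent, node);
            rb_insert_color(&new->rb_node, root);
    }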
D | memcontrol.c |
    1672  int node;    in mem_cgroup_select_victim_node() local
    1675  node = memcg->last_scanned_node;    in mem_cgroup_select_victim_node()
    1677  node = next_node_in(node, memcg->scan_nodes);    in mem_cgroup_select_victim_node()
    1683  if (unlikely(node == MAX_NUMNODES))    in mem_cgroup_select_victim_node()
    1684  node = numa_node_id();    in mem_cgroup_select_victim_node()
    1686  memcg->last_scanned_node = node;    in mem_cgroup_select_victim_node()
    1687  return node;    in mem_cgroup_select_victim_node()
    3411  int node, cpu, i;    in memcg_flush_percpu_vmstats() local
    3421  for_each_node(node) {    in memcg_flush_percpu_vmstats()
    3422  struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];    in memcg_flush_percpu_vmstats()
    [all …]
|
D | page_alloc.c |
    1484  static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)    in early_pfn_in_nid() argument
    1489  if (nid >= 0 && nid != node)    in early_pfn_in_nid()
    1495  static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)    in early_pfn_in_nid() argument
    5544  static int find_next_best_node(int node, nodemask_t *used_node_mask)    in find_next_best_node() argument
    5552  if (!node_isset(node, *used_node_mask)) {    in find_next_best_node()
    5553  node_set(node, *used_node_mask);    in find_next_best_node()
    5554  return node;    in find_next_best_node()
    5564  val = node_distance(node, n);    in find_next_best_node()
    5567  val += (n < node);    in find_next_best_node()
    5607  pg_data_t *node = NODE_DATA(node_order[i]);    in build_zonelists_in_node_order() local
    [all …]
|
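find_next_best_node() above orders zonelist fallback nodes by NUMA distance. A simplified sketch that just picks the closest unused node and ignores the load-spreading tweaks in the real code (helper name is hypothetical):

    #include <linux/kernel.h>
    #include <linux/nodemask.h>
    #include <linux/numa.h>
    #include <linux/topology.h>

    /* Pick the nearest node with memory that is not yet in 'used', and mark it. */
    static int my_pick_closest_node(int node, nodemask_t *used)
    {
            int n, best_node = NUMA_NO_NODE;
            int best_val = INT_MAX;

            for_each_node_state(n, N_MEMORY) {
                    int val;

                    if (node_isset(n, *used))
                            continue;
                    val = node_distance(node, n);
                    if (val < best_val) {
                            best_val = val;
                            best_node = n;
                    }
            }

            if (best_node != NUMA_NO_NODE)
                    node_set(best_node, *used);
            return best_node;
    }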
D | util.c |
    538  void *kvmalloc_node(size_t size, gfp_t flags, int node)    in kvmalloc_node() argument
    548  return kmalloc_node(size, flags, node);    in kvmalloc_node()
    564  ret = kmalloc_node(size, kmalloc_flags, node);    in kvmalloc_node()
    573  return __vmalloc_node_flags_caller(size, node, flags,    in kvmalloc_node()
|
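kvmalloc_node() above tries the slab allocator first and only falls back to vmalloc when that fails. A reduced sketch of that policy (not the real implementation, which also adjusts the gfp flags and restricts the fallback to GFP_KERNEL-compatible requests):

    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    /* Sketch: physically contiguous if cheap, otherwise virtually contiguous. */
    static void *my_kvmalloc_node(size_t size, int node)
    {
            void *p;

            /* Ask kmalloc politely: no warnings, no aggressive retries. */
            p = kmalloc_node(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, node);
            if (p)
                    return p;

            /* Fall back to vmalloc, keeping the allocation on the requested node. */
            return vmalloc_node(size, node);
    }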
D | kmemleak.c |
    121   struct hlist_node node;    member
    475   hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {    in free_object_rcu()
    476   hlist_del(&area->node);    in free_object_rcu()
    817   INIT_HLIST_NODE(&area->node);    in add_scan_area()
    821   hlist_add_head(&area->node, &object->area_list);    in add_scan_area()
    1352  hlist_for_each_entry(area, &object->area_list, node)    in scan_object()
|
D | slab.h |
    628  static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)    in get_node() argument
    630  return s->node[node];    in get_node()
|
/mm/kasan/ |
D | init.c |
    84   static __init void *early_alloc(size_t size, int node)    in early_alloc() argument
    87   MEMBLOCK_ALLOC_ACCESSIBLE, node);    in early_alloc()
    91   __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));    in early_alloc()
|