Lines matching refs:node in mm/slub.c (the SLUB allocator). Each match gives the source line number, the matching line, the enclosing function, and whether node is an argument or a local at that site.

996 static inline unsigned long slabs_node(struct kmem_cache *s, int node)  in slabs_node()  argument
998 struct kmem_cache_node *n = get_node(s, node); in slabs_node()
1008 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) in inc_slabs_node() argument
1010 struct kmem_cache_node *n = get_node(s, node); in inc_slabs_node()
1023 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) in dec_slabs_node() argument
1025 struct kmem_cache_node *n = get_node(s, node); in dec_slabs_node()
1248 static inline unsigned long slabs_node(struct kmem_cache *s, int node) in slabs_node() argument
1252 static inline void inc_slabs_node(struct kmem_cache *s, int node, in inc_slabs_node() argument
1254 static inline void dec_slabs_node(struct kmem_cache *s, int node, in dec_slabs_node() argument
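The matches at 996-1025 are the per-node slab accounting helpers; 1248-1254 are their empty counterparts when the accounting is compiled out. A minimal userspace model of the pattern they show follows; the struct layouts and the NULL tolerance are assumptions, and slub.c uses atomic_long_t counters rather than plain fields.

/*
 * Minimal userspace model of the per-node slab counters matched at
 * 996-1025. Struct layouts are assumptions; atomic counters and
 * per-node locking are elided.
 */
#include <stdio.h>

#define MAX_NUMNODES 4

struct kmem_cache_node {
        unsigned long nr_slabs;
        unsigned long total_objects;
};

struct kmem_cache {
        struct kmem_cache_node *node[MAX_NUMNODES];
};

static struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
        return s->node[node];
}

static unsigned long slabs_node(struct kmem_cache *s, int node)
{
        struct kmem_cache_node *n = get_node(s, node);

        return n ? n->nr_slabs : 0;
}

static void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
        struct kmem_cache_node *n = get_node(s, node);

        if (n) {                /* n can be NULL during early boot */
                n->nr_slabs++;
                n->total_objects += objects;
        }
}

static void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
        struct kmem_cache_node *n = get_node(s, node);

        if (n) {
                n->nr_slabs--;
                n->total_objects -= objects;
        }
}

int main(void)
{
        struct kmem_cache_node n0 = { 0, 0 };
        struct kmem_cache s = { .node = { &n0 } };

        inc_slabs_node(&s, 0, 32);
        printf("node 0 holds %lu slab(s)\n", slabs_node(&s, 0));
        dec_slabs_node(&s, 0, 32);
        return 0;
}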
1317 gfp_t flags, int node, struct kmem_cache_order_objects oo) in alloc_slab_page() argument
1327 if (node == NUMA_NO_NODE) in alloc_slab_page()
1330 page = alloc_pages_exact_node(node, flags, order); in alloc_slab_page()
1338 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) in allocate_slab() argument
1357 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1365 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1375 kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node); in allocate_slab()
1409 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) in new_slab() argument
1420 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); in new_slab()
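Lines 1327-1330 show the dispatch inside alloc_slab_page(): an unconstrained page allocation when the caller passed NUMA_NO_NODE, a node-exact one otherwise. A compilable sketch of that branch, with malloc() standing in for both kernel page allocators:

#include <stdio.h>
#include <stdlib.h>

#define NUMA_NO_NODE (-1)

/* Stand-ins for the kernel allocators named at lines 1327-1330. */
static void *alloc_pages_stub(unsigned flags, unsigned order)
{
        return malloc((size_t)4096 << order);
}

static void *alloc_pages_exact_node_stub(int node, unsigned flags,
                                         unsigned order)
{
        /* the real call allocates only from the given node's zones */
        return malloc((size_t)4096 << order);
}

static void *alloc_slab_page(unsigned flags, int node, unsigned order)
{
        if (node == NUMA_NO_NODE)
                return alloc_pages_stub(flags, order);
        return alloc_pages_exact_node_stub(node, flags, order);
}

int main(void)
{
        void *p = alloc_slab_page(0, NUMA_NO_NODE, 1);

        printf("slab page at %p\n", p);
        free(p);
        return 0;
}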
1725 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node, in get_partial() argument
1729 int searchnode = node; in get_partial()
1731 if (node == NUMA_NO_NODE) in get_partial()
1733 else if (!node_present_pages(node)) in get_partial()
1734 searchnode = node_to_mem_node(node); in get_partial()
1737 if (object || node != NUMA_NO_NODE) in get_partial()
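Lines 1729-1734 show get_partial() remapping the requested node before it searches the partial lists: NUMA_NO_NODE becomes the local memory node, and a memoryless node is redirected to its nearest node with memory. A standalone model of that selection; the three helpers here are simplified stand-ins, and the NUMA_NO_NODE branch body (line 1732) is not in the matched lines, so its use of numa_mem_id() is an assumption.

#include <stdio.h>

#define NUMA_NO_NODE (-1)

/* Hypothetical stand-ins for the NUMA helpers named in the listing. */
static int numa_mem_id(void) { return 0; }            /* this CPU's node */
static int node_present_pages(int node) { return node != 2; } /* node 2 memoryless */
static int node_to_mem_node(int node) { return 0; }   /* nearest node with memory */

/* Node selection modeled on lines 1729-1734. */
static int pick_search_node(int node)
{
        int searchnode = node;

        if (node == NUMA_NO_NODE)
                searchnode = numa_mem_id();
        else if (!node_present_pages(node))
                searchnode = node_to_mem_node(node);

        return searchnode;
}

int main(void)
{
        printf("%d %d %d\n",
               pick_search_node(NUMA_NO_NODE),  /* 0: local node */
               pick_search_node(1),             /* 1: unchanged */
               pick_search_node(2));            /* 0: memoryless fallback */
        return 0;
}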
2126 static inline int node_match(struct page *page, int node) in node_match() argument
2129 if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node)) in node_match()
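Line 2129 gives the node_match() predicate in full: a NULL page never matches, and a concrete node request only matches a page resident on that node. The same predicate as a runnable sketch; page_to_nid() is a trivial stand-in here:

#include <stdbool.h>
#include <stdio.h>

#define NUMA_NO_NODE (-1)

struct page { int nid; };

static int page_to_nid(const struct page *page) { return page->nid; }

/* Predicate from line 2129. */
static bool node_match(const struct page *page, int node)
{
        if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node))
                return false;
        return true;
}

int main(void)
{
        struct page p = { .nid = 1 };

        printf("%d %d %d\n",
               node_match(&p, NUMA_NO_NODE),    /* 1: any node accepted */
               node_match(&p, 1),               /* 1: exact match */
               node_match(&p, 0));              /* 0: wrong node */
        return 0;
}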
2169 int node; in slab_out_of_memory() local
2185 for_each_kmem_cache_node(s, node, n) { in slab_out_of_memory()
2195 node, nr_slabs, nr_objs, nr_free); in slab_out_of_memory()
2201 int node, struct kmem_cache_cpu **pc) in new_slab_objects() argument
2207 freelist = get_partial(s, flags, node, c); in new_slab_objects()
2212 page = new_slab(s, flags, node); in new_slab_objects()
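Lines 2207 and 2212 show the order of attack in new_slab_objects(): first try to reuse a partial slab via get_partial(), and only fall back to allocating a fresh slab when that fails. A sketch of that fallback with hypothetical stubs for both steps:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins: a partial-list lookup that may fail, and a
 * fresh-slab allocation, mirroring the order at lines 2207 and 2212. */
static void *get_partial_stub(int node) { (void)node; return NULL; }
static void *new_slab_stub(int node) { (void)node; return malloc(4096); }

static void *new_slab_objects(int node)
{
        void *freelist = get_partial_stub(node);

        if (freelist)
                return freelist;        /* reuse an existing partial slab */

        return new_slab_stub(node);     /* otherwise hit the page allocator */
}

int main(void)
{
        void *obj = new_slab_objects(0);

        printf("freelist at %p\n", obj);
        free(obj);
        return 0;
}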
2292 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, in __slab_alloc() argument
2314 if (unlikely(!node_match(page, node))) { in __slab_alloc()
2315 int searchnode = node; in __slab_alloc()
2317 if (node != NUMA_NO_NODE && !node_present_pages(node)) in __slab_alloc()
2318 searchnode = node_to_mem_node(node); in __slab_alloc()
2378 freelist = new_slab_objects(s, gfpflags, node, &c); in __slab_alloc()
2381 slab_out_of_memory(s, gfpflags, node); in __slab_alloc()
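Lines 2314-2318 show the slow path tolerating memoryless nodes: before giving up on the current cpu slab after a node mismatch, the request is retried against the nearest node that actually has memory, so allocations targeted at a memoryless node do not force a deactivation. A model of that decision; the node topology here is invented.

#include <stdbool.h>
#include <stdio.h>

#define NUMA_NO_NODE (-1)

struct page { int nid; };

static bool node_match(const struct page *page, int node)
{
        return page && (node == NUMA_NO_NODE || page->nid == node);
}

/* Assumed stand-ins: node 2 is memoryless and maps to node 0. */
static bool node_present_pages(int node) { return node != 2; }
static int node_to_mem_node(int node) { (void)node; return 0; }

/* Decision modeled on lines 2314-2318. */
static bool must_deactivate(const struct page *cpu_slab, int node)
{
        int searchnode = node;

        if (node != NUMA_NO_NODE && !node_present_pages(node))
                searchnode = node_to_mem_node(node);

        return !node_match(cpu_slab, searchnode);
}

int main(void)
{
        struct page slab_on_0 = { .nid = 0 };

        printf("%d\n", must_deactivate(&slab_on_0, 2)); /* 0: remapped to 0 */
        printf("%d\n", must_deactivate(&slab_on_0, 1)); /* 1: real mismatch */
        return 0;
}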
2413 gfp_t gfpflags, int node, unsigned long addr) in slab_alloc_node() argument
2450 if (unlikely(!object || !node_match(page, node))) { in slab_alloc_node()
2451 object = __slab_alloc(s, gfpflags, node, addr, c); in slab_alloc_node()
2518 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) in kmem_cache_alloc_node() argument
2520 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); in kmem_cache_alloc_node()
2523 s->object_size, s->size, gfpflags, node); in kmem_cache_alloc_node()
2532 int node, size_t size) in kmem_cache_alloc_node_trace() argument
2534 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); in kmem_cache_alloc_node_trace()
2537 size, s->size, gfpflags, node); in kmem_cache_alloc_node_trace()
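Lines 2518-2523 are the exported node-aware entry point. Illustrative kernel-side usage, shown as it would appear in a caller rather than as a standalone program; struct foo and alloc_foo_on() are hypothetical names:

#include <linux/slab.h>

struct foo { int a; };

static struct foo *alloc_foo_on(struct kmem_cache *cache, int nid)
{
        /* behaves like kmem_cache_alloc() when nid is NUMA_NO_NODE */
        return kmem_cache_alloc_node(cache, GFP_KERNEL, nid);
}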
2896 static void early_kmem_cache_node_alloc(int node) in early_kmem_cache_node_alloc() argument
2903 page = new_slab(kmem_cache_node, GFP_NOWAIT, node); in early_kmem_cache_node_alloc()
2906 if (page_to_nid(page) != node) { in early_kmem_cache_node_alloc()
2907 pr_err("SLUB: Unable to allocate memory from node %d\n", node); in early_kmem_cache_node_alloc()
2916 kmem_cache_node->node[node] = n; in early_kmem_cache_node_alloc()
2922 inc_slabs_node(kmem_cache_node, node, page->objects); in early_kmem_cache_node_alloc()
2933 int node; in free_kmem_cache_nodes() local
2936 for_each_kmem_cache_node(s, node, n) { in free_kmem_cache_nodes()
2938 s->node[node] = NULL; in free_kmem_cache_nodes()
2944 int node; in init_kmem_cache_nodes() local
2946 for_each_node_state(node, N_NORMAL_MEMORY) { in init_kmem_cache_nodes()
2950 early_kmem_cache_node_alloc(node); in init_kmem_cache_nodes()
2954 GFP_KERNEL, node); in init_kmem_cache_nodes()
2961 s->node[node] = n; in init_kmem_cache_nodes()
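Lines 2896-2961 cover the bootstrap problem: kmem_cache_node is the cache that holds per-node state, yet allocating its own per-node structs needs a working allocator, so early boot (slab_state == DOWN, visible in the 2946-2950 loop) routes through early_kmem_cache_node_alloc(), which carves the struct out of a raw new slab. A simplified userspace model; calloc() stands in for both allocation paths.

#include <stdio.h>
#include <stdlib.h>

#define NR_NODES 2

struct kmem_cache_node { unsigned long nr_partial; };
struct kmem_cache { struct kmem_cache_node *node[NR_NODES]; };

static int slab_state_down = 1;         /* models slab_state == DOWN */

/* Bootstrap path (lines 2896-2922): take the node struct from a raw
 * slab page because no cache can serve allocations yet. */
static struct kmem_cache_node *early_node_alloc(void)
{
        return calloc(1, sizeof(struct kmem_cache_node));
}

static int init_kmem_cache_nodes(struct kmem_cache *s)
{
        for (int node = 0; node < NR_NODES; node++) {
                struct kmem_cache_node *n;

                if (slab_state_down)
                        n = early_node_alloc();
                else
                        n = calloc(1, sizeof(*n)); /* normal cache alloc */
                if (!n)
                        return 0;
                s->node[node] = n;
        }
        return 1;
}

int main(void)
{
        struct kmem_cache s = { { NULL } };

        printf("%s\n", init_kmem_cache_nodes(&s) ? "ok" : "fail");
        return 0;
}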
3229 int node; in kmem_cache_close() local
3234 for_each_kmem_cache_node(s, node, n) { in kmem_cache_close()
3236 if (n->nr_partial || slabs_node(s, node)) in kmem_cache_close()
3303 static void *kmalloc_large_node(size_t size, gfp_t flags, int node) in kmalloc_large_node() argument
3309 page = alloc_kmem_pages_node(node, flags, get_order(size)); in kmalloc_large_node()
3317 void *__kmalloc_node(size_t size, gfp_t flags, int node) in __kmalloc_node() argument
3323 ret = kmalloc_large_node(size, flags, node); in __kmalloc_node()
3327 flags, node); in __kmalloc_node()
3337 ret = slab_alloc_node(s, flags, node, _RET_IP_); in __kmalloc_node()
3339 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); in __kmalloc_node()
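Lines 3317-3327 show __kmalloc_node() splitting on size: oversized requests bypass the slab layer and take whole pages on the requested node via kmalloc_large_node() (3303-3309); everything else goes through a kmalloc cache and slab_alloc_node() (3337). A sketch of the split; the cutoff value here is an assumption, since the real KMALLOC_MAX_CACHE_SIZE depends on page size and allocator config.

#include <stdio.h>
#include <stdlib.h>

#define KMALLOC_MAX_CACHE_SIZE 8192     /* assumed cutoff */

static void *kmalloc_large_node_stub(size_t size)
{
        /* lines 3303-3309: whole pages straight from the buddy allocator */
        return malloc(size);
}

static void *slab_alloc_stub(size_t size)
{
        return malloc(size);
}

static void *__kmalloc_node_model(size_t size, int node)
{
        (void)node;     /* the real code threads node through both paths */

        if (size > KMALLOC_MAX_CACHE_SIZE)
                return kmalloc_large_node_stub(size);
        return slab_alloc_stub(size);
}

int main(void)
{
        void *p = __kmalloc_node_model(64 * 1024, 0);

        printf("large alloc at %p\n", p);
        free(p);
        return 0;
}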
3437 int node; in __kmem_cache_shrink() local
3451 for_each_kmem_cache_node(s, node, n) { in __kmem_cache_shrink()
3530 s->node[offline_node] = NULL; in slab_mem_offline_callback()
3570 s->node[nid] = n; in slab_mem_going_online_callback()
3621 int node; in bootstrap() local
3633 for_each_kmem_cache_node(s, node, n) { in bootstrap()
3668 offsetof(struct kmem_cache, node) + in kmem_cache_init()
3816 int node, unsigned long caller) in __kmalloc_node_track_caller() argument
3822 ret = kmalloc_large_node(size, gfpflags, node); in __kmalloc_node_track_caller()
3826 gfpflags, node); in __kmalloc_node_track_caller()
3836 ret = slab_alloc_node(s, gfpflags, node, caller); in __kmalloc_node_track_caller()
3839 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); in __kmalloc_node_track_caller()
3928 int node; in validate_slab_cache() local
3938 for_each_kmem_cache_node(s, node, n) in validate_slab_cache()
4090 int node; in list_locations() local
4103 for_each_kmem_cache_node(s, node, n) { in list_locations()
4252 int node; in show_slab_objects() local
4266 int node; in show_slab_objects() local
4273 node = page_to_nid(page); in show_slab_objects()
4282 nodes[node] += x; in show_slab_objects()
4286 node = page_to_nid(page); in show_slab_objects()
4294 nodes[node] += x; in show_slab_objects()
4304 for_each_kmem_cache_node(s, node, n) { in show_slab_objects()
4314 nodes[node] += x; in show_slab_objects()
4322 for_each_kmem_cache_node(s, node, n) { in show_slab_objects()
4330 nodes[node] += x; in show_slab_objects()
4335 for (node = 0; node < nr_node_ids; node++) in show_slab_objects()
4336 if (nodes[node]) in show_slab_objects()
4338 node, nodes[node]); in show_slab_objects()
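Lines 4252-4338 show show_slab_objects() bucketing counts by the node each slab's page lives on, then emitting only the non-empty buckets (the " N0=... N1=..." suffix seen in the sysfs output). A standalone model of that aggregation, with invented sample data:

#include <stdio.h>

#define NR_NODE_IDS 4

int main(void)
{
        unsigned long nodes[NR_NODE_IDS] = { 0 };
        /* hypothetical sample: home node and object count per slab */
        int slab_node[] = { 0, 0, 1, 3 };
        unsigned long slab_objs[] = { 32, 16, 8, 4 };

        /* accumulate per node, as in the loops at 4273-4330 */
        for (int i = 0; i < 4; i++)
                nodes[slab_node[i]] += slab_objs[i];

        /* print only non-empty buckets, as at 4335-4338 */
        for (int node = 0; node < NR_NODE_IDS; node++)
                if (nodes[node])
                        printf(" N%d=%lu", node, nodes[node]);
        printf("\n");
        return 0;
}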
4348 int node; in any_slab_objects() local
4351 for_each_kmem_cache_node(s, node, n) in any_slab_objects()
5315 int node; in get_slabinfo() local
5318 for_each_kmem_cache_node(s, node, n) { in get_slabinfo()