Lines matching references to the identifier node (SLUB allocator, mm/slub.c)

1031 static inline unsigned long slabs_node(struct kmem_cache *s, int node)  in slabs_node()  argument
1033 struct kmem_cache_node *n = get_node(s, node); in slabs_node()
1043 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) in inc_slabs_node() argument
1045 struct kmem_cache_node *n = get_node(s, node); in inc_slabs_node()
1058 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) in dec_slabs_node() argument
1060 struct kmem_cache_node *n = get_node(s, node); in dec_slabs_node()
1373 static inline unsigned long slabs_node(struct kmem_cache *s, int node) in slabs_node() argument
1377 static inline void inc_slabs_node(struct kmem_cache *s, int node, in inc_slabs_node() argument
1379 static inline void dec_slabs_node(struct kmem_cache *s, int node, in dec_slabs_node() argument
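
The three helpers at 1031-1060 read and update per-node slab statistics through get_node(s, node); the variants at 1373-1379 are the empty stubs used when that accounting is compiled out. A minimal standalone model of the bookkeeping pattern, with stand-in type and field names (the listing does not show the kernel's own fields):

#include <stdatomic.h>

/* Stand-in for the per-node statistics kept behind get_node(s, node). */
struct node_counters {
        atomic_long nr_slabs;      /* slabs currently attached to this node */
        atomic_long total_objects; /* objects those slabs can hold */
};

static unsigned long model_slabs_node(struct node_counters *n)
{
        return (unsigned long)atomic_load(&n->nr_slabs);
}

static void model_inc_slabs_node(struct node_counters *n, int objects)
{
        atomic_fetch_add(&n->nr_slabs, 1);
        atomic_fetch_add(&n->total_objects, objects);
}

static void model_dec_slabs_node(struct node_counters *n, int objects)
{
        atomic_fetch_sub(&n->nr_slabs, 1);
        atomic_fetch_sub(&n->total_objects, objects);
}
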
1489 gfp_t flags, int node, struct kmem_cache_order_objects oo) in alloc_slab_page() argument
1494 if (node == NUMA_NO_NODE) in alloc_slab_page()
1497 page = __alloc_pages_node(node, flags, order); in alloc_slab_page()
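
alloc_slab_page() (1489-1497) is where the node argument finally reaches the page allocator: NUMA_NO_NODE means "no placement preference", anything else is forwarded to __alloc_pages_node(). A tiny standalone sketch of that dispatch; both allocator calls are stubs, and the "allocate locally" branch is only inferred from the NUMA_NO_NODE test at 1494:

#include <stdio.h>

#define NUMA_NO_NODE (-1)

static void *alloc_anywhere(void)    { puts("no preference: allocate locally"); return (void *)1; }
static void *alloc_on_node(int node) { printf("allocate on node %d\n", node);   return (void *)1; }

static void *model_alloc_slab_page(int node)
{
        if (node == NUMA_NO_NODE)       /* caller did not pin the allocation */
                return alloc_anywhere();
        return alloc_on_node(node);     /* caller asked for a specific node */
}
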
1616 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) in allocate_slab() argument
1640 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1648 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1696 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) in new_slab() argument
1707 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); in new_slab()
1948 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node, in get_partial() argument
1952 int searchnode = node; in get_partial()
1954 if (node == NUMA_NO_NODE) in get_partial()
1956 else if (!node_present_pages(node)) in get_partial()
1957 searchnode = node_to_mem_node(node); in get_partial()
1960 if (object || node != NUMA_NO_NODE) in get_partial()
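
get_partial() (1948-1960) decides which node's partial list to search first: a request for a node that has no memory of its own is redirected to a nearby node that does (node_to_mem_node()), and only a NUMA_NO_NODE request is allowed to fall through to scanning other nodes afterwards (the check at 1960). A standalone model of the search-node selection; the topology queries are stubs, and the NUMA_NO_NODE branch is assumed to mean "use the local memory node", which the listing only implies:

#define NUMA_NO_NODE (-1)

/* Stub topology: pretend only node 0 has memory and is local. */
static int local_memory_node(void)            { return 0; }
static int node_has_memory(int node)          { return node == 0; }
static int nearest_node_with_memory(int node) { (void)node; return 0; }

static int model_pick_search_node(int requested)
{
        if (requested == NUMA_NO_NODE)
                return local_memory_node();                 /* no preference: stay local */
        if (!node_has_memory(requested))
                return nearest_node_with_memory(requested); /* memoryless node: redirect */
        return requested;                                   /* honour the explicit request */
}
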
2369 static inline int node_match(struct page *page, int node) in node_match() argument
2372 if (node != NUMA_NO_NODE && page_to_nid(page) != node) in node_match()
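
node_match() (2369-2372) is the predicate the allocation paths use to decide whether a slab page can satisfy a request: either the caller did not name a node, or the page's home node is the one asked for. In isolation:

#define NUMA_NO_NODE (-1)

/* Mirrors the check at 2372: NUMA_NO_NODE matches any page, otherwise
 * the page's node must equal the requested node. */
static int model_node_match(int page_node, int requested_node)
{
        if (requested_node != NUMA_NO_NODE && page_node != requested_node)
                return 0;
        return 1;
}
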
2412 int node; in slab_out_of_memory() local
2428 for_each_kmem_cache_node(s, node, n) { in slab_out_of_memory()
2438 node, nr_slabs, nr_objs, nr_free); in slab_out_of_memory()
2444 int node, struct kmem_cache_cpu **pc) in new_slab_objects() argument
2452 freelist = get_partial(s, flags, node, c); in new_slab_objects()
2457 page = new_slab(s, flags, node); in new_slab_objects()
2539 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, in ___slab_alloc() argument
2550 if (unlikely(!node_match(page, node))) { in ___slab_alloc()
2551 int searchnode = node; in ___slab_alloc()
2553 if (node != NUMA_NO_NODE && !node_present_pages(node)) in ___slab_alloc()
2554 searchnode = node_to_mem_node(node); in ___slab_alloc()
2608 freelist = new_slab_objects(s, gfpflags, node, &c); in ___slab_alloc()
2611 slab_out_of_memory(s, gfpflags, node); in ___slab_alloc()
2632 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, in __slab_alloc() argument
2648 p = ___slab_alloc(s, gfpflags, node, addr, c); in __slab_alloc()
2675 gfp_t gfpflags, int node, unsigned long addr) in slab_alloc_node() argument
2721 if (unlikely(!object || !node_match(page, node))) { in slab_alloc_node()
2722 object = __slab_alloc(s, gfpflags, node, addr, c); in slab_alloc_node()
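
slab_alloc_node() (2675-2722) is the fast path: it takes an object from the current CPU's cached slab only when that slab passes node_match() for the requested node; otherwise it drops into __slab_alloc()/___slab_alloc() (2539-2648), which reselects a usable node and may allocate a fresh slab. A control-flow sketch of the fast-path test at 2721, with the per-CPU state and the slow path reduced to stand-ins:

#define NUMA_NO_NODE (-1)

struct cpu_cache {
        void *freelist;   /* next free object on the cached slab, if any */
        int   slab_node;  /* node the cached slab lives on */
};

static void *model_slow_path(int node) { (void)node; return 0; } /* stub for __slab_alloc() */

static void *model_fast_alloc(struct cpu_cache *c, int node)
{
        void *object = c->freelist;

        /* Use the cached slab only if it has an object and is on an
         * acceptable node; anything else goes to the slow path. */
        if (!object || (node != NUMA_NO_NODE && c->slab_node != node))
                return model_slow_path(node);

        /* (The real fast path would also advance the freelist here.) */
        return object;
}
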
2792 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) in kmem_cache_alloc_node() argument
2794 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); in kmem_cache_alloc_node()
2797 s->object_size, s->size, gfpflags, node); in kmem_cache_alloc_node()
2806 int node, size_t size) in kmem_cache_alloc_node_trace() argument
2808 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); in kmem_cache_alloc_node_trace()
2811 size, s->size, gfpflags, node); in kmem_cache_alloc_node_trace()
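
kmem_cache_alloc_node() (2792) and kmem_cache_alloc_node_trace() (2806) are the exported entry points that carry an explicit node through the paths above. A sketch of a typical caller in a kernel module; the cache name, object type, and error handling are made up for illustration:

#include <linux/slab.h>
#include <linux/errno.h>

struct my_obj { int data; };                 /* hypothetical object type */

static struct kmem_cache *my_cache;          /* hypothetical cache */

static int my_alloc_on_node(int nid)
{
        struct my_obj *obj;

        my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
                                     0, SLAB_HWCACHE_ALIGN, NULL);
        if (!my_cache)
                return -ENOMEM;

        /* Ask for an object backed by memory on node nid; passing
         * NUMA_NO_NODE instead would let the allocator choose. */
        obj = kmem_cache_alloc_node(my_cache, GFP_KERNEL, nid);
        if (!obj) {
                kmem_cache_destroy(my_cache);
                return -ENOMEM;
        }

        obj->data = 0;
        kmem_cache_free(my_cache, obj);
        kmem_cache_destroy(my_cache);
        return 0;
}
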
3366 static void early_kmem_cache_node_alloc(int node) in early_kmem_cache_node_alloc() argument
3373 page = new_slab(kmem_cache_node, GFP_NOWAIT, node); in early_kmem_cache_node_alloc()
3376 if (page_to_nid(page) != node) { in early_kmem_cache_node_alloc()
3377 pr_err("SLUB: Unable to allocate memory from node %d\n", node); in early_kmem_cache_node_alloc()
3392 kmem_cache_node->node[node] = n; in early_kmem_cache_node_alloc()
3394 inc_slabs_node(kmem_cache_node, node, page->objects); in early_kmem_cache_node_alloc()
3405 int node; in free_kmem_cache_nodes() local
3408 for_each_kmem_cache_node(s, node, n) { in free_kmem_cache_nodes()
3409 s->node[node] = NULL; in free_kmem_cache_nodes()
3423 int node; in init_kmem_cache_nodes() local
3425 for_each_node_state(node, N_NORMAL_MEMORY) { in init_kmem_cache_nodes()
3429 early_kmem_cache_node_alloc(node); in init_kmem_cache_nodes()
3433 GFP_KERNEL, node); in init_kmem_cache_nodes()
3441 s->node[node] = n; in init_kmem_cache_nodes()
3725 int node; in __kmem_cache_empty() local
3728 for_each_kmem_cache_node(s, node, n) in __kmem_cache_empty()
3729 if (n->nr_partial || slabs_node(s, node)) in __kmem_cache_empty()
3739 int node; in __kmem_cache_shutdown() local
3744 for_each_kmem_cache_node(s, node, n) { in __kmem_cache_shutdown()
3746 if (n->nr_partial || slabs_node(s, node)) in __kmem_cache_shutdown()
3809 static void *kmalloc_large_node(size_t size, gfp_t flags, int node) in kmalloc_large_node() argument
3816 page = alloc_pages_node(node, flags, order); in kmalloc_large_node()
3826 void *__kmalloc_node(size_t size, gfp_t flags, int node) in __kmalloc_node() argument
3832 ret = kmalloc_large_node(size, flags, node); in __kmalloc_node()
3836 flags, node); in __kmalloc_node()
3846 ret = slab_alloc_node(s, flags, node, _RET_IP_); in __kmalloc_node()
3848 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); in __kmalloc_node()
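
__kmalloc_node() (3826-3848) is the backend of kmalloc_node(): oversized requests go through kmalloc_large_node() (3809-3816) straight to the page allocator on the requested node, while everything else is served from a kmalloc cache via slab_alloc_node(). From the caller's side the node-aware variant is used as in this sketch; the buffer size and the cpu_to_node() placement policy are illustrative:

#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/errno.h>

static int alloc_buffer_near_cpu(int cpu)
{
        void *buf;

        /* Place the buffer on the node that owns this CPU so later
         * accesses from that CPU stay node-local. */
        buf = kmalloc_node(4096, GFP_KERNEL, cpu_to_node(cpu));
        if (!buf)
                return -ENOMEM;

        kfree(buf);
        return 0;
}
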
3973 int node; in __kmem_cache_shrink() local
3984 for_each_kmem_cache_node(s, node, n) { in __kmem_cache_shrink()
4026 if (slabs_node(s, node)) in __kmem_cache_shrink()
4103 s->node[offline_node] = NULL; in slab_mem_offline_callback()
4143 s->node[nid] = n; in slab_mem_going_online_callback()
4194 int node; in bootstrap() local
4206 for_each_kmem_cache_node(s, node, n) { in bootstrap()
4243 offsetof(struct kmem_cache, node) + in kmem_cache_init()
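
The offsetof() expression at 4243 is how kmem_cache_init() sizes struct kmem_cache at boot: the structure ends in a per-node pointer array, so only enough of it is allocated to cover the nodes that can actually exist. The same sizing idiom in standalone form; the structure and its fields are stand-ins:

#include <stddef.h>
#include <stdlib.h>

struct per_node_state;                     /* opaque per-node payload */

struct cache_like {
        unsigned int flags;                /* ...fixed-size members... */
        struct per_node_state *node[];     /* trailing per-node pointer array */
};

static struct cache_like *alloc_cache_like(int nr_possible_nodes)
{
        size_t sz = offsetof(struct cache_like, node) +
                    nr_possible_nodes * sizeof(struct per_node_state *);

        return calloc(1, sz);              /* allocate only the slots needed */
}
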
4344 int node, unsigned long caller) in __kmalloc_node_track_caller() argument
4350 ret = kmalloc_large_node(size, gfpflags, node); in __kmalloc_node_track_caller()
4354 gfpflags, node); in __kmalloc_node_track_caller()
4364 ret = slab_alloc_node(s, gfpflags, node, caller); in __kmalloc_node_track_caller()
4367 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); in __kmalloc_node_track_caller()
4456 int node; in validate_slab_cache() local
4465 for_each_kmem_cache_node(s, node, n) in validate_slab_cache()
4617 int node; in list_locations() local
4629 for_each_kmem_cache_node(s, node, n) { in list_locations()
4791 int node; in show_slab_objects() local
4805 int node; in show_slab_objects() local
4812 node = page_to_nid(page); in show_slab_objects()
4821 nodes[node] += x; in show_slab_objects()
4825 node = page_to_nid(page); in show_slab_objects()
4833 nodes[node] += x; in show_slab_objects()
4853 for_each_kmem_cache_node(s, node, n) { in show_slab_objects()
4863 nodes[node] += x; in show_slab_objects()
4871 for_each_kmem_cache_node(s, node, n) { in show_slab_objects()
4879 nodes[node] += x; in show_slab_objects()
4884 for (node = 0; node < nr_node_ids; node++) in show_slab_objects()
4885 if (nodes[node]) in show_slab_objects()
4887 node, nodes[node]); in show_slab_objects()
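
show_slab_objects() (4791-4887) accumulates everything it counts into a per-node array (the nodes[node] += x lines) and then emits one "N<id>=<count>" entry per node that has something to report (4884-4887). Stripped of the sysfs plumbing, the aggregation and report loop look roughly like:

#include <stdio.h>

#define NR_NODES 4   /* stand-in for nr_node_ids */

int main(void)
{
        unsigned long nodes[NR_NODES] = { 0 };

        /* ...walk the cache: every slab or object found is credited to
         * the node its page lives on, i.e. nodes[node] += x ... */
        nodes[0] = 128;   /* fabricated sample counts */
        nodes[2] = 16;

        for (int node = 0; node < NR_NODES; node++)
                if (nodes[node])
                        printf(" N%d=%lu", node, nodes[node]);
        printf("\n");
        return 0;
}
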
4896 int node; in any_slab_objects() local
4899 for_each_kmem_cache_node(s, node, n) in any_slab_objects()
5902 int node; in get_slabinfo() local
5905 for_each_kmem_cache_node(s, node, n) { in get_slabinfo()