Lines Matching refs:node

213 			int node, struct list_head *list);
493 int node = __this_cpu_read(slab_reap_node); in next_reap_node() local
495 node = next_node_in(node, node_online_map); in next_reap_node()
496 __this_cpu_write(slab_reap_node, node); in next_reap_node()
533 static struct array_cache *alloc_arraycache(int node, int entries, in alloc_arraycache() argument
539 ac = kmalloc_node(memsize, gfp, node); in alloc_arraycache()
607 static inline struct alien_cache **alloc_alien_cache(int node, in alloc_alien_cache() argument
644 static struct alien_cache *__alloc_alien_cache(int node, int entries, in __alloc_alien_cache() argument
650 alc = kmalloc_node(memsize, gfp, node); in __alloc_alien_cache()
659 static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) in alloc_alien_cache() argument
666 alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node); in alloc_alien_cache()
671 if (i == node || !node_online(i)) in alloc_alien_cache()
673 alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp); in alloc_alien_cache()
696 struct array_cache *ac, int node, in __drain_alien_cache() argument
699 struct kmem_cache_node *n = get_node(cachep, node); in __drain_alien_cache()
711 free_block(cachep, ac->entry, ac->avail, node, list); in __drain_alien_cache()
722 int node = __this_cpu_read(slab_reap_node); in reap_alien() local
725 struct alien_cache *alc = n->alien[node]; in reap_alien()
733 __drain_alien_cache(cachep, ac, node, &list); in reap_alien()
764 int node, int page_node) in __cache_free_alien() argument
771 n = get_node(cachep, node); in __cache_free_alien()
797 int node = numa_mem_id(); in cache_free_alien() local
802 if (likely(node == page_node)) in cache_free_alien()
805 return __cache_free_alien(cachep, objp, node, page_node); in cache_free_alien()
818 static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp) in init_cache_node() argument
827 n = get_node(cachep, node); in init_cache_node()
830 n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + in init_cache_node()
837 n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node); in init_cache_node()
846 (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num; in init_cache_node()
853 cachep->node[node] = n; in init_cache_node()
868 static int init_cache_node_node(int node) in init_cache_node_node() argument
874 ret = init_cache_node(cachep, node, GFP_KERNEL); in init_cache_node_node()
884 int node, gfp_t gfp, bool force_change) in setup_kmem_cache_node() argument
894 new_alien = alloc_alien_cache(node, cachep->limit, gfp); in setup_kmem_cache_node()
900 new_shared = alloc_arraycache(node, in setup_kmem_cache_node()
906 ret = init_cache_node(cachep, node, gfp); in setup_kmem_cache_node()
910 n = get_node(cachep, node); in setup_kmem_cache_node()
914 n->shared->avail, node, &list); in setup_kmem_cache_node()
955 int node = cpu_to_mem(cpu); in cpuup_canceled() local
956 const struct cpumask *mask = cpumask_of_node(node); in cpuup_canceled()
964 n = get_node(cachep, node); in cpuup_canceled()
975 free_block(cachep, nc->entry, nc->avail, node, &list); in cpuup_canceled()
986 shared->avail, node, &list); in cpuup_canceled()
1010 n = get_node(cachep, node); in cpuup_canceled()
1020 int node = cpu_to_mem(cpu); in cpuup_prepare() local
1029 err = init_cache_node_node(node); in cpuup_prepare()
1038 err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false); in cpuup_prepare()
1106 static int __meminit drain_cache_node_node(int node) in drain_cache_node_node() argument
1114 n = get_node(cachep, node); in drain_cache_node_node()
1180 cachep->node[nodeid] = ptr; in init_list()
1189 int node; in set_up_node() local
1191 for_each_online_node(node) { in set_up_node()
1192 cachep->node[node] = &init_kmem_cache_node[index + node]; in set_up_node()
1193 cachep->node[node]->next_reap = jiffies + in set_up_node()
1249 offsetof(struct kmem_cache, node) + in kmem_cache_init()
1333 int node; in slab_out_of_memory() local
1345 for_each_kmem_cache_node(cachep, node, n) { in slab_out_of_memory()
1355 node, total_slabs - free_slabs, total_slabs, in slab_out_of_memory()
1769 int node; in setup_cpu_cache() local
1771 for_each_online_node(node) { in setup_cpu_cache()
1772 cachep->node[node] = kmalloc_node( in setup_cpu_cache()
1773 sizeof(struct kmem_cache_node), gfp, node); in setup_cpu_cache()
1774 BUG_ON(!cachep->node[node]); in setup_cpu_cache()
1775 kmem_cache_node_init(cachep->node[node]); in setup_cpu_cache()
1779 cachep->node[numa_mem_id()]->next_reap = in setup_cpu_cache()
2113 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) in check_spinlock_acquired_node() argument
2117 assert_spin_locked(&get_node(cachep, node)->list_lock); in check_spinlock_acquired_node()
2130 int node, bool free_all, struct list_head *list) in drain_array_locked() argument
2141 free_block(cachep, ac->entry, tofree, node, list); in drain_array_locked()
2150 int node = numa_mem_id(); in do_drain() local
2156 n = get_node(cachep, node); in do_drain()
2158 free_block(cachep, ac->entry, ac->avail, node, &list); in do_drain()
2167 int node; in drain_cpu_caches() local
2172 for_each_kmem_cache_node(cachep, node, n) in drain_cpu_caches()
2176 for_each_kmem_cache_node(cachep, node, n) { in drain_cpu_caches()
2178 drain_array_locked(cachep, n->shared, node, true, &list); in drain_cpu_caches()
2227 int node; in __kmem_cache_empty() local
2230 for_each_kmem_cache_node(s, node, n) in __kmem_cache_empty()
2240 int node; in __kmem_cache_shrink() local
2246 for_each_kmem_cache_node(cachep, node, n) { in __kmem_cache_shrink()
2274 cachep->node[i] = NULL; in __kmem_cache_release()
2903 int node; in cache_alloc_refill() local
2908 node = numa_mem_id(); in cache_alloc_refill()
2920 n = get_node(cachep, node); in cache_alloc_refill()
2964 page = cache_grow_begin(cachep, gfp_exact_node(flags), node); in cache_alloc_refill()
3330 int nr_objects, int node, struct list_head *list) in free_block() argument
3333 struct kmem_cache_node *n = get_node(cachep, node); in free_block()
3346 check_spinlock_acquired_node(cachep, node); in free_block()
3377 int node = numa_mem_id(); in cache_flusharray() local
3383 n = get_node(cachep, node); in cache_flusharray()
3398 free_block(cachep, ac->entry, batchcount, node, &list); in cache_flusharray()
3628 __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller) in __do_kmalloc_node() argument
3638 ret = kmem_cache_alloc_node_trace(cachep, flags, node, size); in __do_kmalloc_node()
3644 void *__kmalloc_node(size_t size, gfp_t flags, int node) in __kmalloc_node() argument
3646 return __do_kmalloc_node(size, flags, node, _RET_IP_); in __kmalloc_node()
3651 int node, unsigned long caller) in __kmalloc_node_track_caller() argument
3653 return __do_kmalloc_node(size, flags, node, caller); in __kmalloc_node_track_caller()
3791 int node; in setup_kmem_cache_nodes() local
3794 for_each_online_node(node) { in setup_kmem_cache_nodes()
3795 ret = setup_kmem_cache_node(cachep, node, gfp, true); in setup_kmem_cache_nodes()
3806 node--; in setup_kmem_cache_nodes()
3807 while (node >= 0) { in setup_kmem_cache_nodes()
3808 n = get_node(cachep, node); in setup_kmem_cache_nodes()
3813 cachep->node[node] = NULL; in setup_kmem_cache_nodes()
3815 node--; in setup_kmem_cache_nodes()
3851 int node; in do_tune_cpucache() local
3855 node = cpu_to_mem(cpu); in do_tune_cpucache()
3856 n = get_node(cachep, node); in do_tune_cpucache()
3858 free_block(cachep, ac->entry, ac->avail, node, &list); in do_tune_cpucache()
3939 struct array_cache *ac, int node) in drain_array() argument
3955 drain_array_locked(cachep, ac, node, false, &list); in drain_array()
3977 int node = numa_mem_id(); in cache_reap() local
3992 n = get_node(searchp, node); in cache_reap()
3996 drain_array(searchp, n, cpu_cache_get(searchp), node); in cache_reap()
4007 drain_array(searchp, n, n->shared, node); in cache_reap()
4035 int node; in get_slabinfo() local
4038 for_each_kmem_cache_node(cachep, node, n) { in get_slabinfo()
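
A recurring pattern in the matches above: resolve a NUMA node id (via numa_mem_id(), cpu_to_mem(cpu), or a caller-supplied node argument), allocate node-local memory with kmalloc_node()/kcalloc_node(), and record the result in a per-node table such as cachep->node[]. The following is a minimal hedged sketch of that pattern in kernel C, not code from this file; demo_node_data, demo_nodes, and demo_alloc_per_node are illustrative names:

	#include <linux/errno.h>
	#include <linux/nodemask.h>
	#include <linux/slab.h>

	/* Illustrative per-node table, echoing the cachep->node[] array above. */
	struct demo_node_data {
		int entries;
	};

	static struct demo_node_data *demo_nodes[MAX_NUMNODES];

	static int demo_alloc_per_node(gfp_t gfp)
	{
		int node;

		for_each_online_node(node) {
			/*
			 * kmalloc_node() allocates from memory local to 'node',
			 * the same call used by alloc_arraycache() and
			 * init_cache_node() in the listing above.
			 */
			demo_nodes[node] = kmalloc_node(sizeof(*demo_nodes[node]),
							gfp, node);
			if (!demo_nodes[node])
				goto fail;
		}
		return 0;

	fail:
		/*
		 * Unwind already-populated nodes in reverse; ids that were never
		 * online hold NULL, and kfree(NULL) is a no-op.
		 */
		while (--node >= 0) {
			kfree(demo_nodes[node]);
			demo_nodes[node] = NULL;
		}
		return -ENOMEM;
	}

The reverse "node--; while (node >= 0)" unwind mirrors the error path visible at lines 3806-3815 of the listing, where setup_kmem_cache_nodes() tears down the per-node structures it had set up before the failure.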