Lines matching refs: n — cross-reference of the identifier n in mm/slub.c. Each entry gives the source line, the matching code fragment, the enclosing function, and whether that use of n is a function/macro argument or a local variable.
359 const char *n) in __cmpxchg_double_slab() argument
387 pr_info("%s %s: cmpxchg double redo ", n, s->name); in __cmpxchg_double_slab()
396 const char *n) in cmpxchg_double_slab() argument
428 pr_info("%s %s: cmpxchg double redo ", n, s->name); in cmpxchg_double_slab()
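The two "cmpxchg double redo" hits show that the const char *n argument of __cmpxchg_double_slab()/cmpxchg_double_slab() is only a caller name used in the retry diagnostic. A condensed sketch of the locked fallback path, assuming the usual slab_lock()-protected compare of page->freelist/page->counters (the real functions first try a hardware cmpxchg_double when __CMPXCHG_DOUBLE is set, and the print is compiled in only under SLUB_DEBUG_CMPXCHG); the _sketch name is illustrative only:

/* Sketch: fallback path; "n" names the calling function for the message. */
static inline bool cmpxchg_double_slab_sketch(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	slab_lock(page);			/* bit_spin_lock on PG_locked */
	if (page->freelist == freelist_old &&
	    page->counters == counters_old) {
		page->freelist = freelist_new;
		page->counters = counters_new;
		slab_unlock(page);
		return true;
	}
	slab_unlock(page);

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);
#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);	/* lines 387/428 above */
#endif
	return false;
}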
977 struct kmem_cache_node *n, struct page *page) in add_full() argument
982 lockdep_assert_held(&n->list_lock); in add_full()
983 list_add(&page->lru, &n->full); in add_full()
986 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) in remove_full() argument
991 lockdep_assert_held(&n->list_lock); in remove_full()
998 struct kmem_cache_node *n = get_node(s, node); in slabs_node() local
1000 return atomic_long_read(&n->nr_slabs); in slabs_node()
1003 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) in node_nr_slabs() argument
1005 return atomic_long_read(&n->nr_slabs); in node_nr_slabs()
1010 struct kmem_cache_node *n = get_node(s, node); in inc_slabs_node() local
1018 if (likely(n)) { in inc_slabs_node()
1019 atomic_long_inc(&n->nr_slabs); in inc_slabs_node()
1020 atomic_long_add(objects, &n->total_objects); in inc_slabs_node()
1025 struct kmem_cache_node *n = get_node(s, node); in dec_slabs_node() local
1027 atomic_long_dec(&n->nr_slabs); in dec_slabs_node()
1028 atomic_long_sub(objects, &n->total_objects); in dec_slabs_node()
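The nr_slabs/total_objects hits around lines 1010-1028 are the per-node slab counters. A sketch of the helper pair as implied by these fragments; the likely(n) check covers early boot, when the kmem_cache_node structure for a node may not exist yet:

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/*
	 * n can be NULL very early during boot, before the node's
	 * kmem_cache_node is set up (see early_kmem_cache_node_alloc).
	 */
	if (likely(n)) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}

static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}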
1082 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in free_debug_processing() local
1084 spin_lock_irqsave(&n->list_lock, *flags); in free_debug_processing()
1127 return n; in free_debug_processing()
1131 spin_unlock_irqrestore(&n->list_lock, *flags); in free_debug_processing()
1234 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, in add_full() argument
1236 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, in remove_full() argument
1250 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) in node_nr_slabs() argument
1530 __add_partial(struct kmem_cache_node *n, struct page *page, int tail) in __add_partial() argument
1532 n->nr_partial++; in __add_partial()
1534 list_add_tail(&page->lru, &n->partial); in __add_partial()
1536 list_add(&page->lru, &n->partial); in __add_partial()
1539 static inline void add_partial(struct kmem_cache_node *n, in add_partial() argument
1542 lockdep_assert_held(&n->list_lock); in add_partial()
1543 __add_partial(n, page, tail); in add_partial()
1547 __remove_partial(struct kmem_cache_node *n, struct page *page) in __remove_partial() argument
1550 n->nr_partial--; in __remove_partial()
1553 static inline void remove_partial(struct kmem_cache_node *n, in remove_partial() argument
1556 lockdep_assert_held(&n->list_lock); in remove_partial()
1557 __remove_partial(n, page); in remove_partial()
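Lines 1530-1557 are the partial-list helpers: the __-prefixed versions do the raw list work, and the wrappers only add a lockdep assertion that n->list_lock is held. Reconstructed from the fragments above (DEACTIVATE_TO_TAIL selects tail vs. head insertion):

static inline void
__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
{
	n->nr_partial++;
	if (tail == DEACTIVATE_TO_TAIL)
		list_add_tail(&page->lru, &n->partial);
	else
		list_add(&page->lru, &n->partial);
}

static inline void add_partial(struct kmem_cache_node *n,
				struct page *page, int tail)
{
	lockdep_assert_held(&n->list_lock);
	__add_partial(n, page, tail);
}

static inline void
__remove_partial(struct kmem_cache_node *n, struct page *page)
{
	list_del(&page->lru);
	n->nr_partial--;
}

static inline void remove_partial(struct kmem_cache_node *n,
					struct page *page)
{
	lockdep_assert_held(&n->list_lock);
	__remove_partial(n, page);
}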
1567 struct kmem_cache_node *n, struct page *page, in acquire_slab() argument
1574 lockdep_assert_held(&n->list_lock); in acquire_slab()
1601 remove_partial(n, page); in acquire_slab()
1612 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, in get_partial_node() argument
1626 if (!n || !n->nr_partial) in get_partial_node()
1629 spin_lock(&n->list_lock); in get_partial_node()
1630 list_for_each_entry_safe(page, page2, &n->partial, lru) { in get_partial_node()
1636 t = acquire_slab(s, n, page, object == NULL, &objects); in get_partial_node()
1654 spin_unlock(&n->list_lock); in get_partial_node()
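get_partial_node() (lines 1612-1654) scans n->partial under n->list_lock and pulls slabs off it with acquire_slab(). A simplified skeleton of that scan, with the per-cpu partial bookkeeping omitted (the real loop also stashes extra slabs on c->partial and stops once enough objects are captured); the _sketch name is a stand-in, not the real function:

static void *get_partial_node_sketch(struct kmem_cache *s, struct kmem_cache_node *n,
				     struct kmem_cache_cpu *c, gfp_t flags)
{
	struct page *page, *page2;
	void *object = NULL;
	int objects;

	if (!n || !n->nr_partial)
		return NULL;

	/* Interrupts are already off in the caller, so a plain spin_lock. */
	spin_lock(&n->list_lock);
	list_for_each_entry_safe(page, page2, &n->partial, lru) {
		void *t = acquire_slab(s, n, page, object == NULL, &objects);

		if (!t)
			break;
		if (!object) {
			c->page = page;		/* first slab refills the cpu slab */
			object = t;
		} else {
			break;			/* real code: put_cpu_partial() and keep going */
		}
	}
	spin_unlock(&n->list_lock);
	return object;
}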
1698 struct kmem_cache_node *n; in get_any_partial() local
1700 n = get_node(s, zone_to_nid(zone)); in get_any_partial()
1702 if (n && cpuset_zone_allowed_hardwall(zone, flags) && in get_any_partial()
1703 n->nr_partial > s->min_partial) { in get_any_partial()
1704 object = get_partial_node(s, n, c, flags); in get_any_partial()
1778 static inline void note_cmpxchg_failure(const char *n, in note_cmpxchg_failure() argument
1784 pr_info("%s %s: cmpxchg redo ", n, s->name); in note_cmpxchg_failure()
1817 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in deactivate_slab() local
1889 if (!new.inuse && n->nr_partial >= s->min_partial) in deactivate_slab()
1900 spin_lock(&n->list_lock); in deactivate_slab()
1911 spin_lock(&n->list_lock); in deactivate_slab()
1919 remove_partial(n, page); in deactivate_slab()
1923 remove_full(s, n, page); in deactivate_slab()
1927 add_partial(n, page, tail); in deactivate_slab()
1933 add_full(s, n, page); in deactivate_slab()
1946 spin_unlock(&n->list_lock); in deactivate_slab()
1966 struct kmem_cache_node *n = NULL, *n2 = NULL; in unfreeze_partials() local
1976 if (n != n2) { in unfreeze_partials()
1977 if (n) in unfreeze_partials()
1978 spin_unlock(&n->list_lock); in unfreeze_partials()
1980 n = n2; in unfreeze_partials()
1981 spin_lock(&n->list_lock); in unfreeze_partials()
2000 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) { in unfreeze_partials()
2004 add_partial(n, page, DEACTIVATE_TO_TAIL); in unfreeze_partials()
2009 if (n) in unfreeze_partials()
2010 spin_unlock(&n->list_lock); in unfreeze_partials()
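The n/n2 fragments in unfreeze_partials() (lines 1966-2010) are a lock-batching idiom: consecutive pages belonging to the same node are processed under one acquisition of that node's list_lock, which is dropped and retaken only when the node changes. The shape, with next_frozen_page() as a hypothetical stand-in for the walk over the per-cpu partial list and the freelist manipulation elided:

struct kmem_cache_node *n = NULL, *n2 = NULL;
struct page *page;

while ((page = next_frozen_page()) != NULL) {	/* stand-in for the c->partial walk */
	n2 = get_node(s, page_to_nid(page));
	if (n != n2) {
		if (n)
			spin_unlock(&n->list_lock);
		n = n2;
		spin_lock(&n->list_lock);
	}
	/* ... unfreeze the page, then either discard it or
	 *     add_partial(n, page, DEACTIVATE_TO_TAIL) ... */
}
if (n)
	spin_unlock(&n->list_lock);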
2141 static inline unsigned long node_nr_objs(struct kmem_cache_node *n) in node_nr_objs() argument
2143 return atomic_long_read(&n->total_objects); in node_nr_objs()
2148 static unsigned long count_partial(struct kmem_cache_node *n, in count_partial() argument
2155 spin_lock_irqsave(&n->list_lock, flags); in count_partial()
2156 list_for_each_entry(page, &n->partial, lru) in count_partial()
2158 spin_unlock_irqrestore(&n->list_lock, flags); in count_partial()
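count_partial() (lines 2148-2158) is the canonical list_lock pattern in this file: disable interrupts, take the node lock, walk n->partial, and sum a per-page quantity supplied as a callback (count_free, count_inuse, and count_total appear as callers later in the listing). Reconstructed from the fragments above:

static unsigned long count_partial(struct kmem_cache_node *n,
					int (*get_count)(struct page *))
{
	unsigned long flags;
	unsigned long x = 0;
	struct page *page;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(page, &n->partial, lru)
		x += get_count(page);
	spin_unlock_irqrestore(&n->list_lock, flags);
	return x;
}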
2170 struct kmem_cache_node *n; in slab_out_of_memory() local
2185 for_each_kmem_cache_node(s, node, n) { in slab_out_of_memory()
2190 nr_free = count_partial(n, count_free); in slab_out_of_memory()
2191 nr_slabs = node_nr_slabs(n); in slab_out_of_memory()
2192 nr_objs = node_nr_objs(n); in slab_out_of_memory()
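slab_out_of_memory() and most of the later hits iterate nodes with for_each_kmem_cache_node(s, node, n). The macro lives in mm/slab.h rather than in this file; in kernels of this vintage it is roughly:

/*
 * Iterate over all memory nodes that have a kmem_cache_node
 * structure allocated for this cache; skips empty slots.
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))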
2560 struct kmem_cache_node *n = NULL; in __slab_free() local
2566 !(n = free_debug_processing(s, page, x, addr, &flags))) in __slab_free()
2570 if (unlikely(n)) { in __slab_free()
2571 spin_unlock_irqrestore(&n->list_lock, flags); in __slab_free()
2572 n = NULL; in __slab_free()
2594 n = get_node(s, page_to_nid(page)); in __slab_free()
2603 spin_lock_irqsave(&n->list_lock, flags); in __slab_free()
2613 if (likely(!n)) { in __slab_free()
2632 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) in __slab_free()
2641 remove_full(s, n, page); in __slab_free()
2642 add_partial(n, page, DEACTIVATE_TO_TAIL); in __slab_free()
2645 spin_unlock_irqrestore(&n->list_lock, flags); in __slab_free()
2653 remove_partial(n, page); in __slab_free()
2657 remove_full(s, n, page); in __slab_free()
2660 spin_unlock_irqrestore(&n->list_lock, flags); in __slab_free()
2853 init_kmem_cache_node(struct kmem_cache_node *n) in init_kmem_cache_node() argument
2855 n->nr_partial = 0; in init_kmem_cache_node()
2856 spin_lock_init(&n->list_lock); in init_kmem_cache_node()
2857 INIT_LIST_HEAD(&n->partial); in init_kmem_cache_node()
2859 atomic_long_set(&n->nr_slabs, 0); in init_kmem_cache_node()
2860 atomic_long_set(&n->total_objects, 0); in init_kmem_cache_node()
2861 INIT_LIST_HEAD(&n->full); in init_kmem_cache_node()
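Lines 2853-2861 initialize every kmem_cache_node field that this listing touches, so they double as a summary of the per-node structure: a plain nr_partial counter, the list_lock spinlock, the partial list, and (under SLUB_DEBUG) the nr_slabs/total_objects atomics plus the full list. Reconstructed from the fragments above:

static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
	n->nr_partial = 0;
	spin_lock_init(&n->list_lock);
	INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_set(&n->nr_slabs, 0);
	atomic_long_set(&n->total_objects, 0);
	INIT_LIST_HEAD(&n->full);
#endif
}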
2899 struct kmem_cache_node *n; in early_kmem_cache_node_alloc() local
2911 n = page->freelist; in early_kmem_cache_node_alloc()
2912 BUG_ON(!n); in early_kmem_cache_node_alloc()
2913 page->freelist = get_freepointer(kmem_cache_node, n); in early_kmem_cache_node_alloc()
2916 kmem_cache_node->node[node] = n; in early_kmem_cache_node_alloc()
2918 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); in early_kmem_cache_node_alloc()
2919 init_tracking(kmem_cache_node, n); in early_kmem_cache_node_alloc()
2921 init_kmem_cache_node(n); in early_kmem_cache_node_alloc()
2928 __add_partial(n, page, DEACTIVATE_TO_HEAD); in early_kmem_cache_node_alloc()
2934 struct kmem_cache_node *n; in free_kmem_cache_nodes() local
2936 for_each_kmem_cache_node(s, node, n) { in free_kmem_cache_nodes()
2937 kmem_cache_free(kmem_cache_node, n); in free_kmem_cache_nodes()
2947 struct kmem_cache_node *n; in init_kmem_cache_nodes() local
2953 n = kmem_cache_alloc_node(kmem_cache_node, in init_kmem_cache_nodes()
2956 if (!n) { in init_kmem_cache_nodes()
2961 s->node[node] = n; in init_kmem_cache_nodes()
2962 init_kmem_cache_node(n); in init_kmem_cache_nodes()
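init_kmem_cache_nodes() (lines 2947-2962) allocates one kmem_cache_node per memory node from the kmem_cache_node cache itself; during early boot, before that cache is usable, it falls back to early_kmem_cache_node_alloc() (lines 2899-2928), which carves the structure straight out of a freshly allocated slab. A sketch consistent with the fragments (the 0/1 return convention matches callers of this era):

static int init_kmem_cache_nodes(struct kmem_cache *s)
{
	int node;

	for_each_node_state(node, N_NORMAL_MEMORY) {
		struct kmem_cache_node *n;

		if (slab_state == DOWN) {
			/* kmem_cache_node cache not usable yet */
			early_kmem_cache_node_alloc(node);
			continue;
		}
		n = kmem_cache_alloc_node(kmem_cache_node,
						GFP_KERNEL, node);
		if (!n) {
			free_kmem_cache_nodes(s);
			return 0;
		}

		s->node[node] = n;
		init_kmem_cache_node(n);
	}
	return 1;
}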
3209 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) in free_partial() argument
3213 list_for_each_entry_safe(page, h, &n->partial, lru) { in free_partial()
3215 __remove_partial(n, page); in free_partial()
3230 struct kmem_cache_node *n; in kmem_cache_close() local
3234 for_each_kmem_cache_node(s, node, n) { in kmem_cache_close()
3235 free_partial(s, n); in kmem_cache_close()
3236 if (n->nr_partial || slabs_node(s, node)) in kmem_cache_close()
3353 const char *__check_heap_object(const void *ptr, unsigned long n, in __check_heap_object() argument
3379 if (offset <= object_size && n <= object_size - offset) in __check_heap_object()
3439 struct kmem_cache_node *n; in __kmem_cache_shrink() local
3451 for_each_kmem_cache_node(s, node, n) { in __kmem_cache_shrink()
3452 if (!n->nr_partial) in __kmem_cache_shrink()
3458 spin_lock_irqsave(&n->list_lock, flags); in __kmem_cache_shrink()
3466 list_for_each_entry_safe(page, t, &n->partial, lru) { in __kmem_cache_shrink()
3469 n->nr_partial--; in __kmem_cache_shrink()
3477 list_splice(slabs_by_inuse + i, n->partial.prev); in __kmem_cache_shrink()
3479 spin_unlock_irqrestore(&n->list_lock, flags); in __kmem_cache_shrink()
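The slabs_by_inuse fragments in __kmem_cache_shrink() (lines 3439-3479) are a bucket sort: partial slabs are distributed into per-inuse-count lists, empty ones are set aside for freeing, and the rest are spliced back so the fullest slabs sit at the head of n->partial. A condensed sketch of one node's pass, assuming slabs_by_inuse holds oo_objects(s->max) list heads and i/page/t/flags are declared as in the real function:

spin_lock_irqsave(&n->list_lock, flags);

/* Bucket every partial slab by its number of objects in use. */
list_for_each_entry_safe(page, t, &n->partial, lru) {
	list_move(&page->lru, slabs_by_inuse + page->inuse);
	if (!page->inuse)
		n->nr_partial--;	/* bucket 0 gets discarded below */
}

/*
 * Splice the buckets back, fullest first, so allocations drain
 * nearly-full slabs before nearly-empty ones.
 */
for (i = objects - 1; i > 0; i--)
	list_splice(slabs_by_inuse + i, n->partial.prev);

spin_unlock_irqrestore(&n->list_lock, flags);

/* Bucket 0 holds completely empty slabs; return them to the page allocator. */
list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
	discard_slab(s, page);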
3504 struct kmem_cache_node *n; in slab_mem_offline_callback() local
3520 n = get_node(s, offline_node); in slab_mem_offline_callback()
3521 if (n) { in slab_mem_offline_callback()
3531 kmem_cache_free(kmem_cache_node, n); in slab_mem_offline_callback()
3539 struct kmem_cache_node *n; in slab_mem_going_online_callback() local
3564 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); in slab_mem_going_online_callback()
3565 if (!n) { in slab_mem_going_online_callback()
3569 init_kmem_cache_node(n); in slab_mem_going_online_callback()
3570 s->node[nid] = n; in slab_mem_going_online_callback()
3623 struct kmem_cache_node *n; in bootstrap() local
3633 for_each_kmem_cache_node(s, node, n) { in bootstrap()
3636 list_for_each_entry(p, &n->partial, lru) in bootstrap()
3640 list_for_each_entry(p, &n->full, lru) in bootstrap()
3894 struct kmem_cache_node *n, unsigned long *map) in validate_slab_node() argument
3900 spin_lock_irqsave(&n->list_lock, flags); in validate_slab_node()
3902 list_for_each_entry(page, &n->partial, lru) { in validate_slab_node()
3906 if (count != n->nr_partial) in validate_slab_node()
3908 s->name, count, n->nr_partial); in validate_slab_node()
3913 list_for_each_entry(page, &n->full, lru) { in validate_slab_node()
3917 if (count != atomic_long_read(&n->nr_slabs)) in validate_slab_node()
3919 s->name, count, atomic_long_read(&n->nr_slabs)); in validate_slab_node()
3922 spin_unlock_irqrestore(&n->list_lock, flags); in validate_slab_node()
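validate_slab_node() (lines 3894-3922) cross-checks the counters against the lists: the partial list length must match n->nr_partial, and, when SLAB_STORE_USER keeps full slabs on n->full, partial plus full must match n->nr_slabs. A sketch built from the fragments, with the per-slab check abbreviated to the validate_slab_slab() call:

static int validate_slab_node(struct kmem_cache *s,
		struct kmem_cache_node *n, unsigned long *map)
{
	unsigned long count = 0;
	struct page *page;
	unsigned long flags;

	spin_lock_irqsave(&n->list_lock, flags);

	list_for_each_entry(page, &n->partial, lru) {
		validate_slab_slab(s, page, map);	/* checks every object's state */
		count++;
	}
	if (count != n->nr_partial)
		pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
		       s->name, count, n->nr_partial);

	if (!(s->flags & SLAB_STORE_USER))
		goto out;

	list_for_each_entry(page, &n->full, lru) {
		validate_slab_slab(s, page, map);
		count++;
	}
	if (count != atomic_long_read(&n->nr_slabs))
		pr_err("SLUB %s: %ld slabs counted but counter=%ld\n",
		       s->name, count, atomic_long_read(&n->nr_slabs));

out:
	spin_unlock_irqrestore(&n->list_lock, flags);
	return count;
}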
3932 struct kmem_cache_node *n; in validate_slab_cache() local
3938 for_each_kmem_cache_node(s, node, n) in validate_slab_cache()
3939 count += validate_slab_node(s, n, map); in validate_slab_cache()
4093 struct kmem_cache_node *n; in list_locations() local
4103 for_each_kmem_cache_node(s, node, n) { in list_locations()
4107 if (!atomic_long_read(&n->nr_slabs)) in list_locations()
4110 spin_lock_irqsave(&n->list_lock, flags); in list_locations()
4111 list_for_each_entry(page, &n->partial, lru) in list_locations()
4113 list_for_each_entry(page, &n->full, lru) in list_locations()
4115 spin_unlock_irqrestore(&n->list_lock, flags); in list_locations()
4302 struct kmem_cache_node *n; in show_slab_objects() local
4304 for_each_kmem_cache_node(s, node, n) { in show_slab_objects()
4307 x = atomic_long_read(&n->total_objects); in show_slab_objects()
4309 x = atomic_long_read(&n->total_objects) - in show_slab_objects()
4310 count_partial(n, count_free); in show_slab_objects()
4312 x = atomic_long_read(&n->nr_slabs); in show_slab_objects()
4320 struct kmem_cache_node *n; in show_slab_objects() local
4322 for_each_kmem_cache_node(s, node, n) { in show_slab_objects()
4324 x = count_partial(n, count_total); in show_slab_objects()
4326 x = count_partial(n, count_inuse); in show_slab_objects()
4328 x = n->nr_partial; in show_slab_objects()
4349 struct kmem_cache_node *n; in any_slab_objects() local
4351 for_each_kmem_cache_node(s, node, n) in any_slab_objects()
4352 if (atomic_long_read(&n->total_objects)) in any_slab_objects()
4359 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) argument
4360 #define to_slab(n) container_of(n, struct kmem_cache, kobj) argument
5316 struct kmem_cache_node *n; in get_slabinfo() local
5318 for_each_kmem_cache_node(s, node, n) { in get_slabinfo()
5319 nr_slabs += node_nr_slabs(n); in get_slabinfo()
5320 nr_objs += node_nr_objs(n); in get_slabinfo()
5321 nr_free += count_partial(n, count_free); in get_slabinfo()
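get_slabinfo() (lines 5316-5321) is what /proc/slabinfo reads for a SLUB cache: per-node slab and object totals plus the free objects counted off the partial lists. A sketch of the aggregation, with the final slabinfo assignments given as the usual convention (active objects = total minus free; SLUB reports all slabs as active):

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
{
	unsigned long nr_slabs = 0, nr_objs = 0, nr_free = 0;
	struct kmem_cache_node *n;
	int node;

	for_each_kmem_cache_node(s, node, n) {
		nr_slabs += node_nr_slabs(n);
		nr_objs += node_nr_objs(n);
		nr_free += count_partial(n, count_free);
	}

	sinfo->active_objs = nr_objs - nr_free;
	sinfo->num_objs = nr_objs;
	sinfo->active_slabs = nr_slabs;
	sinfo->num_slabs = nr_slabs;
	sinfo->objects_per_slab = oo_objects(s->oo);
	sinfo->cache_order = oo_order(s->oo);
}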