Lines matching references to cachep (identifier search over the SLAB allocator, mm/slab.c)
210 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
212 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
213 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
216 static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
218 static inline void fixup_slab_list(struct kmem_cache *cachep,
239 #define MAKE_LIST(cachep, listp, slab, nodeid) \ argument
242 list_splice(&get_node(cachep, nodeid)->slab, listp); \
245 #define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ argument
247 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
248 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
249 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
325 static int obj_offset(struct kmem_cache *cachep) in obj_offset() argument
327 return cachep->obj_offset; in obj_offset()
330 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp) in dbg_redzone1() argument
332 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); in dbg_redzone1()
333 return (unsigned long long*) (objp + obj_offset(cachep) - in dbg_redzone1()
337 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp) in dbg_redzone2() argument
339 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); in dbg_redzone2()
340 if (cachep->flags & SLAB_STORE_USER) in dbg_redzone2()
341 return (unsigned long long *)(objp + cachep->size - in dbg_redzone2()
344 return (unsigned long long *) (objp + cachep->size - in dbg_redzone2()
348 static void **dbg_userword(struct kmem_cache *cachep, void *objp) in dbg_userword() argument
350 BUG_ON(!(cachep->flags & SLAB_STORE_USER)); in dbg_userword()
351 return (void **)(objp + cachep->size - BYTES_PER_WORD); in dbg_userword()
357 #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) argument
358 #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) argument
359 #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) argument
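The dbg_redzone1()/dbg_redzone2()/dbg_userword() accessors above only exist for caches created with the matching debug flags; the #define stubs are the !DEBUG build. A minimal sketch of requesting those layouts at cache-creation time; the struct, cache name and init function are made up for illustration, while the SLAB_* flags are the real ones:

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/slab.h>

    struct foo {
            int a;
            long b;
    };

    static struct kmem_cache *foo_cache;

    static int __init foo_debug_init(void)
    {
            /*
             * SLAB_RED_ZONE adds the guard words read back by dbg_redzone1()
             * and dbg_redzone2(); SLAB_STORE_USER records the last caller for
             * dbg_userword(); SLAB_POISON fills free objects with a pattern
             * later verified by check_poison_obj().
             */
            foo_cache = kmem_cache_create("foo_debug_demo", sizeof(struct foo), 0,
                                          SLAB_RED_ZONE | SLAB_STORE_USER |
                                          SLAB_POISON, NULL);
            return foo_cache ? 0 : -ENOMEM;
    }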
365 static inline bool is_store_user_clean(struct kmem_cache *cachep) in is_store_user_clean() argument
367 return atomic_read(&cachep->store_user_clean) == 1; in is_store_user_clean()
370 static inline void set_store_user_clean(struct kmem_cache *cachep) in set_store_user_clean() argument
372 atomic_set(&cachep->store_user_clean, 1); in set_store_user_clean()
375 static inline void set_store_user_dirty(struct kmem_cache *cachep) in set_store_user_dirty() argument
377 if (is_store_user_clean(cachep)) in set_store_user_dirty()
378 atomic_set(&cachep->store_user_clean, 0); in set_store_user_dirty()
382 static inline void set_store_user_dirty(struct kmem_cache *cachep) {} in set_store_user_dirty() argument
432 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) in cpu_cache_get() argument
434 return this_cpu_ptr(cachep->cpu_cache); in cpu_cache_get()
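cpu_cache_get() returns the per-CPU array_cache, essentially a LIFO stack of free object pointers, which is why the common allocation and free paths amount to a single pop or push. A self-contained userspace model of that structure, with hypothetical names, assuming the same avail/limit/batchcount roles as the kernel struct:

    #include <stddef.h>

    /* simplified model of struct array_cache; field roles follow the kernel's */
    struct array_cache_model {
            unsigned int avail;      /* object pointers currently cached */
            unsigned int limit;      /* capacity before cache_flusharray() territory */
            unsigned int batchcount; /* how many objects move per refill or flush */
            void *entry[];           /* the cached, still-hot object pointers */
    };

    /* fast-path alloc: pop the most recently freed (cache-hot) object */
    static void *ac_pop(struct array_cache_model *ac)
    {
            return ac->avail ? ac->entry[--ac->avail] : NULL;
    }

    /* fast-path free: push; the real free flushes a batch once avail hits limit */
    static int ac_push(struct array_cache_model *ac, void *obj)
    {
            if (ac->avail >= ac->limit)
                    return -1;
            ac->entry[ac->avail++] = obj;
            return 0;
    }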
476 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg) argument
478 static void __slab_error(const char *function, struct kmem_cache *cachep, in __slab_error() argument
482 function, cachep->name, msg); in __slab_error()
596 static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep, in cache_free_pfmemalloc() argument
604 n = get_node(cachep, page_node); in cache_free_pfmemalloc()
607 free_block(cachep, &objp, 1, page_node, &list); in cache_free_pfmemalloc()
610 slabs_destroy(cachep, &list); in cache_free_pfmemalloc()
638 #define drain_alien_cache(cachep, alien) do { } while (0) argument
639 #define reap_alien(cachep, n) do { } while (0) argument
651 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) in cache_free_alien() argument
656 static inline void *alternate_node_alloc(struct kmem_cache *cachep, in alternate_node_alloc() argument
662 static inline void *____cache_alloc_node(struct kmem_cache *cachep, in ____cache_alloc_node() argument
727 static void __drain_alien_cache(struct kmem_cache *cachep, in __drain_alien_cache() argument
731 struct kmem_cache_node *n = get_node(cachep, node); in __drain_alien_cache()
743 free_block(cachep, ac->entry, ac->avail, node, list); in __drain_alien_cache()
752 static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n) in reap_alien() argument
765 __drain_alien_cache(cachep, ac, node, &list); in reap_alien()
767 slabs_destroy(cachep, &list); in reap_alien()
773 static void drain_alien_cache(struct kmem_cache *cachep, in drain_alien_cache() argument
788 __drain_alien_cache(cachep, ac, i, &list); in drain_alien_cache()
790 slabs_destroy(cachep, &list); in drain_alien_cache()
795 static int __cache_free_alien(struct kmem_cache *cachep, void *objp, in __cache_free_alien() argument
803 n = get_node(cachep, node); in __cache_free_alien()
804 STATS_INC_NODEFREES(cachep); in __cache_free_alien()
810 STATS_INC_ACOVERFLOW(cachep); in __cache_free_alien()
811 __drain_alien_cache(cachep, ac, page_node, &list); in __cache_free_alien()
815 slabs_destroy(cachep, &list); in __cache_free_alien()
817 n = get_node(cachep, page_node); in __cache_free_alien()
819 free_block(cachep, &objp, 1, page_node, &list); in __cache_free_alien()
821 slabs_destroy(cachep, &list); in __cache_free_alien()
826 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) in cache_free_alien() argument
837 return __cache_free_alien(cachep, objp, node, page_node); in cache_free_alien()
850 static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp) in init_cache_node() argument
859 n = get_node(cachep, node); in init_cache_node()
862 n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + in init_cache_node()
863 cachep->num; in init_cache_node()
875 ((unsigned long)cachep) % REAPTIMEOUT_NODE; in init_cache_node()
878 (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num; in init_cache_node()
885 cachep->node[node] = n; in init_cache_node()
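init_cache_node() sets free_limit from the formula visible above. A small helper restating that arithmetic with a worked example; the CPU count, batchcount and objects-per-slab values are invented:

    /* mirrors init_cache_node(): (1 + nr_cpus_node(node)) * batchcount + num */
    static unsigned long node_free_limit(unsigned int cpus_on_node,
                                         unsigned int batchcount,
                                         unsigned int objs_per_slab)
    {
            return (1UL + cpus_on_node) * batchcount + objs_per_slab;
    }
    /*
     * e.g. 4 CPUs on the node, batchcount 16, 32 objects per slab:
     *      (1 + 4) * 16 + 32 = 112 free objects may sit on the node lists
     * before reaping starts handing whole slabs back to the page allocator.
     */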
903 struct kmem_cache *cachep; in init_cache_node_node() local
905 list_for_each_entry(cachep, &slab_caches, list) { in init_cache_node_node()
906 ret = init_cache_node(cachep, node, GFP_KERNEL); in init_cache_node_node()
915 static int setup_kmem_cache_node(struct kmem_cache *cachep, in setup_kmem_cache_node() argument
926 new_alien = alloc_alien_cache(node, cachep->limit, gfp); in setup_kmem_cache_node()
931 if (cachep->shared) { in setup_kmem_cache_node()
933 cachep->shared * cachep->batchcount, 0xbaadf00d, gfp); in setup_kmem_cache_node()
938 ret = init_cache_node(cachep, node, gfp); in setup_kmem_cache_node()
942 n = get_node(cachep, node); in setup_kmem_cache_node()
945 free_block(cachep, n->shared->entry, in setup_kmem_cache_node()
962 slabs_destroy(cachep, &list); in setup_kmem_cache_node()
985 struct kmem_cache *cachep; in cpuup_canceled() local
990 list_for_each_entry(cachep, &slab_caches, list) { in cpuup_canceled()
996 n = get_node(cachep, node); in cpuup_canceled()
1003 n->free_limit -= cachep->batchcount; in cpuup_canceled()
1006 nc = per_cpu_ptr(cachep->cpu_cache, cpu); in cpuup_canceled()
1008 free_block(cachep, nc->entry, nc->avail, node, &list); in cpuup_canceled()
1019 free_block(cachep, shared->entry, in cpuup_canceled()
1031 drain_alien_cache(cachep, alien); in cpuup_canceled()
1036 slabs_destroy(cachep, &list); in cpuup_canceled()
1043 list_for_each_entry(cachep, &slab_caches, list) { in cpuup_canceled()
1044 n = get_node(cachep, node); in cpuup_canceled()
1047 drain_freelist(cachep, n, INT_MAX); in cpuup_canceled()
1053 struct kmem_cache *cachep; in cpuup_prepare() local
1071 list_for_each_entry(cachep, &slab_caches, list) { in cpuup_prepare()
1072 err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false); in cpuup_prepare()
1142 struct kmem_cache *cachep; in drain_cache_node_node() local
1145 list_for_each_entry(cachep, &slab_caches, list) { in drain_cache_node_node()
1148 n = get_node(cachep, node); in drain_cache_node_node()
1152 drain_freelist(cachep, n, INT_MAX); in drain_cache_node_node()
1199 static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list, in init_list() argument
1213 MAKE_ALL_LISTS(cachep, ptr, nodeid); in init_list()
1214 cachep->node[nodeid] = ptr; in init_list()
1221 static void __init set_up_node(struct kmem_cache *cachep, int index) in set_up_node() argument
1226 cachep->node[node] = &init_kmem_cache_node[index + node]; in set_up_node()
1227 cachep->node[node]->next_reap = jiffies + in set_up_node()
1229 ((unsigned long)cachep) % REAPTIMEOUT_NODE; in set_up_node()
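The ((unsigned long)cachep) % REAPTIMEOUT_NODE term staggers each cache's per-node reap timer so all caches do not fire on the same jiffy. A generic sketch of the idea; the function and parameter names are placeholders, not the kernel's API:

    /* same period for every cache, per-cache phase derived from its pointer */
    static unsigned long staggered_next_reap(unsigned long now,
                                             unsigned long period,
                                             const void *cache)
    {
            return now + period + ((unsigned long)cache % period);
    }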
1319 struct kmem_cache *cachep; in kmem_cache_init_late() local
1325 list_for_each_entry(cachep, &slab_caches, list) in kmem_cache_init_late()
1326 if (enable_cpucache(cachep, GFP_NOWAIT)) in kmem_cache_init_late()
1365 slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) in slab_out_of_memory() argument
1381 cachep->name, cachep->size, cachep->gfporder); in slab_out_of_memory()
1383 for_each_kmem_cache_node(cachep, node, n) { in slab_out_of_memory()
1401 num_objs = num_slabs * cachep->num; in slab_out_of_memory()
1405 active_objs += (num_slabs_full * cachep->num); in slab_out_of_memory()
1422 static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, in kmem_getpages() argument
1428 flags |= cachep->allocflags; in kmem_getpages()
1429 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) in kmem_getpages()
1432 page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder); in kmem_getpages()
1434 slab_out_of_memory(cachep, flags, nodeid); in kmem_getpages()
1438 if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) { in kmem_getpages()
1439 __free_pages(page, cachep->gfporder); in kmem_getpages()
1443 nr_pages = (1 << cachep->gfporder); in kmem_getpages()
1444 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) in kmem_getpages()
1456 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { in kmem_getpages()
1457 kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid); in kmem_getpages()
1459 if (cachep->ctor) in kmem_getpages()
1471 static void kmem_freepages(struct kmem_cache *cachep, struct page *page) in kmem_freepages() argument
1473 int order = cachep->gfporder; in kmem_freepages()
1478 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) in kmem_freepages()
1493 memcg_uncharge_slab(page, order, cachep); in kmem_freepages()
1499 struct kmem_cache *cachep; in kmem_rcu_free() local
1503 cachep = page->slab_cache; in kmem_rcu_free()
1505 kmem_freepages(cachep, page); in kmem_rcu_free()
1509 static bool is_debug_pagealloc_cache(struct kmem_cache *cachep) in is_debug_pagealloc_cache() argument
1511 if (debug_pagealloc_enabled() && OFF_SLAB(cachep) && in is_debug_pagealloc_cache()
1512 (cachep->size % PAGE_SIZE) == 0) in is_debug_pagealloc_cache()
1519 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, in store_stackinfo() argument
1522 int size = cachep->object_size; in store_stackinfo()
1524 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; in store_stackinfo()
1551 static void slab_kernel_map(struct kmem_cache *cachep, void *objp, in slab_kernel_map() argument
1554 if (!is_debug_pagealloc_cache(cachep)) in slab_kernel_map()
1558 store_stackinfo(cachep, objp, caller); in slab_kernel_map()
1560 kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map); in slab_kernel_map()
1564 static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp, in slab_kernel_map() argument
1569 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) in poison_obj() argument
1571 int size = cachep->object_size; in poison_obj()
1572 addr = &((char *)addr)[obj_offset(cachep)]; in poison_obj()
1610 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) in print_objinfo() argument
1615 if (cachep->flags & SLAB_RED_ZONE) { in print_objinfo()
1617 *dbg_redzone1(cachep, objp), in print_objinfo()
1618 *dbg_redzone2(cachep, objp)); in print_objinfo()
1621 if (cachep->flags & SLAB_STORE_USER) { in print_objinfo()
1623 *dbg_userword(cachep, objp), in print_objinfo()
1624 *dbg_userword(cachep, objp)); in print_objinfo()
1626 realobj = (char *)objp + obj_offset(cachep); in print_objinfo()
1627 size = cachep->object_size; in print_objinfo()
1637 static void check_poison_obj(struct kmem_cache *cachep, void *objp) in check_poison_obj() argument
1643 if (is_debug_pagealloc_cache(cachep)) in check_poison_obj()
1646 realobj = (char *)objp + obj_offset(cachep); in check_poison_obj()
1647 size = cachep->object_size; in check_poison_obj()
1659 print_tainted(), cachep->name, in check_poison_obj()
1661 print_objinfo(cachep, objp, 0); in check_poison_obj()
1683 objnr = obj_to_index(cachep, page, objp); in check_poison_obj()
1685 objp = index_to_obj(cachep, page, objnr - 1); in check_poison_obj()
1686 realobj = (char *)objp + obj_offset(cachep); in check_poison_obj()
1688 print_objinfo(cachep, objp, 2); in check_poison_obj()
1690 if (objnr + 1 < cachep->num) { in check_poison_obj()
1691 objp = index_to_obj(cachep, page, objnr + 1); in check_poison_obj()
1692 realobj = (char *)objp + obj_offset(cachep); in check_poison_obj()
1694 print_objinfo(cachep, objp, 2); in check_poison_obj()
1701 static void slab_destroy_debugcheck(struct kmem_cache *cachep, in slab_destroy_debugcheck() argument
1706 if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) { in slab_destroy_debugcheck()
1707 poison_obj(cachep, page->freelist - obj_offset(cachep), in slab_destroy_debugcheck()
1711 for (i = 0; i < cachep->num; i++) { in slab_destroy_debugcheck()
1712 void *objp = index_to_obj(cachep, page, i); in slab_destroy_debugcheck()
1714 if (cachep->flags & SLAB_POISON) { in slab_destroy_debugcheck()
1715 check_poison_obj(cachep, objp); in slab_destroy_debugcheck()
1716 slab_kernel_map(cachep, objp, 1, 0); in slab_destroy_debugcheck()
1718 if (cachep->flags & SLAB_RED_ZONE) { in slab_destroy_debugcheck()
1719 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) in slab_destroy_debugcheck()
1720 slab_error(cachep, "start of a freed object was overwritten"); in slab_destroy_debugcheck()
1721 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) in slab_destroy_debugcheck()
1722 slab_error(cachep, "end of a freed object was overwritten"); in slab_destroy_debugcheck()
1727 static void slab_destroy_debugcheck(struct kmem_cache *cachep, in slab_destroy_debugcheck() argument
1742 static void slab_destroy(struct kmem_cache *cachep, struct page *page) in slab_destroy() argument
1747 slab_destroy_debugcheck(cachep, page); in slab_destroy()
1748 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) in slab_destroy()
1751 kmem_freepages(cachep, page); in slab_destroy()
1757 if (OFF_SLAB(cachep)) in slab_destroy()
1758 kmem_cache_free(cachep->freelist_cache, freelist); in slab_destroy()
1761 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list) in slabs_destroy() argument
1767 slab_destroy(cachep, page); in slabs_destroy()
1783 static size_t calculate_slab_order(struct kmem_cache *cachep, in calculate_slab_order() argument
1818 if (freelist_cache->size > cachep->size / 2) in calculate_slab_order()
1823 cachep->num = num; in calculate_slab_order()
1824 cachep->gfporder = gfporder; in calculate_slab_order()
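calculate_slab_order() picks the smallest workable page order and records num and gfporder as shown above. A rough userspace model of the packing arithmetic, assuming an on-slab freelist of one index byte per object; the real cache_estimate() also covers the off-slab and objfreelist variants:

    #include <stdio.h>
    #include <stddef.h>

    #define MODEL_PAGE_SIZE 4096UL

    /*
     * For a given page order, how many objects fit when each object also
     * needs one freelist index byte kept on-slab, and how many bytes are
     * left over for colouring.
     */
    static unsigned int objs_per_slab(unsigned int gfporder, size_t obj_size,
                                      size_t *leftover)
    {
            size_t slab_bytes = MODEL_PAGE_SIZE << gfporder;
            unsigned int num = (unsigned int)(slab_bytes / (obj_size + 1));

            *leftover = slab_bytes - (size_t)num * (obj_size + 1);
            return num;
    }

    int main(void)
    {
            size_t left;
            unsigned int num = objs_per_slab(0, 256, &left);

            /* 4096 / 257 = 15 objects, 4096 - 15 * 257 = 241 bytes left over */
            printf("%u objects, %zu bytes leftover\n", num, left);
            return 0;
    }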
1852 struct kmem_cache *cachep, int entries, int batchcount) in alloc_kmem_cache_cpus() argument
1872 static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) in setup_cpu_cache() argument
1875 return enable_cpucache(cachep, gfp); in setup_cpu_cache()
1877 cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1); in setup_cpu_cache()
1878 if (!cachep->cpu_cache) in setup_cpu_cache()
1886 set_up_node(cachep, SIZE_NODE); in setup_cpu_cache()
1891 cachep->node[node] = kmalloc_node( in setup_cpu_cache()
1893 BUG_ON(!cachep->node[node]); in setup_cpu_cache()
1894 kmem_cache_node_init(cachep->node[node]); in setup_cpu_cache()
1898 cachep->node[numa_mem_id()]->next_reap = in setup_cpu_cache()
1900 ((unsigned long)cachep) % REAPTIMEOUT_NODE; in setup_cpu_cache()
1902 cpu_cache_get(cachep)->avail = 0; in setup_cpu_cache()
1903 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; in setup_cpu_cache()
1904 cpu_cache_get(cachep)->batchcount = 1; in setup_cpu_cache()
1905 cpu_cache_get(cachep)->touched = 0; in setup_cpu_cache()
1906 cachep->batchcount = 1; in setup_cpu_cache()
1907 cachep->limit = BOOT_CPUCACHE_ENTRIES; in setup_cpu_cache()
1922 struct kmem_cache *cachep; in __kmem_cache_alias() local
1924 cachep = find_mergeable(size, align, flags, name, ctor); in __kmem_cache_alias()
1925 if (cachep) { in __kmem_cache_alias()
1926 cachep->refcount++; in __kmem_cache_alias()
1932 cachep->object_size = max_t(int, cachep->object_size, size); in __kmem_cache_alias()
1934 return cachep; in __kmem_cache_alias()
1937 static bool set_objfreelist_slab_cache(struct kmem_cache *cachep, in set_objfreelist_slab_cache() argument
1942 cachep->num = 0; in set_objfreelist_slab_cache()
1944 if (cachep->ctor || flags & SLAB_DESTROY_BY_RCU) in set_objfreelist_slab_cache()
1947 left = calculate_slab_order(cachep, size, in set_objfreelist_slab_cache()
1949 if (!cachep->num) in set_objfreelist_slab_cache()
1952 if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size) in set_objfreelist_slab_cache()
1955 cachep->colour = left / cachep->colour_off; in set_objfreelist_slab_cache()
1960 static bool set_off_slab_cache(struct kmem_cache *cachep, in set_off_slab_cache() argument
1965 cachep->num = 0; in set_off_slab_cache()
1978 left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB); in set_off_slab_cache()
1979 if (!cachep->num) in set_off_slab_cache()
1986 if (left >= cachep->num * sizeof(freelist_idx_t)) in set_off_slab_cache()
1989 cachep->colour = left / cachep->colour_off; in set_off_slab_cache()
1994 static bool set_on_slab_cache(struct kmem_cache *cachep, in set_on_slab_cache() argument
1999 cachep->num = 0; in set_on_slab_cache()
2001 left = calculate_slab_order(cachep, size, flags); in set_on_slab_cache()
2002 if (!cachep->num) in set_on_slab_cache()
2005 cachep->colour = left / cachep->colour_off; in set_on_slab_cache()
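Each set_*_slab_cache() helper finishes by turning the leftover bytes into a colour count, so consecutive slabs start their objects at different cache-line offsets. A tiny restatement with a worked example; the leftover and cache-line values are illustrative:

    #include <stddef.h>

    /* leftover slab bytes -> number of distinct colour offsets */
    static unsigned int slab_colours(size_t leftover, size_t colour_off)
    {
            return (unsigned int)(leftover / colour_off);
    }
    /*
     * e.g. leftover = 241, colour_off = cache_line_size() = 64 gives 3 colours,
     * so cache_grow_begin() starts successive slabs at offsets 0, 64 and 128.
     */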
2032 __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) in __kmem_cache_create() argument
2037 size_t size = cachep->size; in __kmem_cache_create()
2074 if (ralign < cachep->align) { in __kmem_cache_create()
2075 ralign = cachep->align; in __kmem_cache_create()
2083 cachep->align = ralign; in __kmem_cache_create()
2084 cachep->colour_off = cache_line_size(); in __kmem_cache_create()
2086 if (cachep->colour_off < cachep->align) in __kmem_cache_create()
2087 cachep->colour_off = cachep->align; in __kmem_cache_create()
2102 cachep->obj_offset += sizeof(unsigned long long); in __kmem_cache_create()
2117 kasan_cache_create(cachep, &size, &flags); in __kmem_cache_create()
2119 size = ALIGN(size, cachep->align); in __kmem_cache_create()
2125 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align); in __kmem_cache_create()
2136 size >= 256 && cachep->object_size > cache_line_size()) { in __kmem_cache_create()
2140 if (set_off_slab_cache(cachep, tmp_size, flags)) { in __kmem_cache_create()
2142 cachep->obj_offset += tmp_size - size; in __kmem_cache_create()
2150 if (set_objfreelist_slab_cache(cachep, size, flags)) { in __kmem_cache_create()
2155 if (set_off_slab_cache(cachep, size, flags)) { in __kmem_cache_create()
2160 if (set_on_slab_cache(cachep, size, flags)) in __kmem_cache_create()
2166 cachep->freelist_size = cachep->num * sizeof(freelist_idx_t); in __kmem_cache_create()
2167 cachep->flags = flags; in __kmem_cache_create()
2168 cachep->allocflags = __GFP_COMP; in __kmem_cache_create()
2170 cachep->allocflags |= GFP_DMA; in __kmem_cache_create()
2171 cachep->size = size; in __kmem_cache_create()
2172 cachep->reciprocal_buffer_size = reciprocal_value(size); in __kmem_cache_create()
2181 (cachep->flags & SLAB_POISON) && in __kmem_cache_create()
2182 is_debug_pagealloc_cache(cachep)) in __kmem_cache_create()
2183 cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); in __kmem_cache_create()
2186 if (OFF_SLAB(cachep)) { in __kmem_cache_create()
2187 cachep->freelist_cache = in __kmem_cache_create()
2188 kmalloc_slab(cachep->freelist_size, 0u); in __kmem_cache_create()
2191 err = setup_cpu_cache(cachep, gfp); in __kmem_cache_create()
2193 __kmem_cache_release(cachep); in __kmem_cache_create()
2216 static void check_spinlock_acquired(struct kmem_cache *cachep) in check_spinlock_acquired() argument
2220 assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock); in check_spinlock_acquired()
2224 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) in check_spinlock_acquired_node() argument
2228 assert_spin_locked(&get_node(cachep, node)->list_lock); in check_spinlock_acquired_node()
2240 static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac, in drain_array_locked() argument
2252 free_block(cachep, ac->entry, tofree, node, list); in drain_array_locked()
2259 struct kmem_cache *cachep = arg; in do_drain() local
2266 ac = cpu_cache_get(cachep); in do_drain()
2267 n = get_node(cachep, node); in do_drain()
2269 free_block(cachep, ac->entry, ac->avail, node, &list); in do_drain()
2271 slabs_destroy(cachep, &list); in do_drain()
2275 static void drain_cpu_caches(struct kmem_cache *cachep) in drain_cpu_caches() argument
2281 on_each_cpu(do_drain, cachep, 1); in drain_cpu_caches()
2283 for_each_kmem_cache_node(cachep, node, n) in drain_cpu_caches()
2285 drain_alien_cache(cachep, n->alien); in drain_cpu_caches()
2287 for_each_kmem_cache_node(cachep, node, n) { in drain_cpu_caches()
2289 drain_array_locked(cachep, n->shared, node, true, &list); in drain_cpu_caches()
2292 slabs_destroy(cachep, &list); in drain_cpu_caches()
2335 int __kmem_cache_shrink(struct kmem_cache *cachep) in __kmem_cache_shrink() argument
2341 drain_cpu_caches(cachep); in __kmem_cache_shrink()
2344 for_each_kmem_cache_node(cachep, node, n) { in __kmem_cache_shrink()
2345 drain_freelist(cachep, n, INT_MAX); in __kmem_cache_shrink()
2353 int __kmem_cache_shutdown(struct kmem_cache *cachep) in __kmem_cache_shutdown() argument
2355 return __kmem_cache_shrink(cachep); in __kmem_cache_shutdown()
2358 void __kmem_cache_release(struct kmem_cache *cachep) in __kmem_cache_release() argument
2363 cache_random_seq_destroy(cachep); in __kmem_cache_release()
2365 free_percpu(cachep->cpu_cache); in __kmem_cache_release()
2368 for_each_kmem_cache_node(cachep, i, n) { in __kmem_cache_release()
2372 cachep->node[i] = NULL; in __kmem_cache_release()
2390 static void *alloc_slabmgmt(struct kmem_cache *cachep, in alloc_slabmgmt() argument
2400 if (OBJFREELIST_SLAB(cachep)) in alloc_slabmgmt()
2402 else if (OFF_SLAB(cachep)) { in alloc_slabmgmt()
2404 freelist = kmem_cache_alloc_node(cachep->freelist_cache, in alloc_slabmgmt()
2410 freelist = addr + (PAGE_SIZE << cachep->gfporder) - in alloc_slabmgmt()
2411 cachep->freelist_size; in alloc_slabmgmt()
2428 static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page) in cache_init_objs_debug() argument
2433 for (i = 0; i < cachep->num; i++) { in cache_init_objs_debug()
2434 void *objp = index_to_obj(cachep, page, i); in cache_init_objs_debug()
2436 if (cachep->flags & SLAB_STORE_USER) in cache_init_objs_debug()
2437 *dbg_userword(cachep, objp) = NULL; in cache_init_objs_debug()
2439 if (cachep->flags & SLAB_RED_ZONE) { in cache_init_objs_debug()
2440 *dbg_redzone1(cachep, objp) = RED_INACTIVE; in cache_init_objs_debug()
2441 *dbg_redzone2(cachep, objp) = RED_INACTIVE; in cache_init_objs_debug()
2448 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) { in cache_init_objs_debug()
2449 kasan_unpoison_object_data(cachep, in cache_init_objs_debug()
2450 objp + obj_offset(cachep)); in cache_init_objs_debug()
2451 cachep->ctor(objp + obj_offset(cachep)); in cache_init_objs_debug()
2453 cachep, objp + obj_offset(cachep)); in cache_init_objs_debug()
2456 if (cachep->flags & SLAB_RED_ZONE) { in cache_init_objs_debug()
2457 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) in cache_init_objs_debug()
2458 slab_error(cachep, "constructor overwrote the end of an object"); in cache_init_objs_debug()
2459 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) in cache_init_objs_debug()
2460 slab_error(cachep, "constructor overwrote the start of an object"); in cache_init_objs_debug()
2463 if (cachep->flags & SLAB_POISON) { in cache_init_objs_debug()
2464 poison_obj(cachep, objp, POISON_FREE); in cache_init_objs_debug()
2465 slab_kernel_map(cachep, objp, 0, 0); in cache_init_objs_debug()
2487 struct kmem_cache *cachep, in freelist_state_initialize() argument
2497 if (!cachep->random_seq) { in freelist_state_initialize()
2501 state->list = cachep->random_seq; in freelist_state_initialize()
2528 static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page) in shuffle_freelist() argument
2530 unsigned int objfreelist = 0, i, rand, count = cachep->num; in shuffle_freelist()
2537 precomputed = freelist_state_initialize(&state, cachep, count); in shuffle_freelist()
2540 if (OBJFREELIST_SLAB(cachep)) { in shuffle_freelist()
2545 page->freelist = index_to_obj(cachep, page, objfreelist) + in shuffle_freelist()
2546 obj_offset(cachep); in shuffle_freelist()
2569 if (OBJFREELIST_SLAB(cachep)) in shuffle_freelist()
2570 set_free_obj(page, cachep->num - 1, objfreelist); in shuffle_freelist()
2575 static inline bool shuffle_freelist(struct kmem_cache *cachep, in shuffle_freelist() argument
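With CONFIG_SLAB_FREELIST_RANDOM, shuffle_freelist() stores a new slab's object indices in random order so allocation order is unpredictable. At its core this is a Fisher-Yates shuffle; a self-contained sketch of that pass, with rand_u32() standing in for the kernel's RNG (a placeholder, not a real kernel function):

    #include <stdint.h>

    /* placeholder RNG; the kernel uses its own random helpers for this */
    extern uint32_t rand_u32(void);

    /* fill idx[0..count) with 0 .. count-1 in random order (Fisher-Yates) */
    static void shuffle_indices(uint16_t *idx, unsigned int count)
    {
            unsigned int i, j;
            uint16_t tmp;

            if (!count)
                    return;

            for (i = 0; i < count; i++)
                    idx[i] = (uint16_t)i;

            for (i = count - 1; i > 0; i--) {
                    j = rand_u32() % (i + 1);
                    tmp = idx[i];
                    idx[i] = idx[j];
                    idx[j] = tmp;
            }
    }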
2582 static void cache_init_objs(struct kmem_cache *cachep, in cache_init_objs() argument
2589 cache_init_objs_debug(cachep, page); in cache_init_objs()
2592 shuffled = shuffle_freelist(cachep, page); in cache_init_objs()
2594 if (!shuffled && OBJFREELIST_SLAB(cachep)) { in cache_init_objs()
2595 page->freelist = index_to_obj(cachep, page, cachep->num - 1) + in cache_init_objs()
2596 obj_offset(cachep); in cache_init_objs()
2599 for (i = 0; i < cachep->num; i++) { in cache_init_objs()
2600 objp = index_to_obj(cachep, page, i); in cache_init_objs()
2601 kasan_init_slab_obj(cachep, objp); in cache_init_objs()
2604 if (DEBUG == 0 && cachep->ctor) { in cache_init_objs()
2605 kasan_unpoison_object_data(cachep, objp); in cache_init_objs()
2606 cachep->ctor(objp); in cache_init_objs()
2607 kasan_poison_object_data(cachep, objp); in cache_init_objs()
2615 static void *slab_get_obj(struct kmem_cache *cachep, struct page *page) in slab_get_obj() argument
2619 objp = index_to_obj(cachep, page, get_free_obj(page, page->active)); in slab_get_obj()
2623 if (cachep->flags & SLAB_STORE_USER) in slab_get_obj()
2624 set_store_user_dirty(cachep); in slab_get_obj()
2630 static void slab_put_obj(struct kmem_cache *cachep, in slab_put_obj() argument
2633 unsigned int objnr = obj_to_index(cachep, page, objp); in slab_put_obj()
2638 for (i = page->active; i < cachep->num; i++) { in slab_put_obj()
2641 cachep->name, objp); in slab_put_obj()
2648 page->freelist = objp + obj_offset(cachep); in slab_put_obj()
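slab_get_obj()/slab_put_obj() treat the slab freelist as an array of object indices, with page->active marking the boundary between allocated and free entries. A simplified userspace model of that bookkeeping; the struct and index width are assumptions, since the real freelist_idx_t size depends on objects per slab:

    #include <stdint.h>

    /*
     * Model of the on-slab freelist: an array of object indices. Entries at
     * positions [active, num) are free; entries below 'active' are in use.
     */
    struct slab_model {
            uint8_t freelist[64];   /* index entries, 'num' of them are valid */
            unsigned int active;    /* objects currently handed out from this slab */
            unsigned int num;       /* total objects the slab holds */
    };

    /* like slab_get_obj(): consume the next free index (caller checks active < num) */
    static unsigned int slab_get_idx(struct slab_model *s)
    {
            return s->freelist[s->active++];
    }

    /* like slab_put_obj(): push a freed object's index back just below 'active' */
    static void slab_put_idx(struct slab_model *s, unsigned int objnr)
    {
            s->freelist[--s->active] = (uint8_t)objnr;
    }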
2669 static struct page *cache_grow_begin(struct kmem_cache *cachep, in cache_grow_begin() argument
2700 page = kmem_getpages(cachep, local_flags, nodeid); in cache_grow_begin()
2705 n = get_node(cachep, page_node); in cache_grow_begin()
2709 if (n->colour_next >= cachep->colour) in cache_grow_begin()
2713 if (offset >= cachep->colour) in cache_grow_begin()
2716 offset *= cachep->colour_off; in cache_grow_begin()
2719 freelist = alloc_slabmgmt(cachep, page, offset, in cache_grow_begin()
2721 if (OFF_SLAB(cachep) && !freelist) in cache_grow_begin()
2724 slab_map_pages(cachep, page, freelist); in cache_grow_begin()
2727 cache_init_objs(cachep, page); in cache_grow_begin()
2735 kmem_freepages(cachep, page); in cache_grow_begin()
2742 static void cache_grow_end(struct kmem_cache *cachep, struct page *page) in cache_grow_end() argument
2753 n = get_node(cachep, page_to_nid(page)); in cache_grow_end()
2759 fixup_slab_list(cachep, n, page, &list); in cache_grow_end()
2762 STATS_INC_GROWN(cachep); in cache_grow_end()
2763 n->free_objects += cachep->num - page->active; in cache_grow_end()
2766 fixup_objfreelist_debug(cachep, &list); in cache_grow_end()
2807 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, in cache_free_debugcheck() argument
2813 BUG_ON(virt_to_cache(objp) != cachep); in cache_free_debugcheck()
2815 objp -= obj_offset(cachep); in cache_free_debugcheck()
2819 if (cachep->flags & SLAB_RED_ZONE) { in cache_free_debugcheck()
2820 verify_redzone_free(cachep, objp); in cache_free_debugcheck()
2821 *dbg_redzone1(cachep, objp) = RED_INACTIVE; in cache_free_debugcheck()
2822 *dbg_redzone2(cachep, objp) = RED_INACTIVE; in cache_free_debugcheck()
2824 if (cachep->flags & SLAB_STORE_USER) { in cache_free_debugcheck()
2825 set_store_user_dirty(cachep); in cache_free_debugcheck()
2826 *dbg_userword(cachep, objp) = (void *)caller; in cache_free_debugcheck()
2829 objnr = obj_to_index(cachep, page, objp); in cache_free_debugcheck()
2831 BUG_ON(objnr >= cachep->num); in cache_free_debugcheck()
2832 BUG_ON(objp != index_to_obj(cachep, page, objnr)); in cache_free_debugcheck()
2834 if (cachep->flags & SLAB_POISON) { in cache_free_debugcheck()
2835 poison_obj(cachep, objp, POISON_FREE); in cache_free_debugcheck()
2836 slab_kernel_map(cachep, objp, 0, caller); in cache_free_debugcheck()
2846 static inline void fixup_objfreelist_debug(struct kmem_cache *cachep, in fixup_objfreelist_debug() argument
2854 objp = next - obj_offset(cachep); in fixup_objfreelist_debug()
2856 poison_obj(cachep, objp, POISON_FREE); in fixup_objfreelist_debug()
2861 static inline void fixup_slab_list(struct kmem_cache *cachep, in fixup_slab_list() argument
2867 if (page->active == cachep->num) { in fixup_slab_list()
2869 if (OBJFREELIST_SLAB(cachep)) { in fixup_slab_list()
2872 if (cachep->flags & SLAB_POISON) { in fixup_slab_list()
2942 static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep, in cache_alloc_pfmemalloc() argument
2959 obj = slab_get_obj(cachep, page); in cache_alloc_pfmemalloc()
2962 fixup_slab_list(cachep, n, page, &list); in cache_alloc_pfmemalloc()
2965 fixup_objfreelist_debug(cachep, &list); in cache_alloc_pfmemalloc()
2974 static __always_inline int alloc_block(struct kmem_cache *cachep, in alloc_block() argument
2981 BUG_ON(page->active >= cachep->num); in alloc_block()
2983 while (page->active < cachep->num && batchcount--) { in alloc_block()
2984 STATS_INC_ALLOCED(cachep); in alloc_block()
2985 STATS_INC_ACTIVE(cachep); in alloc_block()
2986 STATS_SET_HIGH(cachep); in alloc_block()
2988 ac->entry[ac->avail++] = slab_get_obj(cachep, page); in alloc_block()
2994 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) in cache_alloc_refill() argument
3006 ac = cpu_cache_get(cachep); in cache_alloc_refill()
3016 n = get_node(cachep, node); in cache_alloc_refill()
3038 check_spinlock_acquired(cachep); in cache_alloc_refill()
3040 batchcount = alloc_block(cachep, ac, page, batchcount); in cache_alloc_refill()
3041 fixup_slab_list(cachep, n, page, &list); in cache_alloc_refill()
3048 fixup_objfreelist_debug(cachep, &list); in cache_alloc_refill()
3054 void *obj = cache_alloc_pfmemalloc(cachep, n, flags); in cache_alloc_refill()
3060 page = cache_grow_begin(cachep, gfp_exact_node(flags), node); in cache_alloc_refill()
3066 ac = cpu_cache_get(cachep); in cache_alloc_refill()
3068 alloc_block(cachep, ac, page, batchcount); in cache_alloc_refill()
3069 cache_grow_end(cachep, page); in cache_alloc_refill()
3079 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep, in cache_alloc_debugcheck_before() argument
3086 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, in cache_alloc_debugcheck_after() argument
3091 if (cachep->flags & SLAB_POISON) { in cache_alloc_debugcheck_after()
3092 check_poison_obj(cachep, objp); in cache_alloc_debugcheck_after()
3093 slab_kernel_map(cachep, objp, 1, 0); in cache_alloc_debugcheck_after()
3094 poison_obj(cachep, objp, POISON_INUSE); in cache_alloc_debugcheck_after()
3096 if (cachep->flags & SLAB_STORE_USER) in cache_alloc_debugcheck_after()
3097 *dbg_userword(cachep, objp) = (void *)caller; in cache_alloc_debugcheck_after()
3099 if (cachep->flags & SLAB_RED_ZONE) { in cache_alloc_debugcheck_after()
3100 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || in cache_alloc_debugcheck_after()
3101 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { in cache_alloc_debugcheck_after()
3102 slab_error(cachep, "double free, or memory outside object was overwritten"); in cache_alloc_debugcheck_after()
3104 objp, *dbg_redzone1(cachep, objp), in cache_alloc_debugcheck_after()
3105 *dbg_redzone2(cachep, objp)); in cache_alloc_debugcheck_after()
3107 *dbg_redzone1(cachep, objp) = RED_ACTIVE; in cache_alloc_debugcheck_after()
3108 *dbg_redzone2(cachep, objp) = RED_ACTIVE; in cache_alloc_debugcheck_after()
3111 objp += obj_offset(cachep); in cache_alloc_debugcheck_after()
3112 if (cachep->ctor && cachep->flags & SLAB_POISON) in cache_alloc_debugcheck_after()
3113 cachep->ctor(objp); in cache_alloc_debugcheck_after()
3125 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) in ____cache_alloc() argument
3132 ac = cpu_cache_get(cachep); in ____cache_alloc()
3137 STATS_INC_ALLOCHIT(cachep); in ____cache_alloc()
3141 STATS_INC_ALLOCMISS(cachep); in ____cache_alloc()
3142 objp = cache_alloc_refill(cachep, flags); in ____cache_alloc()
3147 ac = cpu_cache_get(cachep); in ____cache_alloc()
3167 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) in alternate_node_alloc() argument
3174 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) in alternate_node_alloc()
3179 return ____cache_alloc_node(cachep, flags, nid_alloc); in alternate_node_alloc()
3258 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, in ____cache_alloc_node() argument
3267 n = get_node(cachep, nodeid); in ____cache_alloc_node()
3276 check_spinlock_acquired_node(cachep, nodeid); in ____cache_alloc_node()
3278 STATS_INC_NODEALLOCS(cachep); in ____cache_alloc_node()
3279 STATS_INC_ACTIVE(cachep); in ____cache_alloc_node()
3280 STATS_SET_HIGH(cachep); in ____cache_alloc_node()
3282 BUG_ON(page->active == cachep->num); in ____cache_alloc_node()
3284 obj = slab_get_obj(cachep, page); in ____cache_alloc_node()
3287 fixup_slab_list(cachep, n, page, &list); in ____cache_alloc_node()
3290 fixup_objfreelist_debug(cachep, &list); in ____cache_alloc_node()
3295 page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid); in ____cache_alloc_node()
3298 obj = slab_get_obj(cachep, page); in ____cache_alloc_node()
3300 cache_grow_end(cachep, page); in ____cache_alloc_node()
3302 return obj ? obj : fallback_alloc(cachep, flags); in ____cache_alloc_node()
3306 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, in slab_alloc_node() argument
3314 cachep = slab_pre_alloc_hook(cachep, flags); in slab_alloc_node()
3315 if (unlikely(!cachep)) in slab_alloc_node()
3318 cache_alloc_debugcheck_before(cachep, flags); in slab_alloc_node()
3324 if (unlikely(!get_node(cachep, nodeid))) { in slab_alloc_node()
3326 ptr = fallback_alloc(cachep, flags); in slab_alloc_node()
3337 ptr = ____cache_alloc(cachep, flags); in slab_alloc_node()
3342 ptr = ____cache_alloc_node(cachep, flags, nodeid); in slab_alloc_node()
3345 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); in slab_alloc_node()
3348 memset(ptr, 0, cachep->object_size); in slab_alloc_node()
3350 slab_post_alloc_hook(cachep, flags, 1, &ptr); in slab_alloc_node()
3379 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags) in __do_cache_alloc() argument
3381 return ____cache_alloc(cachep, flags); in __do_cache_alloc()
3387 slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) in slab_alloc() argument
3393 cachep = slab_pre_alloc_hook(cachep, flags); in slab_alloc()
3394 if (unlikely(!cachep)) in slab_alloc()
3397 cache_alloc_debugcheck_before(cachep, flags); in slab_alloc()
3399 objp = __do_cache_alloc(cachep, flags); in slab_alloc()
3401 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); in slab_alloc()
3405 memset(objp, 0, cachep->object_size); in slab_alloc()
3407 slab_post_alloc_hook(cachep, flags, 1, &objp); in slab_alloc()
3415 static void free_block(struct kmem_cache *cachep, void **objpp, in free_block() argument
3419 struct kmem_cache_node *n = get_node(cachep, node); in free_block()
3432 check_spinlock_acquired_node(cachep, node); in free_block()
3433 slab_put_obj(cachep, page, objp); in free_block()
3434 STATS_DEC_ACTIVE(cachep); in free_block()
3449 n->free_objects -= cachep->num; in free_block()
3457 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) in cache_flusharray() argument
3467 n = get_node(cachep, node); in cache_flusharray()
3482 free_block(cachep, ac->entry, batchcount, node, &list); in cache_flusharray()
3494 STATS_SET_FREEABLE(cachep, i); in cache_flusharray()
3498 slabs_destroy(cachep, &list); in cache_flusharray()
3507 static inline void __cache_free(struct kmem_cache *cachep, void *objp, in __cache_free() argument
3511 if (kasan_slab_free(cachep, objp)) in __cache_free()
3514 ___cache_free(cachep, objp, caller); in __cache_free()
3517 void ___cache_free(struct kmem_cache *cachep, void *objp, in ___cache_free() argument
3520 struct array_cache *ac = cpu_cache_get(cachep); in ___cache_free()
3523 kmemleak_free_recursive(objp, cachep->flags); in ___cache_free()
3524 objp = cache_free_debugcheck(cachep, objp, caller); in ___cache_free()
3526 kmemcheck_slab_free(cachep, objp, cachep->object_size); in ___cache_free()
3535 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp)) in ___cache_free()
3539 STATS_INC_FREEHIT(cachep); in ___cache_free()
3541 STATS_INC_FREEMISS(cachep); in ___cache_free()
3542 cache_flusharray(cachep, ac); in ___cache_free()
3549 cache_free_pfmemalloc(cachep, page, objp); in ___cache_free()
3565 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) in kmem_cache_alloc() argument
3567 void *ret = slab_alloc(cachep, flags, _RET_IP_); in kmem_cache_alloc()
3569 kasan_slab_alloc(cachep, ret, flags); in kmem_cache_alloc()
3571 cachep->object_size, cachep->size, flags); in kmem_cache_alloc()
3629 kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) in kmem_cache_alloc_trace() argument
3633 ret = slab_alloc(cachep, flags, _RET_IP_); in kmem_cache_alloc_trace()
3635 kasan_kmalloc(cachep, ret, size, flags); in kmem_cache_alloc_trace()
3637 size, cachep->size, flags); in kmem_cache_alloc_trace()
3655 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) in kmem_cache_alloc_node() argument
3657 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); in kmem_cache_alloc_node()
3659 kasan_slab_alloc(cachep, ret, flags); in kmem_cache_alloc_node()
3661 cachep->object_size, cachep->size, in kmem_cache_alloc_node()
3669 void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep, in kmem_cache_alloc_node_trace() argument
3676 ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); in kmem_cache_alloc_node_trace()
3678 kasan_kmalloc(cachep, ret, size, flags); in kmem_cache_alloc_node_trace()
3680 size, cachep->size, in kmem_cache_alloc_node_trace()
3690 struct kmem_cache *cachep; in __do_kmalloc_node() local
3693 cachep = kmalloc_slab(size, flags); in __do_kmalloc_node()
3694 if (unlikely(ZERO_OR_NULL_PTR(cachep))) in __do_kmalloc_node()
3695 return cachep; in __do_kmalloc_node()
3696 ret = kmem_cache_alloc_node_trace(cachep, flags, node, size); in __do_kmalloc_node()
3697 kasan_kmalloc(cachep, ret, size, flags); in __do_kmalloc_node()
3725 struct kmem_cache *cachep; in __do_kmalloc() local
3728 cachep = kmalloc_slab(size, flags); in __do_kmalloc()
3729 if (unlikely(ZERO_OR_NULL_PTR(cachep))) in __do_kmalloc()
3730 return cachep; in __do_kmalloc()
3731 ret = slab_alloc(cachep, flags, caller); in __do_kmalloc()
3733 kasan_kmalloc(cachep, ret, size, flags); in __do_kmalloc()
3735 size, cachep->size, flags); in __do_kmalloc()
3760 void kmem_cache_free(struct kmem_cache *cachep, void *objp) in kmem_cache_free() argument
3763 cachep = cache_from_obj(cachep, objp); in kmem_cache_free()
3764 if (!cachep) in kmem_cache_free()
3768 debug_check_no_locks_freed(objp, cachep->object_size); in kmem_cache_free()
3769 if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) in kmem_cache_free()
3770 debug_check_no_obj_freed(objp, cachep->object_size); in kmem_cache_free()
3771 __cache_free(cachep, objp, _RET_IP_); in kmem_cache_free()
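kmem_cache_alloc() and kmem_cache_free() are the public entry points wrapped by slab_alloc() and __cache_free() above. A minimal, conventional usage sketch; the widget type, cache and error handling are illustrative:

    #include <linux/errno.h>
    #include <linux/slab.h>

    struct widget {
            int id;
            char name[16];
    };

    static struct kmem_cache *widget_cache;

    static int widget_demo(void)
    {
            struct widget *w;

            widget_cache = KMEM_CACHE(widget, SLAB_HWCACHE_ALIGN);
            if (!widget_cache)
                    return -ENOMEM;

            w = kmem_cache_alloc(widget_cache, GFP_KERNEL); /* slab_alloc() fast path */
            if (!w) {
                    kmem_cache_destroy(widget_cache);
                    return -ENOMEM;
            }
            w->id = 1;

            kmem_cache_free(widget_cache, w);  /* object returns to the per-CPU array */
            kmem_cache_destroy(widget_cache);  /* shrinks and releases the cache */
            return 0;
    }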
3836 static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp) in setup_kmem_cache_nodes() argument
3843 ret = setup_kmem_cache_node(cachep, node, gfp, true); in setup_kmem_cache_nodes()
3852 if (!cachep->list.next) { in setup_kmem_cache_nodes()
3856 n = get_node(cachep, node); in setup_kmem_cache_nodes()
3861 cachep->node[node] = NULL; in setup_kmem_cache_nodes()
3870 static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, in __do_tune_cpucache() argument
3876 cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount); in __do_tune_cpucache()
3880 prev = cachep->cpu_cache; in __do_tune_cpucache()
3881 cachep->cpu_cache = cpu_cache; in __do_tune_cpucache()
3885 cachep->batchcount = batchcount; in __do_tune_cpucache()
3886 cachep->limit = limit; in __do_tune_cpucache()
3887 cachep->shared = shared; in __do_tune_cpucache()
3899 n = get_node(cachep, node); in __do_tune_cpucache()
3901 free_block(cachep, ac->entry, ac->avail, node, &list); in __do_tune_cpucache()
3903 slabs_destroy(cachep, &list); in __do_tune_cpucache()
3908 return setup_kmem_cache_nodes(cachep, gfp); in __do_tune_cpucache()
3911 static int do_tune_cpucache(struct kmem_cache *cachep, int limit, in do_tune_cpucache() argument
3917 ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp); in do_tune_cpucache()
3922 if ((ret < 0) || !is_root_cache(cachep)) in do_tune_cpucache()
3926 for_each_memcg_cache(c, cachep) { in do_tune_cpucache()
3935 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) in enable_cpucache() argument
3942 err = cache_random_seq_create(cachep, cachep->num, gfp); in enable_cpucache()
3946 if (!is_root_cache(cachep)) { in enable_cpucache()
3947 struct kmem_cache *root = memcg_root_cache(cachep); in enable_cpucache()
3964 if (cachep->size > 131072) in enable_cpucache()
3966 else if (cachep->size > PAGE_SIZE) in enable_cpucache()
3968 else if (cachep->size > 1024) in enable_cpucache()
3970 else if (cachep->size > 256) in enable_cpucache()
3985 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1) in enable_cpucache()
3998 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp); in enable_cpucache()
4002 cachep->name, -err); in enable_cpucache()
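enable_cpucache() derives the per-CPU array depth from the object size: large objects get almost no per-CPU caching, small objects a deep cache, and batchcount ends up around half of limit. A sketch of that ladder; the size thresholds mirror the ones visible above, but the returned limit values are illustrative, not a claim about the kernel's exact numbers:

    #include <stddef.h>

    /* pick a per-CPU cache depth from the object size (limit values illustrative) */
    static unsigned int pick_limit(size_t size, size_t page_size)
    {
            if (size > 128 * 1024)
                    return 1;       /* huge objects: keep almost nothing per CPU */
            if (size > page_size)
                    return 8;
            if (size > 1024)
                    return 24;
            if (size > 256)
                    return 54;
            return 120;             /* small objects: deep per-CPU cache */
    }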
4011 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, in drain_array() argument
4028 drain_array_locked(cachep, ac, node, false, &list); in drain_array()
4031 slabs_destroy(cachep, &list); in drain_array()
4104 void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) in get_slabinfo() argument
4120 for_each_kmem_cache_node(cachep, node, n) { in get_slabinfo()
4128 if (page->active == cachep->num && !error) in get_slabinfo()
4148 num_objs = num_slabs * cachep->num; in get_slabinfo()
4151 active_objs += (num_slabs_full * cachep->num); in get_slabinfo()
4156 name = cachep->name; in get_slabinfo()
4165 sinfo->limit = cachep->limit; in get_slabinfo()
4166 sinfo->batchcount = cachep->batchcount; in get_slabinfo()
4167 sinfo->shared = cachep->shared; in get_slabinfo()
4168 sinfo->objects_per_slab = cachep->num; in get_slabinfo()
4169 sinfo->cache_order = cachep->gfporder; in get_slabinfo()
4172 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) in slabinfo_show_stats() argument
4176 unsigned long high = cachep->high_mark; in slabinfo_show_stats()
4177 unsigned long allocs = cachep->num_allocations; in slabinfo_show_stats()
4178 unsigned long grown = cachep->grown; in slabinfo_show_stats()
4179 unsigned long reaped = cachep->reaped; in slabinfo_show_stats()
4180 unsigned long errors = cachep->errors; in slabinfo_show_stats()
4181 unsigned long max_freeable = cachep->max_freeable; in slabinfo_show_stats()
4182 unsigned long node_allocs = cachep->node_allocs; in slabinfo_show_stats()
4183 unsigned long node_frees = cachep->node_frees; in slabinfo_show_stats()
4184 unsigned long overflows = cachep->node_overflow; in slabinfo_show_stats()
4193 unsigned long allochit = atomic_read(&cachep->allochit); in slabinfo_show_stats()
4194 unsigned long allocmiss = atomic_read(&cachep->allocmiss); in slabinfo_show_stats()
4195 unsigned long freehit = atomic_read(&cachep->freehit); in slabinfo_show_stats()
4196 unsigned long freemiss = atomic_read(&cachep->freemiss); in slabinfo_show_stats()
4217 struct kmem_cache *cachep; in slabinfo_write() local
4236 list_for_each_entry(cachep, &slab_caches, list) { in slabinfo_write()
4237 if (!strcmp(cachep->name, kbuf)) { in slabinfo_write()
4242 res = do_tune_cpucache(cachep, limit, in slabinfo_write()
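slabinfo_write() lets root retune limit, batchcount and shared at runtime by writing them after the cache name to /proc/slabinfo, which ends in the do_tune_cpucache() call above. A userspace sketch of such a write; the cache name and values are only an example, and the write needs appropriate privileges:

    #include <stdio.h>

    /* write "name limit batchcount shared" to /proc/slabinfo */
    static int tune_slab(const char *name, int limit, int batchcount, int shared)
    {
            FILE *f = fopen("/proc/slabinfo", "w");

            if (!f)
                    return -1;
            fprintf(f, "%s %d %d %d\n", name, limit, batchcount, shared);
            return fclose(f);
    }
    /* e.g. tune_slab("dentry", 120, 60, 8); -- values purely illustrative */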
4341 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); in leaks_show() local
4349 if (!(cachep->flags & SLAB_STORE_USER)) in leaks_show()
4351 if (!(cachep->flags & SLAB_RED_ZONE)) in leaks_show()
4361 set_store_user_clean(cachep); in leaks_show()
4362 drain_cpu_caches(cachep); in leaks_show()
4366 for_each_kmem_cache_node(cachep, node, n) { in leaks_show()
4372 handle_slab(x, cachep, page); in leaks_show()
4374 handle_slab(x, cachep, page); in leaks_show()
4377 } while (!is_store_user_clean(cachep)); in leaks_show()
4379 name = cachep->name; in leaks_show()
4454 struct kmem_cache *cachep; in __check_heap_object() local
4459 cachep = page->slab_cache; in __check_heap_object()
4460 objnr = obj_to_index(cachep, page, (void *)ptr); in __check_heap_object()
4461 BUG_ON(objnr >= cachep->num); in __check_heap_object()
4464 offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep); in __check_heap_object()
4467 if (offset <= cachep->object_size && n <= cachep->object_size - offset) in __check_heap_object()
4470 return cachep->name; in __check_heap_object()
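__check_heap_object() (the hardened-usercopy hook) boils down to the range check visible above: the copy must start inside the object payload and must not run past object_size. A compact restatement with hypothetical parameter names:

    #include <stdbool.h>
    #include <stddef.h>

    /*
     * offset:      where inside the object payload the copy starts
     * n:           number of bytes user space wants copied
     * object_size: usable payload size of objects in this cache
     */
    static bool usercopy_in_bounds(size_t offset, size_t n, size_t object_size)
    {
            return offset <= object_size && n <= object_size - offset;
    }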