Lines matching refs: s — identifier cross-reference (SLUB allocator, mm/slub.c)
119 static inline int kmem_cache_debug(struct kmem_cache *s) in kmem_cache_debug() argument
122 return unlikely(s->flags & SLAB_DEBUG_FLAGS); in kmem_cache_debug()
128 void *fixup_red_left(struct kmem_cache *s, void *p) in fixup_red_left() argument
130 if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) in fixup_red_left()
131 p += s->red_left_pad; in fixup_red_left()
136 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) in kmem_cache_has_cpu_partial() argument
139 return !kmem_cache_debug(s); in kmem_cache_has_cpu_partial()
219 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
220 static void sysfs_slab_remove(struct kmem_cache *s);
222 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } in sysfs_slab_add() argument
223 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) in sysfs_slab_alias() argument
225 static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { } in memcg_propagate_slab_attrs() argument
226 static inline void sysfs_slab_remove(struct kmem_cache *s) { } in sysfs_slab_remove() argument
229 static inline void stat(const struct kmem_cache *s, enum stat_item si) in stat() argument
236 raw_cpu_inc(s->cpu_slab->stat[si]); in stat()
249 static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr, in freelist_ptr() argument
263 return (void *)((unsigned long)ptr ^ s->random ^ in freelist_ptr()
271 static inline void *freelist_dereference(const struct kmem_cache *s, in freelist_dereference() argument
274 return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr), in freelist_dereference()
278 static inline void *get_freepointer(struct kmem_cache *s, void *object) in get_freepointer() argument
280 return freelist_dereference(s, object + s->offset); in get_freepointer()
283 static void prefetch_freepointer(const struct kmem_cache *s, void *object) in prefetch_freepointer() argument
285 prefetch(object + s->offset); in prefetch_freepointer()
288 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object) in get_freepointer_safe() argument
294 return get_freepointer(s, object); in get_freepointer_safe()
296 freepointer_addr = (unsigned long)object + s->offset; in get_freepointer_safe()
298 return freelist_ptr(s, p, freepointer_addr); in get_freepointer_safe()
301 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) in set_freepointer() argument
303 unsigned long freeptr_addr = (unsigned long)object + s->offset; in set_freepointer()
309 *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr); in set_freepointer()
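The helpers above implement the hardened freelist pointer scheme: with CONFIG_SLAB_FREELIST_HARDENED, the next-free pointer stored inside an object is XORed with the per-cache cookie s->random and a value derived from the freepointer slot's own address (the second operand is cut off in this listing), so the very same transform encodes on set_freepointer() and decodes on get_freepointer(). A minimal standalone sketch of that round trip; cache_lite and obfuscate_fp are illustrative names, not the kernel API:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the per-cache state consulted by freelist_ptr(). */
struct cache_lite {
        uintptr_t random;                        /* per-cache cookie, cf. s->random */
};

/* One XOR transform serves both directions: applying it twice with the same
 * cookie and slot address recovers the original pointer. */
static void *obfuscate_fp(const struct cache_lite *c, void *ptr, uintptr_t slot_addr)
{
        return (void *)((uintptr_t)ptr ^ c->random ^ slot_addr);
}

int main(void)
{
        struct cache_lite c = { .random = (uintptr_t)0x9e3779b97f4a7c15ull };
        int object;                              /* pretend next free object */
        void **slot = malloc(sizeof(*slot));     /* pretend freepointer slot */

        if (!slot)
                return 1;
        /* set_freepointer(): store the obfuscated value in the slot */
        *slot = obfuscate_fp(&c, &object, (uintptr_t)slot);
        /* get_freepointer(): load the slot and undo the obfuscation */
        void *loaded = obfuscate_fp(&c, *slot, (uintptr_t)slot);

        assert(loaded == (void *)&object);
        printf("raw=%p stored=%p decoded=%p\n", (void *)&object, *slot, loaded);
        free(slot);
        return 0;
}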
319 static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr) in slab_index() argument
321 return (kasan_reset_tag(p) - addr) / s->size; in slab_index()
365 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page, in __cmpxchg_double_slab() argument
373 if (s->flags & __CMPXCHG_DOUBLE) { in __cmpxchg_double_slab()
393 stat(s, CMPXCHG_DOUBLE_FAIL); in __cmpxchg_double_slab()
396 pr_info("%s %s: cmpxchg double redo ", n, s->name); in __cmpxchg_double_slab()
402 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, in cmpxchg_double_slab() argument
409 if (s->flags & __CMPXCHG_DOUBLE) { in cmpxchg_double_slab()
434 stat(s, CMPXCHG_DOUBLE_FAIL); in cmpxchg_double_slab()
437 pr_info("%s %s: cmpxchg double redo ", n, s->name); in cmpxchg_double_slab()
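Both cmpxchg_double_slab() variants above atomically replace the pair (page->freelist, page->counters), but only if both words still hold the expected old values; when the hardware double-word cmpxchg is unavailable (__CMPXCHG_DOUBLE clear) they fall back to a locked compare-and-update, and callers retry on failure (hence the "cmpxchg double redo" message). A rough model of those semantics, with a pthread mutex standing in for the page lock; types and names here are illustrative, only the fallback behaviour is modelled (compile with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative model of the (freelist, counters) pair protected by a lock,
 * mirroring the fallback path of __cmpxchg_double_slab(). */
struct fake_page {
        void *freelist;
        unsigned long counters;
        pthread_mutex_t lock;                    /* stands in for the slab/page lock */
};

static bool cmpxchg_double_model(struct fake_page *page,
                                 void *freelist_old, unsigned long counters_old,
                                 void *freelist_new, unsigned long counters_new)
{
        bool ok = false;

        pthread_mutex_lock(&page->lock);
        if (page->freelist == freelist_old && page->counters == counters_old) {
                page->freelist = freelist_new;
                page->counters = counters_new;
                ok = true;
        }
        pthread_mutex_unlock(&page->lock);
        return ok;                               /* caller re-reads and retries on false */
}

int main(void)
{
        static struct fake_page page = { .freelist = NULL, .counters = 3,
                                         .lock = PTHREAD_MUTEX_INITIALIZER };
        int obj;

        /* succeeds: both fields match the expected old values */
        printf("%d\n", cmpxchg_double_model(&page, NULL, 3, &obj, 2));
        /* fails: freelist is no longer NULL, so neither field is touched */
        printf("%d\n", cmpxchg_double_model(&page, NULL, 2, NULL, 1));
        return 0;
}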
450 static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map) in get_map() argument
455 for (p = page->freelist; p; p = get_freepointer(s, p)) in get_map()
456 set_bit(slab_index(p, s, addr), map); in get_map()
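get_map() walks page->freelist and, for every object still free, sets the bit whose index slab_index() computes as (p - addr) / s->size; validation and debug code later reads the bitmap, treating clear bits as allocated objects. A tiny sketch of that indexing, assuming made-up sizes and using a flag array plus a plain pointer array in place of the embedded freelist:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define OBJ_SIZE   64           /* assumed s->size */
#define NR_OBJECTS 8            /* assumed page->objects */

int main(void)
{
        unsigned char slab[OBJ_SIZE * NR_OBJECTS];      /* stands in for the slab page */
        unsigned char map[NR_OBJECTS];                  /* one flag per object, cf. the bitmap */
        /* pretend objects 1, 4 and 6 are currently on the freelist */
        void *freelist[] = { slab + 1 * OBJ_SIZE, slab + 4 * OBJ_SIZE, slab + 6 * OBJ_SIZE };

        memset(map, 0, sizeof(map));
        for (size_t i = 0; i < sizeof(freelist) / sizeof(freelist[0]); i++) {
                /* slab_index(): offset from the page base divided by the object size */
                size_t idx = ((unsigned char *)freelist[i] - slab) / OBJ_SIZE;
                map[idx] = 1;
        }

        for (unsigned int i = 0; i < NR_OBJECTS; i++)
                printf("object %u: %s\n", i, map[i] ? "free" : "in use");
        return 0;
}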
459 static inline unsigned int size_from_object(struct kmem_cache *s) in size_from_object() argument
461 if (s->flags & SLAB_RED_ZONE) in size_from_object()
462 return s->size - s->red_left_pad; in size_from_object()
464 return s->size; in size_from_object()
467 static inline void *restore_red_left(struct kmem_cache *s, void *p) in restore_red_left() argument
469 if (s->flags & SLAB_RED_ZONE) in restore_red_left()
470 p -= s->red_left_pad; in restore_red_left()
508 static inline int check_valid_pointer(struct kmem_cache *s, in check_valid_pointer() argument
518 object = restore_red_left(s, object); in check_valid_pointer()
519 if (object < base || object >= base + page->objects * s->size || in check_valid_pointer()
520 (object - base) % s->size) { in check_valid_pointer()
536 static struct track *get_track(struct kmem_cache *s, void *object, in get_track() argument
541 if (s->offset) in get_track()
542 p = object + s->offset + sizeof(void *); in get_track()
544 p = object + s->inuse; in get_track()
549 static void set_track(struct kmem_cache *s, void *object, in set_track() argument
552 struct track *p = get_track(s, object, alloc); in set_track()
574 static void init_tracking(struct kmem_cache *s, void *object) in init_tracking() argument
576 if (!(s->flags & SLAB_STORE_USER)) in init_tracking()
579 set_track(s, object, TRACK_FREE, 0UL); in init_tracking()
580 set_track(s, object, TRACK_ALLOC, 0UL); in init_tracking()
583 static void print_track(const char *s, struct track *t, unsigned long pr_time) in print_track() argument
589 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid); in print_track()
602 static void print_tracking(struct kmem_cache *s, void *object) in print_tracking() argument
605 if (!(s->flags & SLAB_STORE_USER)) in print_tracking()
608 print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time); in print_tracking()
609 print_track("Freed", get_track(s, object, TRACK_FREE), pr_time); in print_tracking()
619 static void slab_bug(struct kmem_cache *s, char *fmt, ...) in slab_bug() argument
628 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf); in slab_bug()
635 static void slab_fix(struct kmem_cache *s, char *fmt, ...) in slab_fix() argument
643 pr_err("FIX %s: %pV\n", s->name, &vaf); in slab_fix()
647 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) in print_trailer() argument
652 print_tracking(s, p); in print_trailer()
657 p, p - addr, get_freepointer(s, p)); in print_trailer()
659 if (s->flags & SLAB_RED_ZONE) in print_trailer()
660 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad, in print_trailer()
661 s->red_left_pad); in print_trailer()
666 min_t(unsigned int, s->object_size, PAGE_SIZE)); in print_trailer()
667 if (s->flags & SLAB_RED_ZONE) in print_trailer()
668 print_section(KERN_ERR, "Redzone ", p + s->object_size, in print_trailer()
669 s->inuse - s->object_size); in print_trailer()
671 if (s->offset) in print_trailer()
672 off = s->offset + sizeof(void *); in print_trailer()
674 off = s->inuse; in print_trailer()
676 if (s->flags & SLAB_STORE_USER) in print_trailer()
679 off += kasan_metadata_size(s); in print_trailer()
681 if (off != size_from_object(s)) in print_trailer()
684 size_from_object(s) - off); in print_trailer()
689 void object_err(struct kmem_cache *s, struct page *page, in object_err() argument
692 slab_bug(s, "%s", reason); in object_err()
693 print_trailer(s, page, object); in object_err()
696 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page, in slab_err() argument
705 slab_bug(s, "%s", buf); in slab_err()
710 static void init_object(struct kmem_cache *s, void *object, u8 val) in init_object() argument
714 if (s->flags & SLAB_RED_ZONE) in init_object()
715 memset(p - s->red_left_pad, val, s->red_left_pad); in init_object()
717 if (s->flags & __OBJECT_POISON) { in init_object()
718 memset(p, POISON_FREE, s->object_size - 1); in init_object()
719 p[s->object_size - 1] = POISON_END; in init_object()
722 if (s->flags & SLAB_RED_ZONE) in init_object()
723 memset(p + s->object_size, val, s->inuse - s->object_size); in init_object()
726 static void restore_bytes(struct kmem_cache *s, char *message, u8 data, in restore_bytes() argument
729 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data); in restore_bytes()
733 static int check_bytes_and_report(struct kmem_cache *s, struct page *page, in check_bytes_and_report() argument
750 slab_bug(s, "%s overwritten", what); in check_bytes_and_report()
753 print_trailer(s, page, object); in check_bytes_and_report()
755 restore_bytes(s, what, value, fault, end); in check_bytes_and_report()
797 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) in check_pad_bytes() argument
799 unsigned long off = s->inuse; /* The end of info */ in check_pad_bytes()
801 if (s->offset) in check_pad_bytes()
805 if (s->flags & SLAB_STORE_USER) in check_pad_bytes()
809 off += kasan_metadata_size(s); in check_pad_bytes()
811 if (size_from_object(s) == off) in check_pad_bytes()
814 return check_bytes_and_report(s, page, p, "Object padding", in check_pad_bytes()
815 p + off, POISON_INUSE, size_from_object(s) - off); in check_pad_bytes()
819 static int slab_pad_check(struct kmem_cache *s, struct page *page) in slab_pad_check() argument
828 if (!(s->flags & SLAB_POISON)) in slab_pad_check()
834 remainder = length % s->size; in slab_pad_check()
847 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1); in slab_pad_check()
850 restore_bytes(s, "slab padding", POISON_INUSE, fault, end); in slab_pad_check()
854 static int check_object(struct kmem_cache *s, struct page *page, in check_object() argument
858 u8 *endobject = object + s->object_size; in check_object()
860 if (s->flags & SLAB_RED_ZONE) { in check_object()
861 if (!check_bytes_and_report(s, page, object, "Redzone", in check_object()
862 object - s->red_left_pad, val, s->red_left_pad)) in check_object()
865 if (!check_bytes_and_report(s, page, object, "Redzone", in check_object()
866 endobject, val, s->inuse - s->object_size)) in check_object()
869 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { in check_object()
870 check_bytes_and_report(s, page, p, "Alignment padding", in check_object()
872 s->inuse - s->object_size); in check_object()
876 if (s->flags & SLAB_POISON) { in check_object()
877 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) && in check_object()
878 (!check_bytes_and_report(s, page, p, "Poison", p, in check_object()
879 POISON_FREE, s->object_size - 1) || in check_object()
880 !check_bytes_and_report(s, page, p, "Poison", in check_object()
881 p + s->object_size - 1, POISON_END, 1))) in check_object()
886 check_pad_bytes(s, page, p); in check_object()
889 if (!s->offset && val == SLUB_RED_ACTIVE) in check_object()
897 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { in check_object()
898 object_err(s, page, p, "Freepointer corrupt"); in check_object()
904 set_freepointer(s, p, NULL); in check_object()
910 static int check_slab(struct kmem_cache *s, struct page *page) in check_slab() argument
917 slab_err(s, page, "Not a valid slab page"); in check_slab()
921 maxobj = order_objects(compound_order(page), s->size); in check_slab()
923 slab_err(s, page, "objects %u > max %u", in check_slab()
928 slab_err(s, page, "inuse %u > max %u", in check_slab()
933 slab_pad_check(s, page); in check_slab()
941 static int on_freelist(struct kmem_cache *s, struct page *page, void *search) in on_freelist() argument
952 if (!check_valid_pointer(s, page, fp)) { in on_freelist()
954 object_err(s, page, object, in on_freelist()
956 set_freepointer(s, object, NULL); in on_freelist()
958 slab_err(s, page, "Freepointer corrupt"); in on_freelist()
961 slab_fix(s, "Freelist cleared"); in on_freelist()
967 fp = get_freepointer(s, object); in on_freelist()
971 max_objects = order_objects(compound_order(page), s->size); in on_freelist()
976 slab_err(s, page, "Wrong number of objects. Found %d but should be %d", in on_freelist()
979 slab_fix(s, "Number of objects adjusted."); in on_freelist()
982 slab_err(s, page, "Wrong object count. Counter is %d but counted were %d", in on_freelist()
985 slab_fix(s, "Object count adjusted."); in on_freelist()
990 static void trace(struct kmem_cache *s, struct page *page, void *object, in trace() argument
993 if (s->flags & SLAB_TRACE) { in trace()
995 s->name, in trace()
1002 s->object_size); in trace()
1011 static void add_full(struct kmem_cache *s, in add_full() argument
1014 if (!(s->flags & SLAB_STORE_USER)) in add_full()
1021 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) in remove_full() argument
1023 if (!(s->flags & SLAB_STORE_USER)) in remove_full()
1031 static inline unsigned long slabs_node(struct kmem_cache *s, int node) in slabs_node() argument
1033 struct kmem_cache_node *n = get_node(s, node); in slabs_node()
1043 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) in inc_slabs_node() argument
1045 struct kmem_cache_node *n = get_node(s, node); in inc_slabs_node()
1058 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) in dec_slabs_node() argument
1060 struct kmem_cache_node *n = get_node(s, node); in dec_slabs_node()
1067 static void setup_object_debug(struct kmem_cache *s, struct page *page, in setup_object_debug() argument
1070 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) in setup_object_debug()
1073 init_object(s, object, SLUB_RED_INACTIVE); in setup_object_debug()
1074 init_tracking(s, object); in setup_object_debug()
1078 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) in setup_page_debug() argument
1080 if (!(s->flags & SLAB_POISON)) in setup_page_debug()
1088 static inline int alloc_consistency_checks(struct kmem_cache *s, in alloc_consistency_checks() argument
1091 if (!check_slab(s, page)) in alloc_consistency_checks()
1094 if (!check_valid_pointer(s, page, object)) { in alloc_consistency_checks()
1095 object_err(s, page, object, "Freelist Pointer check fails"); in alloc_consistency_checks()
1099 if (!check_object(s, page, object, SLUB_RED_INACTIVE)) in alloc_consistency_checks()
1105 static noinline int alloc_debug_processing(struct kmem_cache *s, in alloc_debug_processing() argument
1109 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in alloc_debug_processing()
1110 if (!alloc_consistency_checks(s, page, object)) in alloc_debug_processing()
1115 if (s->flags & SLAB_STORE_USER) in alloc_debug_processing()
1116 set_track(s, object, TRACK_ALLOC, addr); in alloc_debug_processing()
1117 trace(s, page, object, 1); in alloc_debug_processing()
1118 init_object(s, object, SLUB_RED_ACTIVE); in alloc_debug_processing()
1128 slab_fix(s, "Marking all objects used"); in alloc_debug_processing()
1135 static inline int free_consistency_checks(struct kmem_cache *s, in free_consistency_checks() argument
1138 if (!check_valid_pointer(s, page, object)) { in free_consistency_checks()
1139 slab_err(s, page, "Invalid object pointer 0x%p", object); in free_consistency_checks()
1143 if (on_freelist(s, page, object)) { in free_consistency_checks()
1144 object_err(s, page, object, "Object already free"); in free_consistency_checks()
1148 if (!check_object(s, page, object, SLUB_RED_ACTIVE)) in free_consistency_checks()
1151 if (unlikely(s != page->slab_cache)) { in free_consistency_checks()
1153 slab_err(s, page, "Attempt to free object(0x%p) outside of slab", in free_consistency_checks()
1160 object_err(s, page, object, in free_consistency_checks()
1169 struct kmem_cache *s, struct page *page, in free_debug_processing() argument
1173 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in free_debug_processing()
1182 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in free_debug_processing()
1183 if (!check_slab(s, page)) in free_debug_processing()
1190 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in free_debug_processing()
1191 if (!free_consistency_checks(s, page, object, addr)) in free_debug_processing()
1195 if (s->flags & SLAB_STORE_USER) in free_debug_processing()
1196 set_track(s, object, TRACK_FREE, addr); in free_debug_processing()
1197 trace(s, page, object, 0); in free_debug_processing()
1199 init_object(s, object, SLUB_RED_INACTIVE); in free_debug_processing()
1203 object = get_freepointer(s, object); in free_debug_processing()
1210 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n", in free_debug_processing()
1216 slab_fix(s, "Object at 0x%p not freed", object); in free_debug_processing()
1342 static inline void setup_object_debug(struct kmem_cache *s, in setup_object_debug() argument
1345 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {} in setup_page_debug() argument
1347 static inline int alloc_debug_processing(struct kmem_cache *s, in alloc_debug_processing() argument
1351 struct kmem_cache *s, struct page *page, in free_debug_processing() argument
1355 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) in slab_pad_check() argument
1357 static inline int check_object(struct kmem_cache *s, struct page *page, in check_object() argument
1359 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, in add_full() argument
1361 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, in remove_full() argument
1373 static inline unsigned long slabs_node(struct kmem_cache *s, int node) in slabs_node() argument
1377 static inline void inc_slabs_node(struct kmem_cache *s, int node, in inc_slabs_node() argument
1379 static inline void dec_slabs_node(struct kmem_cache *s, int node, in dec_slabs_node() argument
1402 static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x) in slab_free_hook() argument
1404 kmemleak_free_recursive(x, s->flags); in slab_free_hook()
1416 debug_check_no_locks_freed(x, s->object_size); in slab_free_hook()
1420 if (!(s->flags & SLAB_DEBUG_OBJECTS)) in slab_free_hook()
1421 debug_check_no_obj_freed(x, s->object_size); in slab_free_hook()
1424 return kasan_slab_free(s, x, _RET_IP_); in slab_free_hook()
1427 static inline bool slab_free_freelist_hook(struct kmem_cache *s, in slab_free_freelist_hook() argument
1442 next = get_freepointer(s, object); in slab_free_freelist_hook()
1444 if (slab_want_init_on_free(s)) { in slab_free_freelist_hook()
1449 memset(object, 0, s->object_size); in slab_free_freelist_hook()
1450 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad in slab_free_freelist_hook()
1452 memset((char *)object + s->inuse, 0, in slab_free_freelist_hook()
1453 s->size - s->inuse - rsize); in slab_free_freelist_hook()
1457 if (!slab_free_hook(s, object)) { in slab_free_freelist_hook()
1459 set_freepointer(s, object, *head); in slab_free_freelist_hook()
1472 static void *setup_object(struct kmem_cache *s, struct page *page, in setup_object() argument
1475 setup_object_debug(s, page, object); in setup_object()
1476 object = kasan_init_slab_obj(s, object); in setup_object()
1477 if (unlikely(s->ctor)) { in setup_object()
1478 kasan_unpoison_object_data(s, object); in setup_object()
1479 s->ctor(object); in setup_object()
1480 kasan_poison_object_data(s, object); in setup_object()
1488 static inline struct page *alloc_slab_page(struct kmem_cache *s, in alloc_slab_page() argument
1499 if (page && charge_slab_page(page, flags, order, s)) { in alloc_slab_page()
1509 static int init_cache_random_seq(struct kmem_cache *s) in init_cache_random_seq() argument
1511 unsigned int count = oo_objects(s->oo); in init_cache_random_seq()
1515 if (s->random_seq) in init_cache_random_seq()
1518 err = cache_random_seq_create(s, count, GFP_KERNEL); in init_cache_random_seq()
1521 s->name); in init_cache_random_seq()
1526 if (s->random_seq) { in init_cache_random_seq()
1530 s->random_seq[i] *= s->size; in init_cache_random_seq()
1538 struct kmem_cache *s; in init_freelist_randomization() local
1542 list_for_each_entry(s, &slab_caches, list) in init_freelist_randomization()
1543 init_cache_random_seq(s); in init_freelist_randomization()
1549 static void *next_freelist_entry(struct kmem_cache *s, struct page *page, in next_freelist_entry() argument
1561 idx = s->random_seq[*pos]; in next_freelist_entry()
1571 static bool shuffle_freelist(struct kmem_cache *s, struct page *page) in shuffle_freelist() argument
1578 if (page->objects < 2 || !s->random_seq) in shuffle_freelist()
1581 freelist_count = oo_objects(s->oo); in shuffle_freelist()
1584 page_limit = page->objects * s->size; in shuffle_freelist()
1585 start = fixup_red_left(s, page_address(page)); in shuffle_freelist()
1588 cur = next_freelist_entry(s, page, &pos, start, page_limit, in shuffle_freelist()
1590 cur = setup_object(s, page, cur); in shuffle_freelist()
1594 next = next_freelist_entry(s, page, &pos, start, page_limit, in shuffle_freelist()
1596 next = setup_object(s, page, next); in shuffle_freelist()
1597 set_freepointer(s, cur, next); in shuffle_freelist()
1600 set_freepointer(s, cur, NULL); in shuffle_freelist()
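With CONFIG_SLAB_FREELIST_RANDOM, shuffle_freelist() consumes the precomputed sequence s->random_seq (each entry already multiplied by s->size in init_cache_random_seq()) and chains a new page's objects into a randomly ordered freelist via set_freepointer(). A self-contained sketch of the same idea, substituting a Fisher-Yates shuffle and rand() for cache_random_seq_create(); the object size, count and freepointer-at-offset-0 layout are all assumptions for the demo:

#include <stdio.h>
#include <stdlib.h>

#define OBJ_SIZE   32           /* assumed object size; large enough to hold a pointer */
#define NR_OBJECTS 8            /* assumed objects per page */

int main(void)
{
        unsigned char *page = malloc(OBJ_SIZE * NR_OBJECTS);    /* stands in for the slab page */
        unsigned int seq[NR_OBJECTS];
        unsigned char *cur, *next;
        unsigned int i;

        if (!page)
                return 1;

        /* Random permutation of object indices (Fisher-Yates), then scale by
         * the object size, as init_cache_random_seq() does with s->random_seq. */
        for (i = 0; i < NR_OBJECTS; i++)
                seq[i] = i;
        srand(42);
        for (i = NR_OBJECTS - 1; i > 0; i--) {
                unsigned int j = (unsigned int)rand() % (i + 1);
                unsigned int tmp = seq[i]; seq[i] = seq[j]; seq[j] = tmp;
        }
        for (i = 0; i < NR_OBJECTS; i++)
                seq[i] *= OBJ_SIZE;

        /* Link the objects in shuffled order; the first word of each object
         * plays the role of the freepointer (offset 0 for simplicity). */
        cur = page + seq[0];
        for (i = 1; i < NR_OBJECTS; i++) {
                next = page + seq[i];
                *(unsigned char **)cur = next;          /* cf. set_freepointer(s, cur, next) */
                cur = next;
        }
        *(unsigned char **)cur = NULL;                  /* terminate the freelist */

        /* Walk the shuffled freelist and print the visiting order. */
        for (cur = page + seq[0]; cur; cur = *(unsigned char **)cur)
                printf("object %u\n", (unsigned int)((cur - page) / OBJ_SIZE));

        free(page);
        return 0;
}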
1605 static inline int init_cache_random_seq(struct kmem_cache *s) in init_cache_random_seq() argument
1610 static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page) in shuffle_freelist() argument
1616 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) in allocate_slab() argument
1619 struct kmem_cache_order_objects oo = s->oo; in allocate_slab()
1630 flags |= s->allocflags; in allocate_slab()
1637 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) in allocate_slab()
1640 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1642 oo = s->min; in allocate_slab()
1648 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1651 stat(s, ORDER_FALLBACK); in allocate_slab()
1656 page->slab_cache = s; in allocate_slab()
1665 setup_page_debug(s, page, start); in allocate_slab()
1667 shuffle = shuffle_freelist(s, page); in allocate_slab()
1670 start = fixup_red_left(s, start); in allocate_slab()
1671 start = setup_object(s, page, start); in allocate_slab()
1674 next = p + s->size; in allocate_slab()
1675 next = setup_object(s, page, next); in allocate_slab()
1676 set_freepointer(s, p, next); in allocate_slab()
1679 set_freepointer(s, p, NULL); in allocate_slab()
1691 inc_slabs_node(s, page_to_nid(page), page->objects); in allocate_slab()
1696 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) in new_slab() argument
1706 return allocate_slab(s, in new_slab()
1710 static void __free_slab(struct kmem_cache *s, struct page *page) in __free_slab() argument
1715 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in __free_slab()
1718 slab_pad_check(s, page); in __free_slab()
1719 for_each_object(p, s, page_address(page), in __free_slab()
1721 check_object(s, page, p, SLUB_RED_INACTIVE); in __free_slab()
1730 uncharge_slab_page(page, order, s); in __free_slab()
1741 static void free_slab(struct kmem_cache *s, struct page *page) in free_slab() argument
1743 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) { in free_slab()
1746 __free_slab(s, page); in free_slab()
1749 static void discard_slab(struct kmem_cache *s, struct page *page) in discard_slab() argument
1751 dec_slabs_node(s, page_to_nid(page), page->objects); in discard_slab()
1752 free_slab(s, page); in discard_slab()
1789 static inline void *acquire_slab(struct kmem_cache *s, in acquire_slab() argument
1818 if (!__cmpxchg_double_slab(s, page, in acquire_slab()
1829 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1835 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, in get_partial_node() argument
1859 t = acquire_slab(s, n, page, object == NULL, &objects); in get_partial_node()
1866 stat(s, ALLOC_FROM_PARTIAL); in get_partial_node()
1869 put_cpu_partial(s, page, 0); in get_partial_node()
1870 stat(s, CPU_PARTIAL_NODE); in get_partial_node()
1872 if (!kmem_cache_has_cpu_partial(s) in get_partial_node()
1873 || available > slub_cpu_partial(s) / 2) in get_partial_node()
1884 static void *get_any_partial(struct kmem_cache *s, gfp_t flags, in get_any_partial() argument
1913 if (!s->remote_node_defrag_ratio || in get_any_partial()
1914 get_cycles() % 1024 > s->remote_node_defrag_ratio) in get_any_partial()
1923 n = get_node(s, zone_to_nid(zone)); in get_any_partial()
1926 n->nr_partial > s->min_partial) { in get_any_partial()
1927 object = get_partial_node(s, n, c, flags); in get_any_partial()
1948 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node, in get_partial() argument
1959 object = get_partial_node(s, get_node(s, searchnode), c, flags); in get_partial()
1963 return get_any_partial(s, flags, c); in get_partial()
2004 const struct kmem_cache *s, unsigned long tid) in note_cmpxchg_failure() argument
2007 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); in note_cmpxchg_failure()
2009 pr_info("%s %s: cmpxchg redo ", n, s->name); in note_cmpxchg_failure()
2024 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); in note_cmpxchg_failure()
2027 static void init_kmem_cache_cpus(struct kmem_cache *s) in init_kmem_cache_cpus() argument
2032 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu); in init_kmem_cache_cpus()
2038 static void deactivate_slab(struct kmem_cache *s, struct page *page, in deactivate_slab() argument
2042 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in deactivate_slab()
2051 stat(s, DEACTIVATE_REMOTE_FREES); in deactivate_slab()
2063 while (freelist && (nextfree = get_freepointer(s, freelist))) { in deactivate_slab()
2070 set_freepointer(s, freelist, prior); in deactivate_slab()
2075 } while (!__cmpxchg_double_slab(s, page, in deactivate_slab()
2107 set_freepointer(s, freelist, old.freelist); in deactivate_slab()
2114 if (!new.inuse && n->nr_partial >= s->min_partial) in deactivate_slab()
2129 if (kmem_cache_debug(s) && !lock) { in deactivate_slab()
2144 remove_full(s, n, page); in deactivate_slab()
2149 add_full(s, n, page); in deactivate_slab()
2153 if (!__cmpxchg_double_slab(s, page, in deactivate_slab()
2163 stat(s, tail); in deactivate_slab()
2165 stat(s, DEACTIVATE_FULL); in deactivate_slab()
2167 stat(s, DEACTIVATE_EMPTY); in deactivate_slab()
2168 discard_slab(s, page); in deactivate_slab()
2169 stat(s, FREE_SLAB); in deactivate_slab()
2183 static void unfreeze_partials(struct kmem_cache *s, in unfreeze_partials() argument
2196 n2 = get_node(s, page_to_nid(page)); in unfreeze_partials()
2216 } while (!__cmpxchg_double_slab(s, page, in unfreeze_partials()
2221 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) { in unfreeze_partials()
2226 stat(s, FREE_ADD_PARTIAL); in unfreeze_partials()
2237 stat(s, DEACTIVATE_EMPTY); in unfreeze_partials()
2238 discard_slab(s, page); in unfreeze_partials()
2239 stat(s, FREE_SLAB); in unfreeze_partials()
2251 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) in put_cpu_partial() argument
2262 oldpage = this_cpu_read(s->cpu_slab->partial); in put_cpu_partial()
2267 if (drain && pobjects > s->cpu_partial) { in put_cpu_partial()
2274 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); in put_cpu_partial()
2279 stat(s, CPU_PARTIAL_DRAIN); in put_cpu_partial()
2290 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) in put_cpu_partial()
2292 if (unlikely(!s->cpu_partial)) { in put_cpu_partial()
2296 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); in put_cpu_partial()
2303 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) in flush_slab() argument
2305 stat(s, CPUSLAB_FLUSH); in flush_slab()
2306 deactivate_slab(s, c->page, c->freelist, c); in flush_slab()
2316 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) in __flush_cpu_slab() argument
2318 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in __flush_cpu_slab()
2321 flush_slab(s, c); in __flush_cpu_slab()
2323 unfreeze_partials(s, c); in __flush_cpu_slab()
2328 struct kmem_cache *s = d; in flush_cpu_slab() local
2330 __flush_cpu_slab(s, smp_processor_id()); in flush_cpu_slab()
2335 struct kmem_cache *s = info; in has_cpu_slab() local
2336 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in has_cpu_slab()
2341 static void flush_all(struct kmem_cache *s) in flush_all() argument
2343 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC); in flush_all()
2352 struct kmem_cache *s; in slub_cpu_dead() local
2356 list_for_each_entry(s, &slab_caches, list) { in slub_cpu_dead()
2358 __flush_cpu_slab(s, cpu); in slub_cpu_dead()
2407 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) in slab_out_of_memory() argument
2421 s->name, s->object_size, s->size, oo_order(s->oo), in slab_out_of_memory()
2422 oo_order(s->min)); in slab_out_of_memory()
2424 if (oo_order(s->min) > get_order(s->object_size)) in slab_out_of_memory()
2426 s->name); in slab_out_of_memory()
2428 for_each_kmem_cache_node(s, node, n) { in slab_out_of_memory()
2443 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags, in new_slab_objects() argument
2450 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); in new_slab_objects()
2452 freelist = get_partial(s, flags, node, c); in new_slab_objects()
2457 page = new_slab(s, flags, node); in new_slab_objects()
2459 c = raw_cpu_ptr(s->cpu_slab); in new_slab_objects()
2461 flush_slab(s, c); in new_slab_objects()
2470 stat(s, ALLOC_SLAB); in new_slab_objects()
2496 static inline void *get_freelist(struct kmem_cache *s, struct page *page) in get_freelist() argument
2512 } while (!__cmpxchg_double_slab(s, page, in get_freelist()
2539 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, in ___slab_alloc() argument
2557 stat(s, ALLOC_NODE_MISMATCH); in ___slab_alloc()
2558 deactivate_slab(s, page, c->freelist, c); in ___slab_alloc()
2569 deactivate_slab(s, page, c->freelist, c); in ___slab_alloc()
2578 freelist = get_freelist(s, page); in ___slab_alloc()
2582 stat(s, DEACTIVATE_BYPASS); in ___slab_alloc()
2586 stat(s, ALLOC_REFILL); in ___slab_alloc()
2595 c->freelist = get_freepointer(s, freelist); in ___slab_alloc()
2604 stat(s, CPU_PARTIAL_ALLOC); in ___slab_alloc()
2608 freelist = new_slab_objects(s, gfpflags, node, &c); in ___slab_alloc()
2611 slab_out_of_memory(s, gfpflags, node); in ___slab_alloc()
2616 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags))) in ___slab_alloc()
2620 if (kmem_cache_debug(s) && in ___slab_alloc()
2621 !alloc_debug_processing(s, page, freelist, addr)) in ___slab_alloc()
2624 deactivate_slab(s, page, get_freepointer(s, freelist), c); in ___slab_alloc()
2632 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, in __slab_alloc() argument
2645 c = this_cpu_ptr(s->cpu_slab); in __slab_alloc()
2648 p = ___slab_alloc(s, gfpflags, node, addr, c); in __slab_alloc()
2657 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, in maybe_wipe_obj_freeptr() argument
2660 if (unlikely(slab_want_init_on_free(s)) && obj) in maybe_wipe_obj_freeptr()
2661 memset((void *)((char *)obj + s->offset), 0, sizeof(void *)); in maybe_wipe_obj_freeptr()
2674 static __always_inline void *slab_alloc_node(struct kmem_cache *s, in slab_alloc_node() argument
2682 s = slab_pre_alloc_hook(s, gfpflags); in slab_alloc_node()
2683 if (!s) in slab_alloc_node()
2697 tid = this_cpu_read(s->cpu_slab->tid); in slab_alloc_node()
2698 c = raw_cpu_ptr(s->cpu_slab); in slab_alloc_node()
2722 object = __slab_alloc(s, gfpflags, node, addr, c); in slab_alloc_node()
2723 stat(s, ALLOC_SLOWPATH); in slab_alloc_node()
2725 void *next_object = get_freepointer_safe(s, object); in slab_alloc_node()
2742 s->cpu_slab->freelist, s->cpu_slab->tid, in slab_alloc_node()
2746 note_cmpxchg_failure("slab_alloc", s, tid); in slab_alloc_node()
2749 prefetch_freepointer(s, next_object); in slab_alloc_node()
2750 stat(s, ALLOC_FASTPATH); in slab_alloc_node()
2753 maybe_wipe_obj_freeptr(s, object); in slab_alloc_node()
2755 if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object) in slab_alloc_node()
2756 memset(object, 0, s->object_size); in slab_alloc_node()
2758 slab_post_alloc_hook(s, gfpflags, 1, &object); in slab_alloc_node()
2763 static __always_inline void *slab_alloc(struct kmem_cache *s, in slab_alloc() argument
2766 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr); in slab_alloc()
2769 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) in kmem_cache_alloc() argument
2771 void *ret = slab_alloc(s, gfpflags, _RET_IP_); in kmem_cache_alloc()
2773 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, in kmem_cache_alloc()
2774 s->size, gfpflags); in kmem_cache_alloc()
2781 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) in kmem_cache_alloc_trace() argument
2783 void *ret = slab_alloc(s, gfpflags, _RET_IP_); in kmem_cache_alloc_trace()
2784 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); in kmem_cache_alloc_trace()
2785 ret = kasan_kmalloc(s, ret, size, gfpflags); in kmem_cache_alloc_trace()
2792 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) in kmem_cache_alloc_node() argument
2794 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); in kmem_cache_alloc_node()
2797 s->object_size, s->size, gfpflags, node); in kmem_cache_alloc_node()
2804 void *kmem_cache_alloc_node_trace(struct kmem_cache *s, in kmem_cache_alloc_node_trace() argument
2808 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); in kmem_cache_alloc_node_trace()
2811 size, s->size, gfpflags, node); in kmem_cache_alloc_node_trace()
2813 ret = kasan_kmalloc(s, ret, size, gfpflags); in kmem_cache_alloc_node_trace()
2828 static void __slab_free(struct kmem_cache *s, struct page *page, in __slab_free() argument
2840 stat(s, FREE_SLOWPATH); in __slab_free()
2842 if (kmem_cache_debug(s) && in __slab_free()
2843 !free_debug_processing(s, page, head, tail, cnt, addr)) in __slab_free()
2853 set_freepointer(s, tail, prior); in __slab_free()
2859 if (kmem_cache_has_cpu_partial(s) && !prior) { in __slab_free()
2871 n = get_node(s, page_to_nid(page)); in __slab_free()
2885 } while (!cmpxchg_double_slab(s, page, in __slab_free()
2897 put_cpu_partial(s, page, 1); in __slab_free()
2898 stat(s, CPU_PARTIAL_FREE); in __slab_free()
2905 stat(s, FREE_FROZEN); in __slab_free()
2909 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) in __slab_free()
2916 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { in __slab_free()
2917 remove_full(s, n, page); in __slab_free()
2919 stat(s, FREE_ADD_PARTIAL); in __slab_free()
2930 stat(s, FREE_REMOVE_PARTIAL); in __slab_free()
2933 remove_full(s, n, page); in __slab_free()
2937 stat(s, FREE_SLAB); in __slab_free()
2938 discard_slab(s, page); in __slab_free()
2956 static __always_inline void do_slab_free(struct kmem_cache *s, in do_slab_free() argument
2971 tid = this_cpu_read(s->cpu_slab->tid); in do_slab_free()
2972 c = raw_cpu_ptr(s->cpu_slab); in do_slab_free()
2980 set_freepointer(s, tail_obj, c->freelist); in do_slab_free()
2983 s->cpu_slab->freelist, s->cpu_slab->tid, in do_slab_free()
2987 note_cmpxchg_failure("slab_free", s, tid); in do_slab_free()
2990 stat(s, FREE_FASTPATH); in do_slab_free()
2992 __slab_free(s, page, head, tail_obj, cnt, addr); in do_slab_free()
2996 static __always_inline void slab_free(struct kmem_cache *s, struct page *page, in slab_free() argument
3004 if (slab_free_freelist_hook(s, &head, &tail)) in slab_free()
3005 do_slab_free(s, page, head, tail, cnt, addr); in slab_free()
3015 void kmem_cache_free(struct kmem_cache *s, void *x) in kmem_cache_free() argument
3017 s = cache_from_obj(s, x); in kmem_cache_free()
3018 if (!s) in kmem_cache_free()
3020 slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_); in kmem_cache_free()
3030 struct kmem_cache *s; member
3046 int build_detached_freelist(struct kmem_cache *s, size_t size, in build_detached_freelist() argument
3066 if (!s) { in build_detached_freelist()
3076 df->s = page->slab_cache; in build_detached_freelist()
3078 df->s = cache_from_obj(s, object); /* Support for memcg */ in build_detached_freelist()
3083 set_freepointer(df->s, object, NULL); in build_detached_freelist()
3097 set_freepointer(df->s, object, df->freelist); in build_detached_freelist()
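build_detached_freelist() scans the pointer array handed to kmem_cache_free_bulk(), starts a private list with the last object, and pulls every further object that lives on the same page onto that list (df->freelist/df->cnt), NULLing the consumed array slots so the next pass can deal with the remaining pages. A simplified userspace sketch of that grouping step, keyed on an artificial page id instead of virt_to_head_page(); the kernel's lookahead limit and its separate path for non-slab (large kmalloc) pages are omitted, and all names here are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FAKE_PAGE_SHIFT 12      /* assumed 4 KiB pages, used only as a grouping key */

struct detached_freelist_lite {
        uintptr_t page_id;      /* stands in for the struct page of the group */
        void *freelist;         /* head of the private list, cf. df->freelist */
        size_t cnt;
};

/* Group the same-page objects of p[0..size-1] into df and NULL them out in
 * the array; keyed on the last element, as build_detached_freelist() is. */
static size_t build_group(void **p, size_t size, struct detached_freelist_lite *df)
{
        df->page_id = (uintptr_t)p[size - 1] >> FAKE_PAGE_SHIFT;
        df->freelist = NULL;
        df->cnt = 0;

        for (size_t i = size; i-- > 0; ) {
                if (!p[i] || ((uintptr_t)p[i] >> FAKE_PAGE_SHIFT) != df->page_id)
                        continue;
                /* chain the object onto the private list, cf. set_freepointer() */
                *(void **)p[i] = df->freelist;
                df->freelist = p[i];
                df->cnt++;
                p[i] = NULL;            /* mark the slot processed */
        }
        return df->cnt;
}

int main(void)
{
        /* two fake 4 KiB "pages", each holding a few 64-byte objects */
        char *pageA = aligned_alloc(4096, 4096);
        char *pageB = aligned_alloc(4096, 4096);

        if (!pageA || !pageB)
                return 1;

        void *objs[] = { pageA, pageB + 64, pageA + 64, pageB, pageA + 128 };
        struct detached_freelist_lite df;

        size_t n = build_group(objs, sizeof(objs) / sizeof(objs[0]), &df);
        printf("grouped %zu objects from page id %#lx\n", n, (unsigned long)df.page_id);

        free(pageA);
        free(pageB);
        return 0;
}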
3117 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) in kmem_cache_free_bulk() argument
3125 size = build_detached_freelist(s, size, p, &df); in kmem_cache_free_bulk()
3129 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_); in kmem_cache_free_bulk()
3135 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, in kmem_cache_alloc_bulk() argument
3142 s = slab_pre_alloc_hook(s, flags); in kmem_cache_alloc_bulk()
3143 if (unlikely(!s)) in kmem_cache_alloc_bulk()
3151 c = this_cpu_ptr(s->cpu_slab); in kmem_cache_alloc_bulk()
3161 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, in kmem_cache_alloc_bulk()
3166 c = this_cpu_ptr(s->cpu_slab); in kmem_cache_alloc_bulk()
3167 maybe_wipe_obj_freeptr(s, p[i]); in kmem_cache_alloc_bulk()
3171 c->freelist = get_freepointer(s, object); in kmem_cache_alloc_bulk()
3173 maybe_wipe_obj_freeptr(s, p[i]); in kmem_cache_alloc_bulk()
3179 if (unlikely(slab_want_init_on_alloc(flags, s))) { in kmem_cache_alloc_bulk()
3183 memset(p[j], 0, s->object_size); in kmem_cache_alloc_bulk()
3187 slab_post_alloc_hook(s, flags, size, p); in kmem_cache_alloc_bulk()
3191 slab_post_alloc_hook(s, flags, i, p); in kmem_cache_alloc_bulk()
3192 __kmem_cache_free_bulk(s, i, p); in kmem_cache_alloc_bulk()
3335 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) in alloc_kmem_cache_cpus() argument
3344 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), in alloc_kmem_cache_cpus()
3347 if (!s->cpu_slab) in alloc_kmem_cache_cpus()
3350 init_kmem_cache_cpus(s); in alloc_kmem_cache_cpus()
3403 static void free_kmem_cache_nodes(struct kmem_cache *s) in free_kmem_cache_nodes() argument
3408 for_each_kmem_cache_node(s, node, n) { in free_kmem_cache_nodes()
3409 s->node[node] = NULL; in free_kmem_cache_nodes()
3414 void __kmem_cache_release(struct kmem_cache *s) in __kmem_cache_release() argument
3416 cache_random_seq_destroy(s); in __kmem_cache_release()
3417 free_percpu(s->cpu_slab); in __kmem_cache_release()
3418 free_kmem_cache_nodes(s); in __kmem_cache_release()
3421 static int init_kmem_cache_nodes(struct kmem_cache *s) in init_kmem_cache_nodes() argument
3436 free_kmem_cache_nodes(s); in init_kmem_cache_nodes()
3441 s->node[node] = n; in init_kmem_cache_nodes()
3446 static void set_min_partial(struct kmem_cache *s, unsigned long min) in set_min_partial() argument
3452 s->min_partial = min; in set_min_partial()
3455 static void set_cpu_partial(struct kmem_cache *s) in set_cpu_partial() argument
3475 if (!kmem_cache_has_cpu_partial(s)) in set_cpu_partial()
3476 s->cpu_partial = 0; in set_cpu_partial()
3477 else if (s->size >= PAGE_SIZE) in set_cpu_partial()
3478 s->cpu_partial = 2; in set_cpu_partial()
3479 else if (s->size >= 1024) in set_cpu_partial()
3480 s->cpu_partial = 6; in set_cpu_partial()
3481 else if (s->size >= 256) in set_cpu_partial()
3482 s->cpu_partial = 13; in set_cpu_partial()
3484 s->cpu_partial = 30; in set_cpu_partial()
3492 static int calculate_sizes(struct kmem_cache *s, int forced_order) in calculate_sizes() argument
3494 slab_flags_t flags = s->flags; in calculate_sizes()
3495 unsigned int size = s->object_size; in calculate_sizes()
3512 !s->ctor) in calculate_sizes()
3513 s->flags |= __OBJECT_POISON; in calculate_sizes()
3515 s->flags &= ~__OBJECT_POISON; in calculate_sizes()
3523 if ((flags & SLAB_RED_ZONE) && size == s->object_size) in calculate_sizes()
3531 s->inuse = size; in calculate_sizes()
3534 s->ctor)) { in calculate_sizes()
3543 s->offset = size; in calculate_sizes()
3556 kasan_cache_create(s, &size, &s->flags); in calculate_sizes()
3568 s->red_left_pad = sizeof(void *); in calculate_sizes()
3569 s->red_left_pad = ALIGN(s->red_left_pad, s->align); in calculate_sizes()
3570 size += s->red_left_pad; in calculate_sizes()
3579 size = ALIGN(size, s->align); in calculate_sizes()
3580 s->size = size; in calculate_sizes()
3589 s->allocflags = 0; in calculate_sizes()
3591 s->allocflags |= __GFP_COMP; in calculate_sizes()
3593 if (s->flags & SLAB_CACHE_DMA) in calculate_sizes()
3594 s->allocflags |= GFP_DMA; in calculate_sizes()
3596 if (s->flags & SLAB_CACHE_DMA32) in calculate_sizes()
3597 s->allocflags |= GFP_DMA32; in calculate_sizes()
3599 if (s->flags & SLAB_RECLAIM_ACCOUNT) in calculate_sizes()
3600 s->allocflags |= __GFP_RECLAIMABLE; in calculate_sizes()
3605 s->oo = oo_make(order, size); in calculate_sizes()
3606 s->min = oo_make(get_order(size), size); in calculate_sizes()
3607 if (oo_objects(s->oo) > oo_objects(s->max)) in calculate_sizes()
3608 s->max = s->oo; in calculate_sizes()
3610 return !!oo_objects(s->oo); in calculate_sizes()
3613 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) in kmem_cache_open() argument
3615 s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor); in kmem_cache_open()
3617 s->random = get_random_long(); in kmem_cache_open()
3620 if (!calculate_sizes(s, -1)) in kmem_cache_open()
3627 if (get_order(s->size) > get_order(s->object_size)) { in kmem_cache_open()
3628 s->flags &= ~DEBUG_METADATA_FLAGS; in kmem_cache_open()
3629 s->offset = 0; in kmem_cache_open()
3630 if (!calculate_sizes(s, -1)) in kmem_cache_open()
3637 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0) in kmem_cache_open()
3639 s->flags |= __CMPXCHG_DOUBLE; in kmem_cache_open()
3646 set_min_partial(s, ilog2(s->size) / 2); in kmem_cache_open()
3648 set_cpu_partial(s); in kmem_cache_open()
3651 s->remote_node_defrag_ratio = 1000; in kmem_cache_open()
3656 if (init_cache_random_seq(s)) in kmem_cache_open()
3660 if (!init_kmem_cache_nodes(s)) in kmem_cache_open()
3663 if (alloc_kmem_cache_cpus(s)) in kmem_cache_open()
3666 free_kmem_cache_nodes(s); in kmem_cache_open()
3671 static void list_slab_objects(struct kmem_cache *s, struct page *page, in list_slab_objects() argument
3680 slab_err(s, page, text, s->name); in list_slab_objects()
3683 get_map(s, page, map); in list_slab_objects()
3684 for_each_object(p, s, addr, page->objects) { in list_slab_objects()
3686 if (!test_bit(slab_index(p, s, addr), map)) { in list_slab_objects()
3688 print_tracking(s, p); in list_slab_objects()
3701 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) in free_partial() argument
3713 list_slab_objects(s, page, in free_partial()
3720 discard_slab(s, page); in free_partial()
3723 bool __kmem_cache_empty(struct kmem_cache *s) in __kmem_cache_empty() argument
3728 for_each_kmem_cache_node(s, node, n) in __kmem_cache_empty()
3729 if (n->nr_partial || slabs_node(s, node)) in __kmem_cache_empty()
3737 int __kmem_cache_shutdown(struct kmem_cache *s) in __kmem_cache_shutdown() argument
3742 flush_all(s); in __kmem_cache_shutdown()
3744 for_each_kmem_cache_node(s, node, n) { in __kmem_cache_shutdown()
3745 free_partial(s, n); in __kmem_cache_shutdown()
3746 if (n->nr_partial || slabs_node(s, node)) in __kmem_cache_shutdown()
3749 sysfs_slab_remove(s); in __kmem_cache_shutdown()
3787 struct kmem_cache *s; in __kmalloc() local
3793 s = kmalloc_slab(size, flags); in __kmalloc()
3795 if (unlikely(ZERO_OR_NULL_PTR(s))) in __kmalloc()
3796 return s; in __kmalloc()
3798 ret = slab_alloc(s, flags, _RET_IP_); in __kmalloc()
3800 trace_kmalloc(_RET_IP_, ret, size, s->size, flags); in __kmalloc()
3802 ret = kasan_kmalloc(s, ret, size, flags); in __kmalloc()
3828 struct kmem_cache *s; in __kmalloc_node() local
3841 s = kmalloc_slab(size, flags); in __kmalloc_node()
3843 if (unlikely(ZERO_OR_NULL_PTR(s))) in __kmalloc_node()
3844 return s; in __kmalloc_node()
3846 ret = slab_alloc_node(s, flags, node, _RET_IP_); in __kmalloc_node()
3848 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); in __kmalloc_node()
3850 ret = kasan_kmalloc(s, ret, size, flags); in __kmalloc_node()
3869 struct kmem_cache *s; in __check_heap_object() local
3876 s = page->slab_cache; in __check_heap_object()
3884 offset = (ptr - page_address(page)) % s->size; in __check_heap_object()
3887 if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) { in __check_heap_object()
3888 if (offset < s->red_left_pad) in __check_heap_object()
3890 s->name, to_user, offset, n); in __check_heap_object()
3891 offset -= s->red_left_pad; in __check_heap_object()
3895 if (offset >= s->useroffset && in __check_heap_object()
3896 offset - s->useroffset <= s->usersize && in __check_heap_object()
3897 n <= s->useroffset - offset + s->usersize) in __check_heap_object()
3906 object_size = slab_ksize(s); in __check_heap_object()
3909 usercopy_warn("SLUB object", s->name, to_user, offset, n); in __check_heap_object()
3913 usercopy_abort("SLUB object", s->name, to_user, offset, n); in __check_heap_object()
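__check_heap_object() first folds the left red zone out of the offset (the s->red_left_pad lines above) and then accepts the copy only if it lies entirely inside the cache's whitelisted window starting at s->useroffset and spanning s->usersize bytes; anything else falls through to the warn/abort reporting. The window test can be exercised standalone; the sketch below mirrors the three comparisons above, with usercopy_warn()/usercopy_abort() behaviour not modelled:

#include <stdbool.h>
#include <stdio.h>

/* Equivalent of the window test in __check_heap_object(): a copy of n bytes
 * starting at offset must lie inside [useroffset, useroffset + usersize). */
static bool usercopy_in_window(unsigned long offset, unsigned long n,
                               unsigned long useroffset, unsigned long usersize)
{
        return offset >= useroffset &&
               offset - useroffset <= usersize &&
               n <= useroffset - offset + usersize;
}

int main(void)
{
        /* assume a cache whose whitelisted region is bytes 16..47 of each object */
        unsigned long useroffset = 16, usersize = 32;

        printf("%d\n", usercopy_in_window(16, 32, useroffset, usersize));  /* 1: exact fit */
        printf("%d\n", usercopy_in_window(24,  8, useroffset, usersize));  /* 1: inside    */
        printf("%d\n", usercopy_in_window( 8,  8, useroffset, usersize));  /* 0: before    */
        printf("%d\n", usercopy_in_window(40, 16, useroffset, usersize));  /* 0: overruns  */
        return 0;
}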
3971 int __kmem_cache_shrink(struct kmem_cache *s) in __kmem_cache_shrink() argument
3983 flush_all(s); in __kmem_cache_shrink()
3984 for_each_kmem_cache_node(s, node, n) { in __kmem_cache_shrink()
4024 discard_slab(s, page); in __kmem_cache_shrink()
4026 if (slabs_node(s, node)) in __kmem_cache_shrink()
4034 void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s) in __kmemcg_cache_deactivate_after_rcu() argument
4048 if (!__kmem_cache_shrink(s)) in __kmemcg_cache_deactivate_after_rcu()
4049 sysfs_slab_remove(s); in __kmemcg_cache_deactivate_after_rcu()
4052 void __kmemcg_cache_deactivate(struct kmem_cache *s) in __kmemcg_cache_deactivate() argument
4058 slub_set_cpu_partial(s, 0); in __kmemcg_cache_deactivate()
4059 s->min_partial = 0; in __kmemcg_cache_deactivate()
4065 struct kmem_cache *s; in slab_mem_going_offline_callback() local
4068 list_for_each_entry(s, &slab_caches, list) in slab_mem_going_offline_callback()
4069 __kmem_cache_shrink(s); in slab_mem_going_offline_callback()
4078 struct kmem_cache *s; in slab_mem_offline_callback() local
4092 list_for_each_entry(s, &slab_caches, list) { in slab_mem_offline_callback()
4093 n = get_node(s, offline_node); in slab_mem_offline_callback()
4101 BUG_ON(slabs_node(s, offline_node)); in slab_mem_offline_callback()
4103 s->node[offline_node] = NULL; in slab_mem_offline_callback()
4113 struct kmem_cache *s; in slab_mem_going_online_callback() local
4131 list_for_each_entry(s, &slab_caches, list) { in slab_mem_going_online_callback()
4143 s->node[nid] = n; in slab_mem_going_online_callback()
4195 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); in bootstrap() local
4198 memcpy(s, static_cache, kmem_cache->object_size); in bootstrap()
4205 __flush_cpu_slab(s, smp_processor_id()); in bootstrap()
4206 for_each_kmem_cache_node(s, node, n) { in bootstrap()
4210 p->slab_cache = s; in bootstrap()
4214 p->slab_cache = s; in bootstrap()
4217 slab_init_memcg_params(s); in bootstrap()
4218 list_add(&s->list, &slab_caches); in bootstrap()
4219 memcg_link_cache(s, NULL); in bootstrap()
4220 return s; in bootstrap()
4274 struct kmem_cache *s, *c; in __kmem_cache_alias() local
4276 s = find_mergeable(size, align, flags, name, ctor); in __kmem_cache_alias()
4277 if (s) { in __kmem_cache_alias()
4278 s->refcount++; in __kmem_cache_alias()
4284 s->object_size = max(s->object_size, size); in __kmem_cache_alias()
4285 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); in __kmem_cache_alias()
4287 for_each_memcg_cache(c, s) { in __kmem_cache_alias()
4288 c->object_size = s->object_size; in __kmem_cache_alias()
4292 if (sysfs_slab_alias(s, name)) { in __kmem_cache_alias()
4293 s->refcount--; in __kmem_cache_alias()
4294 s = NULL; in __kmem_cache_alias()
4298 return s; in __kmem_cache_alias()
4301 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags) in __kmem_cache_create() argument
4305 err = kmem_cache_open(s, flags); in __kmem_cache_create()
4313 memcg_propagate_slab_attrs(s); in __kmem_cache_create()
4314 err = sysfs_slab_add(s); in __kmem_cache_create()
4316 __kmem_cache_release(s); in __kmem_cache_create()
4323 struct kmem_cache *s; in __kmalloc_track_caller() local
4329 s = kmalloc_slab(size, gfpflags); in __kmalloc_track_caller()
4331 if (unlikely(ZERO_OR_NULL_PTR(s))) in __kmalloc_track_caller()
4332 return s; in __kmalloc_track_caller()
4334 ret = slab_alloc(s, gfpflags, caller); in __kmalloc_track_caller()
4337 trace_kmalloc(caller, ret, size, s->size, gfpflags); in __kmalloc_track_caller()
4346 struct kmem_cache *s; in __kmalloc_node_track_caller() local
4359 s = kmalloc_slab(size, gfpflags); in __kmalloc_node_track_caller()
4361 if (unlikely(ZERO_OR_NULL_PTR(s))) in __kmalloc_node_track_caller()
4362 return s; in __kmalloc_node_track_caller()
4364 ret = slab_alloc_node(s, gfpflags, node, caller); in __kmalloc_node_track_caller()
4367 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); in __kmalloc_node_track_caller()
4386 static int validate_slab(struct kmem_cache *s, struct page *page, in validate_slab() argument
4392 if (!check_slab(s, page) || in validate_slab()
4393 !on_freelist(s, page, NULL)) in validate_slab()
4399 get_map(s, page, map); in validate_slab()
4400 for_each_object(p, s, addr, page->objects) { in validate_slab()
4401 if (test_bit(slab_index(p, s, addr), map)) in validate_slab()
4402 if (!check_object(s, page, p, SLUB_RED_INACTIVE)) in validate_slab()
4406 for_each_object(p, s, addr, page->objects) in validate_slab()
4407 if (!test_bit(slab_index(p, s, addr), map)) in validate_slab()
4408 if (!check_object(s, page, p, SLUB_RED_ACTIVE)) in validate_slab()
4413 static void validate_slab_slab(struct kmem_cache *s, struct page *page, in validate_slab_slab() argument
4417 validate_slab(s, page, map); in validate_slab_slab()
4421 static int validate_slab_node(struct kmem_cache *s, in validate_slab_node() argument
4431 validate_slab_slab(s, page, map); in validate_slab_node()
4436 s->name, count, n->nr_partial); in validate_slab_node()
4438 if (!(s->flags & SLAB_STORE_USER)) in validate_slab_node()
4442 validate_slab_slab(s, page, map); in validate_slab_node()
4447 s->name, count, atomic_long_read(&n->nr_slabs)); in validate_slab_node()
4454 static long validate_slab_cache(struct kmem_cache *s) in validate_slab_cache() argument
4459 unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL); in validate_slab_cache()
4464 flush_all(s); in validate_slab_cache()
4465 for_each_kmem_cache_node(s, node, n) in validate_slab_cache()
4466 count += validate_slab_node(s, n, map); in validate_slab_cache()
4520 static int add_location(struct loc_track *t, struct kmem_cache *s, in add_location() argument
4596 static void process_slab(struct loc_track *t, struct kmem_cache *s, in process_slab() argument
4604 get_map(s, page, map); in process_slab()
4606 for_each_object(p, s, addr, page->objects) in process_slab()
4607 if (!test_bit(slab_index(p, s, addr), map)) in process_slab()
4608 add_location(t, s, get_track(s, p, alloc)); in process_slab()
4611 static int list_locations(struct kmem_cache *s, char *buf, in list_locations() argument
4619 unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL); in list_locations()
4627 flush_all(s); in list_locations()
4629 for_each_kmem_cache_node(s, node, n) { in list_locations()
4638 process_slab(&t, s, page, alloc, map); in list_locations()
4640 process_slab(&t, s, page, alloc, map); in list_locations()
4787 static ssize_t show_slab_objects(struct kmem_cache *s, in show_slab_objects() argument
4803 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, in show_slab_objects()
4853 for_each_kmem_cache_node(s, node, n) { in show_slab_objects()
4871 for_each_kmem_cache_node(s, node, n) { in show_slab_objects()
4894 static int any_slab_objects(struct kmem_cache *s) in any_slab_objects() argument
4899 for_each_kmem_cache_node(s, node, n) in any_slab_objects()
4912 ssize_t (*show)(struct kmem_cache *s, char *buf);
4913 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4924 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) in slab_size_show() argument
4926 return sprintf(buf, "%u\n", s->size); in slab_size_show()
4930 static ssize_t align_show(struct kmem_cache *s, char *buf) in align_show() argument
4932 return sprintf(buf, "%u\n", s->align); in align_show()
4936 static ssize_t object_size_show(struct kmem_cache *s, char *buf) in object_size_show() argument
4938 return sprintf(buf, "%u\n", s->object_size); in object_size_show()
4942 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) in objs_per_slab_show() argument
4944 return sprintf(buf, "%u\n", oo_objects(s->oo)); in objs_per_slab_show()
4948 static ssize_t order_store(struct kmem_cache *s, in order_store() argument
4961 calculate_sizes(s, order); in order_store()
4965 static ssize_t order_show(struct kmem_cache *s, char *buf) in order_show() argument
4967 return sprintf(buf, "%u\n", oo_order(s->oo)); in order_show()
4971 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) in min_partial_show() argument
4973 return sprintf(buf, "%lu\n", s->min_partial); in min_partial_show()
4976 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, in min_partial_store() argument
4986 set_min_partial(s, min); in min_partial_store()
4991 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) in cpu_partial_show() argument
4993 return sprintf(buf, "%u\n", slub_cpu_partial(s)); in cpu_partial_show()
4996 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, in cpu_partial_store() argument
5005 if (objects && !kmem_cache_has_cpu_partial(s)) in cpu_partial_store()
5008 slub_set_cpu_partial(s, objects); in cpu_partial_store()
5009 flush_all(s); in cpu_partial_store()
5014 static ssize_t ctor_show(struct kmem_cache *s, char *buf) in ctor_show() argument
5016 if (!s->ctor) in ctor_show()
5018 return sprintf(buf, "%pS\n", s->ctor); in ctor_show()
5022 static ssize_t aliases_show(struct kmem_cache *s, char *buf) in aliases_show() argument
5024 return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); in aliases_show()
5028 static ssize_t partial_show(struct kmem_cache *s, char *buf) in partial_show() argument
5030 return show_slab_objects(s, buf, SO_PARTIAL); in partial_show()
5034 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) in cpu_slabs_show() argument
5036 return show_slab_objects(s, buf, SO_CPU); in cpu_slabs_show()
5040 static ssize_t objects_show(struct kmem_cache *s, char *buf) in objects_show() argument
5042 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); in objects_show()
5046 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) in objects_partial_show() argument
5048 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); in objects_partial_show()
5052 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) in slabs_cpu_partial_show() argument
5062 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); in slabs_cpu_partial_show()
5076 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); in slabs_cpu_partial_show()
5087 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) in reclaim_account_show() argument
5089 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); in reclaim_account_show()
5092 static ssize_t reclaim_account_store(struct kmem_cache *s, in reclaim_account_store() argument
5095 s->flags &= ~SLAB_RECLAIM_ACCOUNT; in reclaim_account_store()
5097 s->flags |= SLAB_RECLAIM_ACCOUNT; in reclaim_account_store()
5102 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) in hwcache_align_show() argument
5104 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); in hwcache_align_show()
5109 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) in cache_dma_show() argument
5111 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); in cache_dma_show()
5116 static ssize_t usersize_show(struct kmem_cache *s, char *buf) in usersize_show() argument
5118 return sprintf(buf, "%u\n", s->usersize); in usersize_show()
5122 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) in destroy_by_rcu_show() argument
5124 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); in destroy_by_rcu_show()
5129 static ssize_t slabs_show(struct kmem_cache *s, char *buf) in slabs_show() argument
5131 return show_slab_objects(s, buf, SO_ALL); in slabs_show()
5135 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) in total_objects_show() argument
5137 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); in total_objects_show()
5141 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) in sanity_checks_show() argument
5143 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); in sanity_checks_show()
5146 static ssize_t sanity_checks_store(struct kmem_cache *s, in sanity_checks_store() argument
5149 s->flags &= ~SLAB_CONSISTENCY_CHECKS; in sanity_checks_store()
5151 s->flags &= ~__CMPXCHG_DOUBLE; in sanity_checks_store()
5152 s->flags |= SLAB_CONSISTENCY_CHECKS; in sanity_checks_store()
5158 static ssize_t trace_show(struct kmem_cache *s, char *buf) in trace_show() argument
5160 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); in trace_show()
5163 static ssize_t trace_store(struct kmem_cache *s, const char *buf, in trace_store() argument
5171 if (s->refcount > 1) in trace_store()
5174 s->flags &= ~SLAB_TRACE; in trace_store()
5176 s->flags &= ~__CMPXCHG_DOUBLE; in trace_store()
5177 s->flags |= SLAB_TRACE; in trace_store()
5183 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) in red_zone_show() argument
5185 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); in red_zone_show()
5188 static ssize_t red_zone_store(struct kmem_cache *s, in red_zone_store() argument
5191 if (any_slab_objects(s)) in red_zone_store()
5194 s->flags &= ~SLAB_RED_ZONE; in red_zone_store()
5196 s->flags |= SLAB_RED_ZONE; in red_zone_store()
5198 calculate_sizes(s, -1); in red_zone_store()
5203 static ssize_t poison_show(struct kmem_cache *s, char *buf) in poison_show() argument
5205 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON)); in poison_show()
5208 static ssize_t poison_store(struct kmem_cache *s, in poison_store() argument
5211 if (any_slab_objects(s)) in poison_store()
5214 s->flags &= ~SLAB_POISON; in poison_store()
5216 s->flags |= SLAB_POISON; in poison_store()
5218 calculate_sizes(s, -1); in poison_store()
5223 static ssize_t store_user_show(struct kmem_cache *s, char *buf) in store_user_show() argument
5225 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); in store_user_show()
5228 static ssize_t store_user_store(struct kmem_cache *s, in store_user_store() argument
5231 if (any_slab_objects(s)) in store_user_store()
5234 s->flags &= ~SLAB_STORE_USER; in store_user_store()
5236 s->flags &= ~__CMPXCHG_DOUBLE; in store_user_store()
5237 s->flags |= SLAB_STORE_USER; in store_user_store()
5239 calculate_sizes(s, -1); in store_user_store()
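The *_store() handlers above only flip debug bits when it is safe to do so: trace_store() rejects shared caches (s->refcount > 1), red_zone/poison/store_user refuse the change while any_slab_objects() reports live objects, and the layout-changing ones then re-run calculate_sizes() so the object layout matches the new flags. A hedged userspace sketch of attempting such a toggle; on a busy cache like kmalloc-64 the write is expected to be rejected (EBUSY in this code base), and root is required.

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* cache name and attribute are examples only */
        const char *path = "/sys/kernel/slab/kmalloc-64/poison";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fputs("1", f) == EOF || fflush(f) == EOF)
                fprintf(stderr, "write rejected: %s\n", strerror(errno));
        else
                puts("poisoning enabled; object layout was recomputed");
        fclose(f);
        return 0;
}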
5244 static ssize_t validate_show(struct kmem_cache *s, char *buf) in validate_show() argument
5249 static ssize_t validate_store(struct kmem_cache *s, in validate_store() argument
5255 ret = validate_slab_cache(s); in validate_store()
5263 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf) in alloc_calls_show() argument
5265 if (!(s->flags & SLAB_STORE_USER)) in alloc_calls_show()
5267 return list_locations(s, buf, TRACK_ALLOC); in alloc_calls_show()
5271 static ssize_t free_calls_show(struct kmem_cache *s, char *buf) in free_calls_show() argument
5273 if (!(s->flags & SLAB_STORE_USER)) in free_calls_show()
5275 return list_locations(s, buf, TRACK_FREE); in free_calls_show()
5281 static ssize_t failslab_show(struct kmem_cache *s, char *buf) in failslab_show() argument
5283 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); in failslab_show()
5286 static ssize_t failslab_store(struct kmem_cache *s, const char *buf, in failslab_store() argument
5289 if (s->refcount > 1) in failslab_store()
5292 s->flags &= ~SLAB_FAILSLAB; in failslab_store()
5294 s->flags |= SLAB_FAILSLAB; in failslab_store()
5300 static ssize_t shrink_show(struct kmem_cache *s, char *buf) in shrink_show() argument
5305 static ssize_t shrink_store(struct kmem_cache *s, in shrink_store() argument
5309 kmem_cache_shrink_all(s); in shrink_store()
5317 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) in remote_node_defrag_ratio_show() argument
5319 return sprintf(buf, "%u\n", s->remote_node_defrag_ratio / 10); in remote_node_defrag_ratio_show()
5322 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, in remote_node_defrag_ratio_store() argument
5334 s->remote_node_defrag_ratio = ratio * 10; in remote_node_defrag_ratio_store()
5342 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) in show_stat() argument
5353 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; in show_stat()
5371 static void clear_stat(struct kmem_cache *s, enum stat_item si) in clear_stat() argument
5376 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; in clear_stat()
5380 static ssize_t text##_show(struct kmem_cache *s, char *buf) \
5382 return show_stat(s, buf, si); \
5384 static ssize_t text##_store(struct kmem_cache *s, \
5389 clear_stat(s, si); \
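The text##_show/text##_store fragments above are stamped out by a token-pasting macro (STAT_ATTR in slub.c), one pair per enum stat_item: show sums the per-CPU counters through show_stat(), and writing "0" clears them through clear_stat(). A standalone sketch of that pattern with a faked per-CPU array; the real show_stat() also appends per-CPU "C<n>=<count>" values, omitted here.

#include <stdio.h>

enum stat_item { ALLOC_FASTPATH, ALLOC_SLOWPATH, NR_STATS };

#define NR_FAKE_CPUS 4
static unsigned int fake_stat[NR_FAKE_CPUS][NR_STATS];

static int show_stat(char *buf, size_t len, enum stat_item si)
{
        unsigned int sum = 0;

        for (int cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
                sum += fake_stat[cpu][si];
        return snprintf(buf, len, "%u\n", sum);
}

static void clear_stat(enum stat_item si)
{
        for (int cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
                fake_stat[cpu][si] = 0;
}

/* the ## paste builds alloc_fastpath_show(), alloc_fastpath_store(), ... */
#define STAT_ATTR(si, text)                                     \
static int text##_show(char *buf, size_t len)                   \
{                                                               \
        return show_stat(buf, len, si);                         \
}                                                               \
static void text##_store(const char *buf)                       \
{                                                               \
        if (buf[0] == '0')                                      \
                clear_stat(si);                                 \
}

STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath)
STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath)

int main(void)
{
        char buf[32];

        fake_stat[0][ALLOC_FASTPATH] = 3;
        fake_stat[2][ALLOC_FASTPATH] = 5;
        fake_stat[1][ALLOC_SLOWPATH] = 2;

        alloc_fastpath_show(buf, sizeof(buf));
        fputs(buf, stdout);                     /* 8 */
        alloc_slowpath_show(buf, sizeof(buf));
        fputs(buf, stdout);                     /* 2 */

        alloc_fastpath_store("0");              /* "0" clears the counter */
        alloc_slowpath_store("0");
        alloc_fastpath_show(buf, sizeof(buf));
        fputs(buf, stdout);                     /* 0 */
        return 0;
}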
5504 struct kmem_cache *s; in slab_attr_show() local
5508 s = to_slab(kobj); in slab_attr_show()
5513 err = attribute->show(s, buf); in slab_attr_show()
5523 struct kmem_cache *s; in slab_attr_store() local
5527 s = to_slab(kobj); in slab_attr_store()
5532 err = attribute->store(s, buf, len); in slab_attr_store()
5534 if (slab_state >= FULL && err >= 0 && is_root_cache(s)) { in slab_attr_store()
5538 if (s->max_attr_size < len) in slab_attr_store()
5539 s->max_attr_size = len; in slab_attr_store()
5558 for_each_memcg_cache(c, s) in slab_attr_store()
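Both entry points recover the cache from the kobject that sysfs hands back: to_slab() is a container_of() wrapper around the kobj embedded in struct kmem_cache, and only then is the attribute's show() or store() method called (with stores on a root cache additionally replayed on its children via for_each_memcg_cache()). A standalone sketch of that container_of() dispatch; the structures here are stand-ins, not the kernel ones.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct kobject { const char *name; };

struct kmem_cache {
        unsigned int size;
        struct kobject kobj;            /* what sysfs actually passes around */
};

struct slab_attribute {
        const char *name;
        int (*show)(struct kmem_cache *s, char *buf, size_t len);
};

static int size_show(struct kmem_cache *s, char *buf, size_t len)
{
        return snprintf(buf, len, "%u\n", s->size);
}

/* analogue of slab_attr_show(): kobject in, attribute method out */
static int slab_attr_show(struct kobject *kobj, struct slab_attribute *attr,
                          char *buf, size_t len)
{
        struct kmem_cache *s = container_of(kobj, struct kmem_cache, kobj);

        if (!attr->show)
                return -1;
        return attr->show(s, buf, len);
}

int main(void)
{
        struct kmem_cache cache = { .size = 192, .kobj = { "demo" } };
        struct slab_attribute size_attr = { "size", size_show };
        char buf[32];

        slab_attr_show(&cache.kobj, &size_attr, buf, sizeof(buf));
        fputs(buf, stdout);             /* prints "192" */
        return 0;
}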
5566 static void memcg_propagate_slab_attrs(struct kmem_cache *s) in memcg_propagate_slab_attrs() argument
5573 if (is_root_cache(s)) in memcg_propagate_slab_attrs()
5576 root_cache = s->memcg_params.root_cache; in memcg_propagate_slab_attrs()
5616 attr->store(s, buf, len); in memcg_propagate_slab_attrs()
5654 static inline struct kset *cache_kset(struct kmem_cache *s) in cache_kset() argument
5657 if (!is_root_cache(s)) in cache_kset()
5658 return s->memcg_params.root_cache->memcg_kset; in cache_kset()
5669 static char *create_unique_id(struct kmem_cache *s) in create_unique_id() argument
5684 if (s->flags & SLAB_CACHE_DMA) in create_unique_id()
5686 if (s->flags & SLAB_CACHE_DMA32) in create_unique_id()
5688 if (s->flags & SLAB_RECLAIM_ACCOUNT) in create_unique_id()
5690 if (s->flags & SLAB_CONSISTENCY_CHECKS) in create_unique_id()
5692 if (s->flags & SLAB_ACCOUNT) in create_unique_id()
5696 p += sprintf(p, "%07u", s->size); in create_unique_id()
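For mergeable caches, create_unique_id() builds the sysfs directory name out of a ':' prefix, one character per distinguishing flag (the five tests above), an optional '-' separator, and the object size padded by the "%07u" format. A standalone sketch of that scheme; the particular flag letters are recalled from slub.c and should be read as illustrative rather than authoritative.

#include <stdio.h>

#define SLAB_CONSISTENCY_CHECKS (1U << 0)
#define SLAB_CACHE_DMA          (1U << 1)
#define SLAB_CACHE_DMA32        (1U << 2)
#define SLAB_RECLAIM_ACCOUNT    (1U << 3)
#define SLAB_ACCOUNT            (1U << 4)

static void create_unique_id(char *buf, size_t len,
                             unsigned int flags, unsigned int size)
{
        char *p = buf;

        *p++ = ':';
        /* one character per distinguishing flag, as in the tests above */
        if (flags & SLAB_CACHE_DMA)
                *p++ = 'd';
        if (flags & SLAB_CACHE_DMA32)
                *p++ = 'D';
        if (flags & SLAB_RECLAIM_ACCOUNT)
                *p++ = 'a';
        if (flags & SLAB_CONSISTENCY_CHECKS)
                *p++ = 'F';
        if (flags & SLAB_ACCOUNT)
                *p++ = 'A';
        if (p != buf + 1)
                *p++ = '-';
        snprintf(p, len - (size_t)(p - buf), "%07u", size);
}

int main(void)
{
        char id[32];

        create_unique_id(id, sizeof(id),
                         SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT, 192);
        puts(id);       /* e.g. ":aA-0000192" */
        return 0;
}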
5704 struct kmem_cache *s = in sysfs_slab_remove_workfn() local
5707 if (!s->kobj.state_in_sysfs) in sysfs_slab_remove_workfn()
5717 kset_unregister(s->memcg_kset); in sysfs_slab_remove_workfn()
5719 kobject_uevent(&s->kobj, KOBJ_REMOVE); in sysfs_slab_remove_workfn()
5721 kobject_put(&s->kobj); in sysfs_slab_remove_workfn()
5724 static int sysfs_slab_add(struct kmem_cache *s) in sysfs_slab_add() argument
5728 struct kset *kset = cache_kset(s); in sysfs_slab_add()
5729 int unmergeable = slab_unmergeable(s); in sysfs_slab_add()
5731 INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn); in sysfs_slab_add()
5734 kobject_init(&s->kobj, &slab_ktype); in sysfs_slab_add()
5748 sysfs_remove_link(&slab_kset->kobj, s->name); in sysfs_slab_add()
5749 name = s->name; in sysfs_slab_add()
5755 name = create_unique_id(s); in sysfs_slab_add()
5758 s->kobj.kset = kset; in sysfs_slab_add()
5759 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); in sysfs_slab_add()
5763 err = sysfs_create_group(&s->kobj, &slab_attr_group); in sysfs_slab_add()
5768 if (is_root_cache(s) && memcg_sysfs_enabled) { in sysfs_slab_add()
5769 s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj); in sysfs_slab_add()
5770 if (!s->memcg_kset) { in sysfs_slab_add()
5777 kobject_uevent(&s->kobj, KOBJ_ADD); in sysfs_slab_add()
5780 sysfs_slab_alias(s, s->name); in sysfs_slab_add()
5787 kobject_del(&s->kobj); in sysfs_slab_add()
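sysfs_slab_add() follows the stock kobject registration sequence: pick the kset via cache_kset(), kobject_init_and_add() the embedded kobject under either the cache's own name or the unique id, attach the attribute group with sysfs_create_group(), optionally create the per-cache "cgroup" kset, send a KOBJ_ADD uevent, and unwind on failure. A hedged kernel-module sketch of the same sequence; every name in it is invented, and it builds only inside a kernel tree.

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/sysfs.h>

static struct kset *demo_kset;
static struct kobject demo_kobj;

static ssize_t value_show(struct kobject *kobj, struct kobj_attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", 42);
}

static struct kobj_attribute value_attr = __ATTR_RO(value);

static struct attribute *demo_attrs[] = {
        &value_attr.attr,
        NULL,
};

static const struct attribute_group demo_group = {
        .attrs = demo_attrs,
};

static void demo_release(struct kobject *kobj)
{
        /* nothing to free: the kobject is static in this sketch */
}

static struct kobj_type demo_ktype = {
        .sysfs_ops      = &kobj_sysfs_ops,
        .release        = demo_release,
};

static int __init demo_init(void)
{
        int err;

        /* analogue of cache_kset(): a kset to group the objects under */
        demo_kset = kset_create_and_add("slab_demo", NULL, kernel_kobj);
        if (!demo_kset)
                return -ENOMEM;

        demo_kobj.kset = demo_kset;
        err = kobject_init_and_add(&demo_kobj, &demo_ktype, NULL, "%s",
                                   "demo-cache");
        if (err)
                goto out_put;

        err = sysfs_create_group(&demo_kobj, &demo_group);
        if (err)
                goto out_put;

        kobject_uevent(&demo_kobj, KOBJ_ADD);
        return 0;

out_put:
        kobject_put(&demo_kobj);        /* also removes it if it was added */
        kset_unregister(demo_kset);
        return err;
}

static void __exit demo_exit(void)
{
        sysfs_remove_group(&demo_kobj, &demo_group);
        kobject_put(&demo_kobj);
        kset_unregister(demo_kset);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");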
5791 static void sysfs_slab_remove(struct kmem_cache *s) in sysfs_slab_remove() argument
5800 kobject_get(&s->kobj); in sysfs_slab_remove()
5801 schedule_work(&s->kobj_remove_work); in sysfs_slab_remove()
5804 void sysfs_slab_unlink(struct kmem_cache *s) in sysfs_slab_unlink() argument
5807 kobject_del(&s->kobj); in sysfs_slab_unlink()
5810 void sysfs_slab_release(struct kmem_cache *s) in sysfs_slab_release() argument
5813 kobject_put(&s->kobj); in sysfs_slab_release()
5821 struct kmem_cache *s; member
5828 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) in sysfs_slab_alias() argument
5837 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); in sysfs_slab_alias()
5844 al->s = s; in sysfs_slab_alias()
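sysfs_slab_alias() exposes merged caches as ordinary symlinks: once sysfs is initialised it simply calls sysfs_create_link() from the alias name to the unique-id kobject, and aliases created earlier are queued (al->s, al->name) until slab_sysfs_init() replays them. A runnable userspace sketch that resolves such a link; the default path is only an example and may not be a symlink on a given system.

#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        const char *alias = argc > 1 ? argv[1]
                                     : "/sys/kernel/slab/anon_vma_chain";
        char target[256];
        ssize_t n = readlink(alias, target, sizeof(target) - 1);

        if (n < 0) {
                perror("readlink");     /* not a symlink: cache not merged */
                return 1;
        }
        target[n] = '\0';
        printf("%s -> %s\n", alias, target);
        return 0;
}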
5853 struct kmem_cache *s; in slab_sysfs_init() local
5867 list_for_each_entry(s, &slab_caches, list) { in slab_sysfs_init()
5868 err = sysfs_slab_add(s); in slab_sysfs_init()
5871 s->name); in slab_sysfs_init()
5878 err = sysfs_slab_alias(al->s, al->name); in slab_sysfs_init()
5897 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) in get_slabinfo() argument
5905 for_each_kmem_cache_node(s, node, n) { in get_slabinfo()
5915 sinfo->objects_per_slab = oo_objects(s->oo); in get_slabinfo()
5916 sinfo->cache_order = oo_order(s->oo); in get_slabinfo()
5919 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) in slabinfo_show_stats() argument
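get_slabinfo() walks the per-node lists to fill the counters that /proc/slabinfo prints for every cache (active objects, total objects, objects per slab, pages per slab), while slabinfo_show_stats() contributes nothing extra for SLUB. A runnable userspace sketch that reads those columns back; reading /proc/slabinfo usually requires root.

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/slabinfo", "r");
        char line[512], name[64];
        unsigned long active, total, objsize, objperslab, pagesperslab;

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* skip the version line and the column header */
        fgets(line, sizeof(line), f);
        fgets(line, sizeof(line), f);

        while (fgets(line, sizeof(line), f)) {
                if (sscanf(line, "%63s %lu %lu %lu %lu %lu", name, &active,
                           &total, &objsize, &objperslab, &pagesperslab) == 6)
                        printf("%-24s %8lu/%-8lu objs, %3lu objs/slab, %lu page(s)/slab\n",
                               name, active, total, objperslab, pagesperslab);
        }
        fclose(f);
        return 0;
}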