Lines matching references to the identifier s (in nearly every hit, the struct kmem_cache * pointer threaded through the SLUB allocator, mm/slub.c)

129 static inline bool kmem_cache_debug(struct kmem_cache *s)  in kmem_cache_debug()  argument
131 return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS); in kmem_cache_debug()
134 void *fixup_red_left(struct kmem_cache *s, void *p) in fixup_red_left() argument
136 if (kmem_cache_debug_flags(s, SLAB_RED_ZONE)) in fixup_red_left()
137 p += s->red_left_pad; in fixup_red_left()
142 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) in kmem_cache_has_cpu_partial() argument
145 return !kmem_cache_debug(s); in kmem_cache_has_cpu_partial()
210 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } in sysfs_slab_add() argument
211 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) in sysfs_slab_alias() argument
218 static inline void debugfs_slab_add(struct kmem_cache *s) { } in debugfs_slab_add() argument
221 static inline void stat(const struct kmem_cache *s, enum stat_item si) in stat() argument
228 raw_cpu_inc(s->cpu_slab->stat[si]); in stat()
241 static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr, in freelist_ptr() argument
255 return (void *)((unsigned long)ptr ^ s->random ^ in freelist_ptr()
263 static inline void *freelist_dereference(const struct kmem_cache *s, in freelist_dereference() argument
266 return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr), in freelist_dereference()
270 static inline void *get_freepointer(struct kmem_cache *s, void *object) in get_freepointer() argument
273 return freelist_dereference(s, object + s->offset); in get_freepointer()
276 static void prefetch_freepointer(const struct kmem_cache *s, void *object) in prefetch_freepointer() argument
278 prefetch(object + s->offset); in prefetch_freepointer()
281 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object) in get_freepointer_safe() argument
287 return get_freepointer(s, object); in get_freepointer_safe()
290 freepointer_addr = (unsigned long)object + s->offset; in get_freepointer_safe()
292 return freelist_ptr(s, p, freepointer_addr); in get_freepointer_safe()
295 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) in set_freepointer() argument
297 unsigned long freeptr_addr = (unsigned long)object + s->offset; in set_freepointer()
304 *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr); in set_freepointer()
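
The freelist_ptr()/freelist_dereference()/set_freepointer() hits above are SLUB's hardened free-pointer scheme: the link stored inside a free object is XORed with the per-cache secret s->random and with the address it is stored at, so a plain heap overwrite cannot forge a usable freelist entry. Below is a minimal userspace sketch of that round trip using a mock fake_cache struct; the kernel additionally byte-swaps the storage address and strips KASAN tags, which is omitted here.

/*
 * Sketch only: models the XOR mangling hinted at by freelist_ptr() above.
 * fake_cache and the helpers are illustrative stand-ins, not kernel code.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct fake_cache {
	uintptr_t random;	/* per-cache secret, cf. s->random */
	size_t offset;		/* where the free pointer lives, cf. s->offset */
};

/* Encode: mix the plain pointer with the secret and its storage address. */
static void *encode_freeptr(const struct fake_cache *s, void *ptr, uintptr_t ptr_addr)
{
	return (void *)((uintptr_t)ptr ^ s->random ^ ptr_addr);
}

/* Decode is the same XOR applied again, cf. freelist_dereference(). */
static void *decode_freeptr(const struct fake_cache *s, uintptr_t ptr_addr)
{
	return encode_freeptr(s, *(void **)ptr_addr, ptr_addr);
}

static void set_freeptr(const struct fake_cache *s, void *object, void *fp)
{
	uintptr_t slot = (uintptr_t)object + s->offset;

	*(void **)slot = encode_freeptr(s, fp, slot);
}

static void *get_freeptr(const struct fake_cache *s, void *object)
{
	return decode_freeptr(s, (uintptr_t)object + s->offset);
}

int main(void)
{
	struct fake_cache s = { .random = 0xa5a55a5a1234abcdULL, .offset = 0 };
	void *a[8], *b[8];

	set_freeptr(&s, a, b);			/* chain a -> b */
	assert(get_freeptr(&s, a) == b);	/* decoding recovers the link */
	printf("stored: %p  decoded: %p\n", *(void **)a, get_freeptr(&s, a));
	return 0;
}
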
354 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page, in __cmpxchg_double_slab() argument
362 if (s->flags & __CMPXCHG_DOUBLE) { in __cmpxchg_double_slab()
382 stat(s, CMPXCHG_DOUBLE_FAIL); in __cmpxchg_double_slab()
385 pr_info("%s %s: cmpxchg double redo ", n, s->name); in __cmpxchg_double_slab()
391 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, in cmpxchg_double_slab() argument
398 if (s->flags & __CMPXCHG_DOUBLE) { in cmpxchg_double_slab()
423 stat(s, CMPXCHG_DOUBLE_FAIL); in cmpxchg_double_slab()
426 pr_info("%s %s: cmpxchg double redo ", n, s->name); in cmpxchg_double_slab()
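
__cmpxchg_double_slab()/cmpxchg_double_slab() update a slab's (freelist, counters) pair atomically: with a real double-word cmpxchg when __CMPXCHG_DOUBLE is set, otherwise under the slab lock by comparing both fields against the expected snapshot before installing the new pair. The sketch below models only that locked fallback path, with a pthread mutex standing in for the page bit-spinlock (compile with -pthread); fake_slab is a mock type.

/* Sketch of the locked fallback path only; not the kernel implementation. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_slab {
	void *freelist;			/* first free object */
	unsigned long counters;		/* packed inuse/objects/frozen */
	pthread_mutex_t lock;		/* stand-in for slab_lock() */
};

static bool cmpxchg_double_slab_slow(struct fake_slab *slab,
				     void *freelist_old, unsigned long counters_old,
				     void *freelist_new, unsigned long counters_new)
{
	bool ok = false;

	pthread_mutex_lock(&slab->lock);
	if (slab->freelist == freelist_old && slab->counters == counters_old) {
		slab->freelist = freelist_new;
		slab->counters = counters_new;
		ok = true;
	}
	pthread_mutex_unlock(&slab->lock);
	return ok;	/* caller retries with a fresh snapshot on failure */
}

int main(void)
{
	char obj[32];
	struct fake_slab slab = {
		.freelist = obj, .counters = 3,
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	/* Succeeds: both fields still match the expected snapshot. */
	printf("first attempt:  %d\n", cmpxchg_double_slab_slow(&slab, obj, 3, NULL, 4));
	/* Fails: the snapshot (obj, 3) is now stale. */
	printf("second attempt: %d\n", cmpxchg_double_slab_slow(&slab, obj, 3, NULL, 5));
	return 0;
}
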
436 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s, in __fill_map() argument
444 for (p = page->freelist; p; p = get_freepointer(s, p)) in __fill_map()
445 set_bit(__obj_to_index(s, addr, p), obj_map); in __fill_map()
454 static unsigned long *get_map(struct kmem_cache *s, struct page *page) in get_map() argument
461 __fill_map(object_map, s, page); in get_map()
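
__fill_map()/get_map() produce a bitmap with one bit per free object by walking the slab's freelist; the bit index is the object's ordinal slot, which __obj_to_index() computes from the byte offset (via a precomputed reciprocal in the kernel, plain division in this sketch). Free pointers are stored unmangled here and the types are mock stand-ins.

/* Sketch of the bitmap fill performed by __fill_map(); illustrative only. */
#include <stdio.h>

#define OBJ_SIZE   64
#define NR_OBJECTS 8

struct fake_slab {
	char storage[NR_OBJECTS][OBJ_SIZE];
	void *freelist;			/* head of the free chain */
};

static void *get_freepointer(void *object)
{
	return *(void **)object;	/* unmangled for the sketch */
}

static unsigned int obj_to_index(const struct fake_slab *slab, const void *p)
{
	return (unsigned int)(((const char *)p - (const char *)slab->storage) / OBJ_SIZE);
}

static void fill_map(unsigned long *obj_map, const struct fake_slab *slab)
{
	*obj_map = 0;
	for (void *p = slab->freelist; p; p = get_freepointer(p))
		*obj_map |= 1UL << obj_to_index(slab, p);
}

int main(void)
{
	struct fake_slab slab = { .freelist = NULL };
	unsigned long map;
	int free_idx[] = { 1, 3, 6 };	/* objects sitting on the freelist */

	for (int i = 0; i < 3; i++) {
		*(void **)slab.storage[free_idx[i]] = slab.freelist;
		slab.freelist = slab.storage[free_idx[i]];
	}

	fill_map(&map, &slab);
	printf("free-object bitmap: 0x%lx\n", map);	/* 0x4a: bits 1, 3, 6 */
	return 0;
}
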
472 static inline unsigned int size_from_object(struct kmem_cache *s) in size_from_object() argument
474 if (s->flags & SLAB_RED_ZONE) in size_from_object()
475 return s->size - s->red_left_pad; in size_from_object()
477 return s->size; in size_from_object()
480 static inline void *restore_red_left(struct kmem_cache *s, void *p) in restore_red_left() argument
482 if (s->flags & SLAB_RED_ZONE) in restore_red_left()
483 p -= s->red_left_pad; in restore_red_left()
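
size_from_object(), restore_red_left() and the earlier fixup_red_left() implement the left red-zone shift: with SLAB_RED_ZONE set, pointers handed out internally are moved past a red_left_pad guard area, and the per-object stride reported for iteration drops that pad. A tiny self-contained model (the fake_cache type and the constants are arbitrary illustration values):

/* Sketch of the left red-zone arithmetic; not kernel code. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct fake_cache {
	bool red_zone;			/* models s->flags & SLAB_RED_ZONE */
	size_t red_left_pad;		/* guard bytes before each object */
	size_t size;			/* full per-object stride */
};

static void *fixup_red_left(const struct fake_cache *s, void *p)
{
	return s->red_zone ? (char *)p + s->red_left_pad : p;
}

static void *restore_red_left(const struct fake_cache *s, void *p)
{
	return s->red_zone ? (char *)p - s->red_left_pad : p;
}

static size_t size_from_object(const struct fake_cache *s)
{
	return s->red_zone ? s->size - s->red_left_pad : s->size;
}

int main(void)
{
	struct fake_cache s = { .red_zone = true, .red_left_pad = 8, .size = 72 };
	char slab[72];

	void *obj = fixup_red_left(&s, slab);
	assert(restore_red_left(&s, obj) == slab);	/* the two are inverses */
	assert(size_from_object(&s) == 64);		/* stride minus the left pad */
	return 0;
}
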
521 static inline int check_valid_pointer(struct kmem_cache *s, in check_valid_pointer() argument
531 object = restore_red_left(s, object); in check_valid_pointer()
532 if (object < base || object >= base + page->objects * s->size || in check_valid_pointer()
533 (object - base) % s->size) { in check_valid_pointer()
552 static inline bool freeptr_outside_object(struct kmem_cache *s) in freeptr_outside_object() argument
554 return s->offset >= s->inuse; in freeptr_outside_object()
561 static inline unsigned int get_info_end(struct kmem_cache *s) in get_info_end() argument
563 if (freeptr_outside_object(s)) in get_info_end()
564 return s->inuse + sizeof(void *); in get_info_end()
566 return s->inuse; in get_info_end()
569 static struct track *get_track(struct kmem_cache *s, void *object, in get_track() argument
574 p = object + get_info_end(s); in get_track()
586 unsigned long get_each_object_track(struct kmem_cache *s, in get_each_object_track() argument
596 if (!slub_debug || !(s->flags & SLAB_STORE_USER)) in get_each_object_track()
600 for_each_object(p, s, page_address(page), page->objects) { in get_each_object_track()
601 t = get_track(s, p, alloc); in get_each_object_track()
603 ret = fn(s, p, t, private); in get_each_object_track()
614 static void set_track(struct kmem_cache *s, void *object, in set_track() argument
617 struct track *p = get_track(s, object, alloc); in set_track()
642 static void init_tracking(struct kmem_cache *s, void *object) in init_tracking() argument
644 if (!(s->flags & SLAB_STORE_USER)) in init_tracking()
647 set_track(s, object, TRACK_FREE, 0UL); in init_tracking()
648 set_track(s, object, TRACK_ALLOC, 0UL); in init_tracking()
651 static void print_track(const char *s, struct track *t, unsigned long pr_time) in print_track() argument
657 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid); in print_track()
670 void print_tracking(struct kmem_cache *s, void *object) in print_tracking() argument
673 if (!(s->flags & SLAB_STORE_USER)) in print_tracking()
676 print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time); in print_tracking()
677 print_track("Freed", get_track(s, object, TRACK_FREE), pr_time); in print_tracking()
687 static void slab_bug(struct kmem_cache *s, char *fmt, ...) in slab_bug() argument
696 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf); in slab_bug()
701 static void slab_fix(struct kmem_cache *s, char *fmt, ...) in slab_fix() argument
709 pr_err("FIX %s: %pV\n", s->name, &vaf); in slab_fix()
713 static bool freelist_corrupted(struct kmem_cache *s, struct page *page, in freelist_corrupted() argument
716 if ((s->flags & SLAB_CONSISTENCY_CHECKS) && in freelist_corrupted()
717 !check_valid_pointer(s, page, nextfree) && freelist) { in freelist_corrupted()
718 object_err(s, page, *freelist, "Freechain corrupt"); in freelist_corrupted()
720 slab_fix(s, "Isolate corrupted freechain"); in freelist_corrupted()
727 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) in print_trailer() argument
732 print_tracking(s, p); in print_trailer()
737 p, p - addr, get_freepointer(s, p)); in print_trailer()
739 if (s->flags & SLAB_RED_ZONE) in print_trailer()
740 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad, in print_trailer()
741 s->red_left_pad); in print_trailer()
746 min_t(unsigned int, s->object_size, PAGE_SIZE)); in print_trailer()
747 if (s->flags & SLAB_RED_ZONE) in print_trailer()
748 print_section(KERN_ERR, "Redzone ", p + s->object_size, in print_trailer()
749 s->inuse - s->object_size); in print_trailer()
751 off = get_info_end(s); in print_trailer()
753 if (s->flags & SLAB_STORE_USER) in print_trailer()
756 off += kasan_metadata_size(s); in print_trailer()
758 if (off != size_from_object(s)) in print_trailer()
761 size_from_object(s) - off); in print_trailer()
766 void object_err(struct kmem_cache *s, struct page *page, in object_err() argument
769 slab_bug(s, "%s", reason); in object_err()
770 print_trailer(s, page, object); in object_err()
774 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page, in slab_err() argument
783 slab_bug(s, "%s", buf); in slab_err()
789 static void init_object(struct kmem_cache *s, void *object, u8 val) in init_object() argument
793 if (s->flags & SLAB_RED_ZONE) in init_object()
794 memset(p - s->red_left_pad, val, s->red_left_pad); in init_object()
796 if (s->flags & __OBJECT_POISON) { in init_object()
797 memset(p, POISON_FREE, s->object_size - 1); in init_object()
798 p[s->object_size - 1] = POISON_END; in init_object()
801 if (s->flags & SLAB_RED_ZONE) in init_object()
802 memset(p + s->object_size, val, s->inuse - s->object_size); in init_object()
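
init_object() lays a debug object out as left red zone, payload filled with POISON_FREE and terminated by POISON_END, then right red zone up to s->inuse. The sketch below reproduces that byte pattern for a cache with both red zoning and poisoning enabled; the poison constants match include/linux/poison.h and SLUB's red-zone markers, while fake_cache is a mock type.

/* Sketch of the debug byte layout established by init_object(). */
#include <stdio.h>
#include <string.h>

#define POISON_FREE		0x6b	/* freed-object payload filler */
#define POISON_END		0xa5	/* last byte of the payload */
#define SLUB_RED_INACTIVE	0xbb	/* red-zone value for a free object */

struct fake_cache {
	size_t object_size;	/* payload size */
	size_t inuse;		/* payload + right red zone */
	size_t red_left_pad;	/* left red zone */
};

static void init_object(const struct fake_cache *s, unsigned char *object,
			unsigned char val)
{
	unsigned char *p = object;

	memset(p - s->red_left_pad, val, s->red_left_pad);	/* left red zone */
	memset(p, POISON_FREE, s->object_size - 1);		/* payload poison */
	p[s->object_size - 1] = POISON_END;			/* end marker */
	memset(p + s->object_size, val, s->inuse - s->object_size); /* right zone */
}

int main(void)
{
	struct fake_cache s = { .object_size = 16, .inuse = 24, .red_left_pad = 8 };
	unsigned char slab[8 + 24];

	init_object(&s, slab + s.red_left_pad, SLUB_RED_INACTIVE);
	for (size_t i = 0; i < sizeof(slab); i++)
		printf("%02x%c", slab[i], (i + 1) % 8 ? ' ' : '\n');
	return 0;
}
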
805 static void restore_bytes(struct kmem_cache *s, char *message, u8 data, in restore_bytes() argument
808 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data); in restore_bytes()
812 static int check_bytes_and_report(struct kmem_cache *s, struct page *page, in check_bytes_and_report() argument
830 slab_bug(s, "%s overwritten", what); in check_bytes_and_report()
834 print_trailer(s, page, object); in check_bytes_and_report()
837 restore_bytes(s, what, value, fault, end); in check_bytes_and_report()
879 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) in check_pad_bytes() argument
881 unsigned long off = get_info_end(s); /* The end of info */ in check_pad_bytes()
883 if (s->flags & SLAB_STORE_USER) in check_pad_bytes()
887 off += kasan_metadata_size(s); in check_pad_bytes()
889 if (size_from_object(s) == off) in check_pad_bytes()
892 return check_bytes_and_report(s, page, p, "Object padding", in check_pad_bytes()
893 p + off, POISON_INUSE, size_from_object(s) - off); in check_pad_bytes()
897 static int slab_pad_check(struct kmem_cache *s, struct page *page) in slab_pad_check() argument
906 if (!(s->flags & SLAB_POISON)) in slab_pad_check()
912 remainder = length % s->size; in slab_pad_check()
925 slab_err(s, page, "Padding overwritten. 0x%p-0x%p @offset=%tu", in slab_pad_check()
929 restore_bytes(s, "slab padding", POISON_INUSE, fault, end); in slab_pad_check()
933 static int check_object(struct kmem_cache *s, struct page *page, in check_object() argument
937 u8 *endobject = object + s->object_size; in check_object()
939 if (s->flags & SLAB_RED_ZONE) { in check_object()
940 if (!check_bytes_and_report(s, page, object, "Left Redzone", in check_object()
941 object - s->red_left_pad, val, s->red_left_pad)) in check_object()
944 if (!check_bytes_and_report(s, page, object, "Right Redzone", in check_object()
945 endobject, val, s->inuse - s->object_size)) in check_object()
948 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { in check_object()
949 check_bytes_and_report(s, page, p, "Alignment padding", in check_object()
951 s->inuse - s->object_size); in check_object()
955 if (s->flags & SLAB_POISON) { in check_object()
956 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) && in check_object()
957 (!check_bytes_and_report(s, page, p, "Poison", p, in check_object()
958 POISON_FREE, s->object_size - 1) || in check_object()
959 !check_bytes_and_report(s, page, p, "End Poison", in check_object()
960 p + s->object_size - 1, POISON_END, 1))) in check_object()
965 check_pad_bytes(s, page, p); in check_object()
968 if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE) in check_object()
976 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { in check_object()
977 object_err(s, page, p, "Freepointer corrupt"); in check_object()
983 set_freepointer(s, p, NULL); in check_object()
989 static int check_slab(struct kmem_cache *s, struct page *page) in check_slab() argument
996 slab_err(s, page, "Not a valid slab page"); in check_slab()
1000 maxobj = order_objects(compound_order(page), s->size); in check_slab()
1002 slab_err(s, page, "objects %u > max %u", in check_slab()
1007 slab_err(s, page, "inuse %u > max %u", in check_slab()
1012 slab_pad_check(s, page); in check_slab()
1020 static int on_freelist(struct kmem_cache *s, struct page *page, void *search) in on_freelist() argument
1031 if (!check_valid_pointer(s, page, fp)) { in on_freelist()
1033 object_err(s, page, object, in on_freelist()
1035 set_freepointer(s, object, NULL); in on_freelist()
1037 slab_err(s, page, "Freepointer corrupt"); in on_freelist()
1040 slab_fix(s, "Freelist cleared"); in on_freelist()
1046 fp = get_freepointer(s, object); in on_freelist()
1050 max_objects = order_objects(compound_order(page), s->size); in on_freelist()
1055 slab_err(s, page, "Wrong number of objects. Found %d but should be %d", in on_freelist()
1058 slab_fix(s, "Number of objects adjusted."); in on_freelist()
1061 slab_err(s, page, "Wrong object count. Counter is %d but counted were %d", in on_freelist()
1064 slab_fix(s, "Object count adjusted."); in on_freelist()
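
on_freelist() doubles as a consistency check: it walks the free chain (bounded by the slab's capacity), then compares what it counted against the recorded objects/inuse values, printing the "Wrong number of objects"/"Wrong object count" messages above and adjusting the counters. A stripped-down model of that counting pass, with mock types and unmangled pointers:

/* Sketch of the freelist counting/fixup pass; illustrative only. */
#include <stdio.h>

#define MAX_OBJECTS 8

struct fake_slab {
	void *freelist;		/* head of the free chain */
	unsigned int objects;	/* recorded capacity of the slab */
	unsigned int inuse;	/* recorded number of allocated objects */
};

static unsigned int count_free(const struct fake_slab *slab)
{
	unsigned int nr = 0;

	for (void *p = slab->freelist; p && nr <= MAX_OBJECTS; p = *(void **)p)
		nr++;		/* free pointers left unmangled for the sketch */
	return nr;
}

static void check_counts(struct fake_slab *slab)
{
	unsigned int nr = count_free(slab);

	if (slab->inuse != slab->objects - nr) {
		printf("Wrong object count. Counter is %u but counted were %u\n",
		       slab->inuse, slab->objects - nr);
		slab->inuse = slab->objects - nr;	/* "Object count adjusted." */
	}
}

int main(void)
{
	void *a[2], *b[2];
	struct fake_slab slab = { .freelist = a, .objects = 4, .inuse = 3 };

	*(void **)a = b;	/* a -> b */
	*(void **)b = NULL;	/* b terminates the chain */
	check_counts(&slab);	/* two free objects found, inuse corrected to 2 */
	return 0;
}
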
1069 static void trace(struct kmem_cache *s, struct page *page, void *object, in trace() argument
1072 if (s->flags & SLAB_TRACE) { in trace()
1074 s->name, in trace()
1081 s->object_size); in trace()
1090 static void add_full(struct kmem_cache *s, in add_full() argument
1093 if (!(s->flags & SLAB_STORE_USER)) in add_full()
1100 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) in remove_full() argument
1102 if (!(s->flags & SLAB_STORE_USER)) in remove_full()
1110 static inline unsigned long slabs_node(struct kmem_cache *s, int node) in slabs_node() argument
1112 struct kmem_cache_node *n = get_node(s, node); in slabs_node()
1122 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) in inc_slabs_node() argument
1124 struct kmem_cache_node *n = get_node(s, node); in inc_slabs_node()
1137 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) in dec_slabs_node() argument
1139 struct kmem_cache_node *n = get_node(s, node); in dec_slabs_node()
1146 static void setup_object_debug(struct kmem_cache *s, struct page *page, in setup_object_debug() argument
1149 if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)) in setup_object_debug()
1152 init_object(s, object, SLUB_RED_INACTIVE); in setup_object_debug()
1153 init_tracking(s, object); in setup_object_debug()
1157 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) in setup_page_debug() argument
1159 if (!kmem_cache_debug_flags(s, SLAB_POISON)) in setup_page_debug()
1167 static inline int alloc_consistency_checks(struct kmem_cache *s, in alloc_consistency_checks() argument
1170 if (!check_slab(s, page)) in alloc_consistency_checks()
1173 if (!check_valid_pointer(s, page, object)) { in alloc_consistency_checks()
1174 object_err(s, page, object, "Freelist Pointer check fails"); in alloc_consistency_checks()
1178 if (!check_object(s, page, object, SLUB_RED_INACTIVE)) in alloc_consistency_checks()
1184 static noinline int alloc_debug_processing(struct kmem_cache *s, in alloc_debug_processing() argument
1188 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in alloc_debug_processing()
1189 if (!alloc_consistency_checks(s, page, object)) in alloc_debug_processing()
1194 if (s->flags & SLAB_STORE_USER) in alloc_debug_processing()
1195 set_track(s, object, TRACK_ALLOC, addr); in alloc_debug_processing()
1196 trace(s, page, object, 1); in alloc_debug_processing()
1197 init_object(s, object, SLUB_RED_ACTIVE); in alloc_debug_processing()
1207 slab_fix(s, "Marking all objects used"); in alloc_debug_processing()
1214 static inline int free_consistency_checks(struct kmem_cache *s, in free_consistency_checks() argument
1217 if (!check_valid_pointer(s, page, object)) { in free_consistency_checks()
1218 slab_err(s, page, "Invalid object pointer 0x%p", object); in free_consistency_checks()
1222 if (on_freelist(s, page, object)) { in free_consistency_checks()
1223 object_err(s, page, object, "Object already free"); in free_consistency_checks()
1227 if (!check_object(s, page, object, SLUB_RED_ACTIVE)) in free_consistency_checks()
1230 if (unlikely(s != page->slab_cache)) { in free_consistency_checks()
1232 slab_err(s, page, "Attempt to free object(0x%p) outside of slab", in free_consistency_checks()
1239 object_err(s, page, object, in free_consistency_checks()
1248 struct kmem_cache *s, struct page *page, in free_debug_processing() argument
1252 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in free_debug_processing()
1261 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in free_debug_processing()
1262 if (!check_slab(s, page)) in free_debug_processing()
1269 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in free_debug_processing()
1270 if (!free_consistency_checks(s, page, object, addr)) in free_debug_processing()
1274 if (s->flags & SLAB_STORE_USER) in free_debug_processing()
1275 set_track(s, object, TRACK_FREE, addr); in free_debug_processing()
1276 trace(s, page, object, 0); in free_debug_processing()
1278 init_object(s, object, SLUB_RED_INACTIVE); in free_debug_processing()
1282 object = get_freepointer(s, object); in free_debug_processing()
1289 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n", in free_debug_processing()
1295 slab_fix(s, "Object at 0x%p not freed", object); in free_debug_processing()
1495 static inline void setup_object_debug(struct kmem_cache *s, in setup_object_debug() argument
1498 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {} in setup_page_debug() argument
1500 static inline int alloc_debug_processing(struct kmem_cache *s, in alloc_debug_processing() argument
1504 struct kmem_cache *s, struct page *page, in free_debug_processing() argument
1508 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) in slab_pad_check() argument
1510 static inline int check_object(struct kmem_cache *s, struct page *page, in check_object() argument
1512 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, in add_full() argument
1514 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, in remove_full() argument
1525 static inline unsigned long slabs_node(struct kmem_cache *s, int node) in slabs_node() argument
1529 static inline void inc_slabs_node(struct kmem_cache *s, int node, in inc_slabs_node() argument
1531 static inline void dec_slabs_node(struct kmem_cache *s, int node, in dec_slabs_node() argument
1534 static bool freelist_corrupted(struct kmem_cache *s, struct page *page, in freelist_corrupted() argument
1559 static __always_inline bool slab_free_hook(struct kmem_cache *s, in slab_free_hook() argument
1562 kmemleak_free_recursive(x, s->flags); in slab_free_hook()
1574 debug_check_no_locks_freed(x, s->object_size); in slab_free_hook()
1578 if (!(s->flags & SLAB_DEBUG_OBJECTS)) in slab_free_hook()
1579 debug_check_no_obj_freed(x, s->object_size); in slab_free_hook()
1582 if (!(s->flags & SLAB_TYPESAFE_BY_RCU)) in slab_free_hook()
1583 __kcsan_check_access(x, s->object_size, in slab_free_hook()
1598 memset(kasan_reset_tag(x), 0, s->object_size); in slab_free_hook()
1599 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0; in slab_free_hook()
1600 memset((char *)kasan_reset_tag(x) + s->inuse, 0, in slab_free_hook()
1601 s->size - s->inuse - rsize); in slab_free_hook()
1604 return kasan_slab_free(s, x, init); in slab_free_hook()
1607 static inline bool slab_free_freelist_hook(struct kmem_cache *s, in slab_free_freelist_hook() argument
1617 slab_free_hook(s, next, false); in slab_free_freelist_hook()
1627 next = get_freepointer(s, object); in slab_free_freelist_hook()
1630 if (!slab_free_hook(s, object, slab_want_init_on_free(s))) { in slab_free_freelist_hook()
1632 set_freepointer(s, object, *head); in slab_free_freelist_hook()
1651 static void *setup_object(struct kmem_cache *s, struct page *page, in setup_object() argument
1654 setup_object_debug(s, page, object); in setup_object()
1655 object = kasan_init_slab_obj(s, object); in setup_object()
1656 if (unlikely(s->ctor)) { in setup_object()
1657 kasan_unpoison_object_data(s, object); in setup_object()
1658 s->ctor(object); in setup_object()
1659 kasan_poison_object_data(s, object); in setup_object()
1667 static inline struct page *alloc_slab_page(struct kmem_cache *s, in alloc_slab_page() argument
1679 account_slab_page(page, order, s); in alloc_slab_page()
1686 static int init_cache_random_seq(struct kmem_cache *s) in init_cache_random_seq() argument
1688 unsigned int count = oo_objects(s->oo); in init_cache_random_seq()
1692 if (s->random_seq) in init_cache_random_seq()
1695 err = cache_random_seq_create(s, count, GFP_KERNEL); in init_cache_random_seq()
1698 s->name); in init_cache_random_seq()
1703 if (s->random_seq) { in init_cache_random_seq()
1707 s->random_seq[i] *= s->size; in init_cache_random_seq()
1715 struct kmem_cache *s; in init_freelist_randomization() local
1719 list_for_each_entry(s, &slab_caches, list) in init_freelist_randomization()
1720 init_cache_random_seq(s); in init_freelist_randomization()
1726 static void *next_freelist_entry(struct kmem_cache *s, struct page *page, in next_freelist_entry() argument
1738 idx = s->random_seq[*pos]; in next_freelist_entry()
1748 static bool shuffle_freelist(struct kmem_cache *s, struct page *page) in shuffle_freelist() argument
1755 if (page->objects < 2 || !s->random_seq) in shuffle_freelist()
1758 freelist_count = oo_objects(s->oo); in shuffle_freelist()
1761 page_limit = page->objects * s->size; in shuffle_freelist()
1762 start = fixup_red_left(s, page_address(page)); in shuffle_freelist()
1765 cur = next_freelist_entry(s, page, &pos, start, page_limit, in shuffle_freelist()
1767 cur = setup_object(s, page, cur); in shuffle_freelist()
1771 next = next_freelist_entry(s, page, &pos, start, page_limit, in shuffle_freelist()
1773 next = setup_object(s, page, next); in shuffle_freelist()
1774 set_freepointer(s, cur, next); in shuffle_freelist()
1777 set_freepointer(s, cur, NULL); in shuffle_freelist()
1782 static inline int init_cache_random_seq(struct kmem_cache *s) in init_cache_random_seq() argument
1787 static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page) in shuffle_freelist() argument
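
With CONFIG_SLAB_FREELIST_RANDOM, shuffle_freelist()/next_freelist_entry() chain the free objects of a new slab in the order given by a precomputed random permutation of byte offsets (s->random_seq, pre-multiplied by s->size in init_cache_random_seq()). The sketch below hard-codes such a permutation purely for illustration and skips the wrap-around handling of the real code; pointers are stored unmangled.

/* Sketch of freelist randomization; permutation hard-coded, not random. */
#include <stdio.h>

#define OBJ_SIZE   32
#define NR_OBJECTS 4

/* Union forces pointer alignment for the free-pointer stores below. */
static union { char bytes[NR_OBJECTS * OBJ_SIZE]; void *align; } slab;

/* Random permutation of byte offsets, as init_cache_random_seq() prepares. */
static const unsigned int random_seq[NR_OBJECTS] = {
	2 * OBJ_SIZE, 0 * OBJ_SIZE, 3 * OBJ_SIZE, 1 * OBJ_SIZE,
};

static void set_freepointer(void *object, void *fp)
{
	*(void **)object = fp;		/* unmangled for the sketch */
}

static void *shuffle_freelist(void)
{
	void *cur = slab.bytes + random_seq[0];
	void *head = cur;

	for (unsigned int pos = 1; pos < NR_OBJECTS; pos++) {
		void *next = slab.bytes + random_seq[pos];

		set_freepointer(cur, next);
		cur = next;
	}
	set_freepointer(cur, NULL);	/* terminate the chain */
	return head;
}

int main(void)
{
	for (void *p = shuffle_freelist(); p; p = *(void **)p)
		printf("object %ld\n", (long)(((char *)p - slab.bytes) / OBJ_SIZE));
	return 0;
}
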
1793 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) in allocate_slab() argument
1796 struct kmem_cache_order_objects oo = s->oo; in allocate_slab()
1807 flags |= s->allocflags; in allocate_slab()
1814 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) in allocate_slab()
1817 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1819 oo = s->min; in allocate_slab()
1825 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1828 stat(s, ORDER_FALLBACK); in allocate_slab()
1833 page->slab_cache = s; in allocate_slab()
1842 setup_page_debug(s, page, start); in allocate_slab()
1844 shuffle = shuffle_freelist(s, page); in allocate_slab()
1847 start = fixup_red_left(s, start); in allocate_slab()
1848 start = setup_object(s, page, start); in allocate_slab()
1851 next = p + s->size; in allocate_slab()
1852 next = setup_object(s, page, next); in allocate_slab()
1853 set_freepointer(s, p, next); in allocate_slab()
1856 set_freepointer(s, p, NULL); in allocate_slab()
1868 inc_slabs_node(s, page_to_nid(page), page->objects); in allocate_slab()
1873 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) in new_slab() argument
1878 return allocate_slab(s, in new_slab()
1882 static void __free_slab(struct kmem_cache *s, struct page *page) in __free_slab() argument
1887 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) { in __free_slab()
1890 slab_pad_check(s, page); in __free_slab()
1891 for_each_object(p, s, page_address(page), in __free_slab()
1893 check_object(s, page, p, SLUB_RED_INACTIVE); in __free_slab()
1902 unaccount_slab_page(page, order, s); in __free_slab()
1913 static void free_slab(struct kmem_cache *s, struct page *page) in free_slab() argument
1915 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) { in free_slab()
1918 __free_slab(s, page); in free_slab()
1921 static void discard_slab(struct kmem_cache *s, struct page *page) in discard_slab() argument
1923 dec_slabs_node(s, page_to_nid(page), page->objects); in discard_slab()
1924 free_slab(s, page); in discard_slab()
1961 static inline void *acquire_slab(struct kmem_cache *s, in acquire_slab() argument
1990 if (!__cmpxchg_double_slab(s, page, in acquire_slab()
2001 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
2007 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, in get_partial_node() argument
2031 t = acquire_slab(s, n, page, object == NULL, &objects); in get_partial_node()
2038 stat(s, ALLOC_FROM_PARTIAL); in get_partial_node()
2041 put_cpu_partial(s, page, 0); in get_partial_node()
2042 stat(s, CPU_PARTIAL_NODE); in get_partial_node()
2044 if (!kmem_cache_has_cpu_partial(s) in get_partial_node()
2045 || available > slub_cpu_partial(s) / 2) in get_partial_node()
2056 static void *get_any_partial(struct kmem_cache *s, gfp_t flags, in get_any_partial() argument
2085 if (!s->remote_node_defrag_ratio || in get_any_partial()
2086 get_cycles() % 1024 > s->remote_node_defrag_ratio) in get_any_partial()
2095 n = get_node(s, zone_to_nid(zone)); in get_any_partial()
2098 n->nr_partial > s->min_partial) { in get_any_partial()
2099 object = get_partial_node(s, n, c, flags); in get_any_partial()
2120 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node, in get_partial() argument
2129 object = get_partial_node(s, get_node(s, searchnode), c, flags); in get_partial()
2133 return get_any_partial(s, flags, c); in get_partial()
2174 const struct kmem_cache *s, unsigned long tid) in note_cmpxchg_failure() argument
2177 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); in note_cmpxchg_failure()
2179 pr_info("%s %s: cmpxchg redo ", n, s->name); in note_cmpxchg_failure()
2194 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); in note_cmpxchg_failure()
2197 static void init_kmem_cache_cpus(struct kmem_cache *s) in init_kmem_cache_cpus() argument
2202 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu); in init_kmem_cache_cpus()
2208 static void deactivate_slab(struct kmem_cache *s, struct page *page, in deactivate_slab() argument
2212 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in deactivate_slab()
2221 stat(s, DEACTIVATE_REMOTE_FREES); in deactivate_slab()
2233 while (freelist && (nextfree = get_freepointer(s, freelist))) { in deactivate_slab()
2242 if (freelist_corrupted(s, page, &freelist, nextfree)) in deactivate_slab()
2248 set_freepointer(s, freelist, prior); in deactivate_slab()
2253 } while (!__cmpxchg_double_slab(s, page, in deactivate_slab()
2285 set_freepointer(s, freelist, old.freelist); in deactivate_slab()
2292 if (!new.inuse && n->nr_partial >= s->min_partial) in deactivate_slab()
2308 if ((s->flags & SLAB_STORE_USER) && !lock) { in deactivate_slab()
2324 remove_full(s, n, page); in deactivate_slab()
2329 add_full(s, n, page); in deactivate_slab()
2333 if (!__cmpxchg_double_slab(s, page, in deactivate_slab()
2343 stat(s, tail); in deactivate_slab()
2345 stat(s, DEACTIVATE_FULL); in deactivate_slab()
2347 stat(s, DEACTIVATE_EMPTY); in deactivate_slab()
2348 discard_slab(s, page); in deactivate_slab()
2349 stat(s, FREE_SLAB); in deactivate_slab()
2364 static void unfreeze_partials(struct kmem_cache *s, in unfreeze_partials() argument
2377 n2 = get_node(s, page_to_nid(page)); in unfreeze_partials()
2397 } while (!__cmpxchg_double_slab(s, page, in unfreeze_partials()
2402 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) { in unfreeze_partials()
2407 stat(s, FREE_ADD_PARTIAL); in unfreeze_partials()
2418 stat(s, DEACTIVATE_EMPTY); in unfreeze_partials()
2419 discard_slab(s, page); in unfreeze_partials()
2420 stat(s, FREE_SLAB); in unfreeze_partials()
2432 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) in put_cpu_partial() argument
2443 oldpage = this_cpu_read(s->cpu_slab->partial); in put_cpu_partial()
2448 if (drain && pobjects > slub_cpu_partial(s)) { in put_cpu_partial()
2455 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); in put_cpu_partial()
2460 stat(s, CPU_PARTIAL_DRAIN); in put_cpu_partial()
2471 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) in put_cpu_partial()
2473 if (unlikely(!slub_cpu_partial(s))) { in put_cpu_partial()
2477 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); in put_cpu_partial()
2484 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) in flush_slab() argument
2486 stat(s, CPUSLAB_FLUSH); in flush_slab()
2487 deactivate_slab(s, c->page, c->freelist, c); in flush_slab()
2495 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) in __flush_cpu_slab() argument
2497 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in __flush_cpu_slab()
2500 flush_slab(s, c); in __flush_cpu_slab()
2502 unfreeze_partials(s, c); in __flush_cpu_slab()
2507 struct kmem_cache *s = d; in flush_cpu_slab() local
2509 __flush_cpu_slab(s, smp_processor_id()); in flush_cpu_slab()
2514 struct kmem_cache *s = info; in has_cpu_slab() local
2515 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in has_cpu_slab()
2520 static void flush_all(struct kmem_cache *s) in flush_all() argument
2522 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1); in flush_all()
2531 struct kmem_cache *s; in slub_cpu_dead() local
2535 list_for_each_entry(s, &slab_caches, list) { in slub_cpu_dead()
2537 __flush_cpu_slab(s, cpu); in slub_cpu_dead()
2586 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) in slab_out_of_memory() argument
2600 s->name, s->object_size, s->size, oo_order(s->oo), in slab_out_of_memory()
2601 oo_order(s->min)); in slab_out_of_memory()
2603 if (oo_order(s->min) > get_order(s->object_size)) in slab_out_of_memory()
2605 s->name); in slab_out_of_memory()
2607 for_each_kmem_cache_node(s, node, n) { in slab_out_of_memory()
2622 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags, in new_slab_objects() argument
2629 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); in new_slab_objects()
2631 freelist = get_partial(s, flags, node, c); in new_slab_objects()
2636 page = new_slab(s, flags, node); in new_slab_objects()
2638 c = raw_cpu_ptr(s->cpu_slab); in new_slab_objects()
2640 flush_slab(s, c); in new_slab_objects()
2649 stat(s, ALLOC_SLAB); in new_slab_objects()
2675 static inline void *get_freelist(struct kmem_cache *s, struct page *page) in get_freelist() argument
2691 } while (!__cmpxchg_double_slab(s, page, in get_freelist()
2718 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, in ___slab_alloc() argument
2724 stat(s, ALLOC_SLOWPATH); in ___slab_alloc()
2748 stat(s, ALLOC_NODE_MISMATCH); in ___slab_alloc()
2749 deactivate_slab(s, page, c->freelist, c); in ___slab_alloc()
2760 deactivate_slab(s, page, c->freelist, c); in ___slab_alloc()
2769 freelist = get_freelist(s, page); in ___slab_alloc()
2774 stat(s, DEACTIVATE_BYPASS); in ___slab_alloc()
2778 stat(s, ALLOC_REFILL); in ___slab_alloc()
2787 c->freelist = get_freepointer(s, freelist); in ___slab_alloc()
2796 stat(s, CPU_PARTIAL_ALLOC); in ___slab_alloc()
2800 freelist = new_slab_objects(s, gfpflags, node, &c); in ___slab_alloc()
2803 slab_out_of_memory(s, gfpflags, node); in ___slab_alloc()
2808 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags))) in ___slab_alloc()
2812 if (kmem_cache_debug(s) && in ___slab_alloc()
2813 !alloc_debug_processing(s, page, freelist, addr)) in ___slab_alloc()
2816 deactivate_slab(s, page, get_freepointer(s, freelist), c); in ___slab_alloc()
2824 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, in __slab_alloc() argument
2837 c = this_cpu_ptr(s->cpu_slab); in __slab_alloc()
2840 p = ___slab_alloc(s, gfpflags, node, addr, c); in __slab_alloc()
2849 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, in maybe_wipe_obj_freeptr() argument
2852 if (unlikely(slab_want_init_on_free(s)) && obj) in maybe_wipe_obj_freeptr()
2853 memset((void *)((char *)kasan_reset_tag(obj) + s->offset), in maybe_wipe_obj_freeptr()
2867 static __always_inline void *slab_alloc_node(struct kmem_cache *s, in slab_alloc_node() argument
2877 s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags); in slab_alloc_node()
2878 if (!s) in slab_alloc_node()
2881 object = kfence_alloc(s, orig_size, gfpflags); in slab_alloc_node()
2897 tid = this_cpu_read(s->cpu_slab->tid); in slab_alloc_node()
2898 c = raw_cpu_ptr(s->cpu_slab); in slab_alloc_node()
2922 object = __slab_alloc(s, gfpflags, node, addr, c); in slab_alloc_node()
2924 void *next_object = get_freepointer_safe(s, object); in slab_alloc_node()
2941 s->cpu_slab->freelist, s->cpu_slab->tid, in slab_alloc_node()
2945 note_cmpxchg_failure("slab_alloc", s, tid); in slab_alloc_node()
2948 prefetch_freepointer(s, next_object); in slab_alloc_node()
2949 stat(s, ALLOC_FASTPATH); in slab_alloc_node()
2952 maybe_wipe_obj_freeptr(s, object); in slab_alloc_node()
2953 init = slab_want_init_on_alloc(gfpflags, s); in slab_alloc_node()
2956 slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init); in slab_alloc_node()
2961 static __always_inline void *slab_alloc(struct kmem_cache *s, in slab_alloc() argument
2964 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr, orig_size); in slab_alloc()
2967 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) in kmem_cache_alloc() argument
2969 void *ret = slab_alloc(s, gfpflags, _RET_IP_, s->object_size); in kmem_cache_alloc()
2971 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, in kmem_cache_alloc()
2972 s->size, gfpflags); in kmem_cache_alloc()
2979 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) in kmem_cache_alloc_trace() argument
2981 void *ret = slab_alloc(s, gfpflags, _RET_IP_, size); in kmem_cache_alloc_trace()
2982 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); in kmem_cache_alloc_trace()
2983 ret = kasan_kmalloc(s, ret, size, gfpflags); in kmem_cache_alloc_trace()
2990 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) in kmem_cache_alloc_node() argument
2992 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, s->object_size); in kmem_cache_alloc_node()
2995 s->object_size, s->size, gfpflags, node); in kmem_cache_alloc_node()
3002 void *kmem_cache_alloc_node_trace(struct kmem_cache *s, in kmem_cache_alloc_node_trace() argument
3006 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, size); in kmem_cache_alloc_node_trace()
3009 size, s->size, gfpflags, node); in kmem_cache_alloc_node_trace()
3011 ret = kasan_kmalloc(s, ret, size, gfpflags); in kmem_cache_alloc_node_trace()
3026 static void __slab_free(struct kmem_cache *s, struct page *page, in __slab_free() argument
3038 stat(s, FREE_SLOWPATH); in __slab_free()
3043 if (kmem_cache_debug(s) && in __slab_free()
3044 !free_debug_processing(s, page, head, tail, cnt, addr)) in __slab_free()
3054 set_freepointer(s, tail, prior); in __slab_free()
3060 if (kmem_cache_has_cpu_partial(s) && !prior) { in __slab_free()
3072 n = get_node(s, page_to_nid(page)); in __slab_free()
3086 } while (!cmpxchg_double_slab(s, page, in __slab_free()
3098 stat(s, FREE_FROZEN); in __slab_free()
3104 put_cpu_partial(s, page, 1); in __slab_free()
3105 stat(s, CPU_PARTIAL_FREE); in __slab_free()
3111 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) in __slab_free()
3118 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { in __slab_free()
3119 remove_full(s, n, page); in __slab_free()
3121 stat(s, FREE_ADD_PARTIAL); in __slab_free()
3132 stat(s, FREE_REMOVE_PARTIAL); in __slab_free()
3135 remove_full(s, n, page); in __slab_free()
3139 stat(s, FREE_SLAB); in __slab_free()
3140 discard_slab(s, page); in __slab_free()
3158 static __always_inline void do_slab_free(struct kmem_cache *s, in do_slab_free() argument
3168 memcg_slab_free_hook(s, &head, 1); in do_slab_free()
3177 tid = this_cpu_read(s->cpu_slab->tid); in do_slab_free()
3178 c = raw_cpu_ptr(s->cpu_slab); in do_slab_free()
3188 set_freepointer(s, tail_obj, freelist); in do_slab_free()
3191 s->cpu_slab->freelist, s->cpu_slab->tid, in do_slab_free()
3195 note_cmpxchg_failure("slab_free", s, tid); in do_slab_free()
3198 stat(s, FREE_FASTPATH); in do_slab_free()
3200 __slab_free(s, page, head, tail_obj, cnt, addr); in do_slab_free()
3204 static __always_inline void slab_free(struct kmem_cache *s, struct page *page, in slab_free() argument
3212 if (slab_free_freelist_hook(s, &head, &tail, &cnt)) in slab_free()
3213 do_slab_free(s, page, head, tail, cnt, addr); in slab_free()
3223 void kmem_cache_free(struct kmem_cache *s, void *x) in kmem_cache_free() argument
3225 s = cache_from_obj(s, x); in kmem_cache_free()
3226 if (!s) in kmem_cache_free()
3228 slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_); in kmem_cache_free()
3238 struct kmem_cache *s; member
3254 int build_detached_freelist(struct kmem_cache *s, size_t size, in build_detached_freelist() argument
3274 if (!s) { in build_detached_freelist()
3284 df->s = page->slab_cache; in build_detached_freelist()
3286 df->s = cache_from_obj(s, object); /* Support for memcg */ in build_detached_freelist()
3290 slab_free_hook(df->s, object, false); in build_detached_freelist()
3298 set_freepointer(df->s, object, NULL); in build_detached_freelist()
3312 set_freepointer(df->s, object, df->freelist); in build_detached_freelist()
3332 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) in kmem_cache_free_bulk() argument
3337 memcg_slab_free_hook(s, p, size); in kmem_cache_free_bulk()
3341 size = build_detached_freelist(s, size, p, &df); in kmem_cache_free_bulk()
3345 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_); in kmem_cache_free_bulk()
3351 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, in kmem_cache_alloc_bulk() argument
3359 s = slab_pre_alloc_hook(s, &objcg, size, flags); in kmem_cache_alloc_bulk()
3360 if (unlikely(!s)) in kmem_cache_alloc_bulk()
3368 c = this_cpu_ptr(s->cpu_slab); in kmem_cache_alloc_bulk()
3371 void *object = kfence_alloc(s, s->object_size, flags); in kmem_cache_alloc_bulk()
3393 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, in kmem_cache_alloc_bulk()
3398 c = this_cpu_ptr(s->cpu_slab); in kmem_cache_alloc_bulk()
3399 maybe_wipe_obj_freeptr(s, p[i]); in kmem_cache_alloc_bulk()
3403 c->freelist = get_freepointer(s, object); in kmem_cache_alloc_bulk()
3405 maybe_wipe_obj_freeptr(s, p[i]); in kmem_cache_alloc_bulk()
3414 slab_post_alloc_hook(s, objcg, flags, size, p, in kmem_cache_alloc_bulk()
3415 slab_want_init_on_alloc(flags, s)); in kmem_cache_alloc_bulk()
3419 slab_post_alloc_hook(s, objcg, flags, i, p, false); in kmem_cache_alloc_bulk()
3420 __kmem_cache_free_bulk(s, i, p); in kmem_cache_alloc_bulk()
3563 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) in alloc_kmem_cache_cpus() argument
3572 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), in alloc_kmem_cache_cpus()
3575 if (!s->cpu_slab) in alloc_kmem_cache_cpus()
3578 init_kmem_cache_cpus(s); in alloc_kmem_cache_cpus()
3630 static void free_kmem_cache_nodes(struct kmem_cache *s) in free_kmem_cache_nodes() argument
3635 for_each_kmem_cache_node(s, node, n) { in free_kmem_cache_nodes()
3636 s->node[node] = NULL; in free_kmem_cache_nodes()
3641 void __kmem_cache_release(struct kmem_cache *s) in __kmem_cache_release() argument
3643 cache_random_seq_destroy(s); in __kmem_cache_release()
3644 free_percpu(s->cpu_slab); in __kmem_cache_release()
3645 free_kmem_cache_nodes(s); in __kmem_cache_release()
3648 static int init_kmem_cache_nodes(struct kmem_cache *s) in init_kmem_cache_nodes() argument
3663 free_kmem_cache_nodes(s); in init_kmem_cache_nodes()
3668 s->node[node] = n; in init_kmem_cache_nodes()
3673 static void set_min_partial(struct kmem_cache *s, unsigned long min) in set_min_partial() argument
3679 s->min_partial = min; in set_min_partial()
3682 static void set_cpu_partial(struct kmem_cache *s) in set_cpu_partial() argument
3702 if (!kmem_cache_has_cpu_partial(s)) in set_cpu_partial()
3703 slub_set_cpu_partial(s, 0); in set_cpu_partial()
3704 else if (s->size >= PAGE_SIZE) in set_cpu_partial()
3705 slub_set_cpu_partial(s, 2); in set_cpu_partial()
3706 else if (s->size >= 1024) in set_cpu_partial()
3707 slub_set_cpu_partial(s, 6); in set_cpu_partial()
3708 else if (s->size >= 256) in set_cpu_partial()
3709 slub_set_cpu_partial(s, 13); in set_cpu_partial()
3711 slub_set_cpu_partial(s, 30); in set_cpu_partial()
3719 static int calculate_sizes(struct kmem_cache *s, int forced_order) in calculate_sizes() argument
3721 slab_flags_t flags = s->flags; in calculate_sizes()
3722 unsigned int size = s->object_size; in calculate_sizes()
3739 !s->ctor) in calculate_sizes()
3740 s->flags |= __OBJECT_POISON; in calculate_sizes()
3742 s->flags &= ~__OBJECT_POISON; in calculate_sizes()
3750 if ((flags & SLAB_RED_ZONE) && size == s->object_size) in calculate_sizes()
3758 s->inuse = size; in calculate_sizes()
3761 ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) || in calculate_sizes()
3762 s->ctor) { in calculate_sizes()
3777 s->offset = size; in calculate_sizes()
3785 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *)); in calculate_sizes()
3797 kasan_cache_create(s, &size, &s->flags); in calculate_sizes()
3809 s->red_left_pad = sizeof(void *); in calculate_sizes()
3810 s->red_left_pad = ALIGN(s->red_left_pad, s->align); in calculate_sizes()
3811 size += s->red_left_pad; in calculate_sizes()
3820 size = ALIGN(size, s->align); in calculate_sizes()
3821 s->size = size; in calculate_sizes()
3822 s->reciprocal_size = reciprocal_value(size); in calculate_sizes()
3831 s->allocflags = 0; in calculate_sizes()
3833 s->allocflags |= __GFP_COMP; in calculate_sizes()
3835 if (s->flags & SLAB_CACHE_DMA) in calculate_sizes()
3836 s->allocflags |= GFP_DMA; in calculate_sizes()
3838 if (s->flags & SLAB_CACHE_DMA32) in calculate_sizes()
3839 s->allocflags |= GFP_DMA32; in calculate_sizes()
3841 if (s->flags & SLAB_RECLAIM_ACCOUNT) in calculate_sizes()
3842 s->allocflags |= __GFP_RECLAIMABLE; in calculate_sizes()
3847 s->oo = oo_make(order, size); in calculate_sizes()
3848 s->min = oo_make(get_order(size), size); in calculate_sizes()
3849 if (oo_objects(s->oo) > oo_objects(s->max)) in calculate_sizes()
3850 s->max = s->oo; in calculate_sizes()
3852 return !!oo_objects(s->oo); in calculate_sizes()
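
calculate_sizes() ends by packing the chosen page order and the resulting objects-per-slab count into s->oo and s->min via oo_make(); oo_order()/oo_objects() unpack them again (as seen later in order_show() and objs_per_slab_show()). A sketch of that packing, assuming the 16-bit order/objects split the kernel uses; fake_oo and PAGE_SZ are illustrative stand-ins.

/* Sketch of the order/objects packing, assuming a 16-bit split. */
#include <assert.h>

#define OO_SHIFT 16
#define OO_MASK  ((1u << OO_SHIFT) - 1)
#define PAGE_SZ  4096u

struct fake_oo { unsigned int x; };

/* How many objects of @size fit in an order-@order (2^order pages) slab. */
static unsigned int order_objects(unsigned int order, unsigned int size)
{
	return (PAGE_SZ << order) / size;
}

static struct fake_oo oo_make(unsigned int order, unsigned int size)
{
	struct fake_oo oo = { (order << OO_SHIFT) + order_objects(order, size) };
	return oo;
}

static unsigned int oo_order(struct fake_oo oo)   { return oo.x >> OO_SHIFT; }
static unsigned int oo_objects(struct fake_oo oo) { return oo.x & OO_MASK; }

int main(void)
{
	struct fake_oo oo = oo_make(1, 192);	/* order-1 slab of 192-byte objects */

	assert(oo_order(oo) == 1);
	assert(oo_objects(oo) == 42);		/* (2 * 4096) / 192 */
	return 0;
}
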
3855 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) in kmem_cache_open() argument
3857 s->flags = kmem_cache_flags(s->size, flags, s->name); in kmem_cache_open()
3859 s->random = get_random_long(); in kmem_cache_open()
3862 if (!calculate_sizes(s, -1)) in kmem_cache_open()
3869 if (get_order(s->size) > get_order(s->object_size)) { in kmem_cache_open()
3870 s->flags &= ~DEBUG_METADATA_FLAGS; in kmem_cache_open()
3871 s->offset = 0; in kmem_cache_open()
3872 if (!calculate_sizes(s, -1)) in kmem_cache_open()
3879 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0) in kmem_cache_open()
3881 s->flags |= __CMPXCHG_DOUBLE; in kmem_cache_open()
3888 set_min_partial(s, ilog2(s->size) / 2); in kmem_cache_open()
3890 set_cpu_partial(s); in kmem_cache_open()
3893 s->remote_node_defrag_ratio = 1000; in kmem_cache_open()
3898 if (init_cache_random_seq(s)) in kmem_cache_open()
3902 if (!init_kmem_cache_nodes(s)) in kmem_cache_open()
3905 if (alloc_kmem_cache_cpus(s)) in kmem_cache_open()
3909 __kmem_cache_release(s); in kmem_cache_open()
3913 static void list_slab_objects(struct kmem_cache *s, struct page *page, in list_slab_objects() argument
3921 slab_err(s, page, text, s->name); in list_slab_objects()
3924 map = get_map(s, page); in list_slab_objects()
3925 for_each_object(p, s, addr, page->objects) { in list_slab_objects()
3927 if (!test_bit(__obj_to_index(s, addr, p), map)) { in list_slab_objects()
3929 print_tracking(s, p); in list_slab_objects()
3942 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) in free_partial() argument
3954 list_slab_objects(s, page, in free_partial()
3961 discard_slab(s, page); in free_partial()
3964 bool __kmem_cache_empty(struct kmem_cache *s) in __kmem_cache_empty() argument
3969 for_each_kmem_cache_node(s, node, n) in __kmem_cache_empty()
3970 if (n->nr_partial || slabs_node(s, node)) in __kmem_cache_empty()
3978 int __kmem_cache_shutdown(struct kmem_cache *s) in __kmem_cache_shutdown() argument
3983 flush_all(s); in __kmem_cache_shutdown()
3985 for_each_kmem_cache_node(s, node, n) { in __kmem_cache_shutdown()
3986 free_partial(s, n); in __kmem_cache_shutdown()
3987 if (n->nr_partial || slabs_node(s, node)) in __kmem_cache_shutdown()
4027 struct kmem_cache *s; in __kmalloc() local
4033 s = kmalloc_slab(size, flags); in __kmalloc()
4035 if (unlikely(ZERO_OR_NULL_PTR(s))) in __kmalloc()
4036 return s; in __kmalloc()
4038 ret = slab_alloc(s, flags, _RET_IP_, size); in __kmalloc()
4040 trace_kmalloc(_RET_IP_, ret, size, s->size, flags); in __kmalloc()
4042 ret = kasan_kmalloc(s, ret, size, flags); in __kmalloc()
4068 struct kmem_cache *s; in __kmalloc_node() local
4081 s = kmalloc_slab(size, flags); in __kmalloc_node()
4083 if (unlikely(ZERO_OR_NULL_PTR(s))) in __kmalloc_node()
4084 return s; in __kmalloc_node()
4086 ret = slab_alloc_node(s, flags, node, _RET_IP_, size); in __kmalloc_node()
4088 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); in __kmalloc_node()
4090 ret = kasan_kmalloc(s, ret, size, flags); in __kmalloc_node()
4109 struct kmem_cache *s; in __check_heap_object() local
4117 s = page->slab_cache; in __check_heap_object()
4128 offset = (ptr - page_address(page)) % s->size; in __check_heap_object()
4131 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) { in __check_heap_object()
4132 if (offset < s->red_left_pad) in __check_heap_object()
4134 s->name, to_user, offset, n); in __check_heap_object()
4135 offset -= s->red_left_pad; in __check_heap_object()
4139 if (offset >= s->useroffset && in __check_heap_object()
4140 offset - s->useroffset <= s->usersize && in __check_heap_object()
4141 n <= s->useroffset - offset + s->usersize) in __check_heap_object()
4150 object_size = slab_ksize(s); in __check_heap_object()
4153 usercopy_warn("SLUB object", s->name, to_user, offset, n); in __check_heap_object()
4157 usercopy_abort("SLUB object", s->name, to_user, offset, n); in __check_heap_object()
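
__check_heap_object() is the hardened-usercopy hook: it locates the copy's offset within the object's stride, rejects anything inside the left red zone, and accepts copies that stay within the cache's declared useroffset/usersize window. The sketch below models only that window check with a mock fake_cache; the real function also handles KFENCE objects and falls back to a whole-object check that merely warns.

/* Sketch of the usercopy whitelist-window check; illustrative only. */
#include <stdbool.h>
#include <stdio.h>

struct fake_cache {
	unsigned long useroffset;	/* start of the usercopy whitelist window */
	unsigned long usersize;		/* length of that window */
	unsigned long red_left_pad;	/* left red zone when SLAB_RED_ZONE is set */
};

static bool usercopy_ok(const struct fake_cache *s, unsigned long offset,
			unsigned long n)
{
	/* offset is the copy's position within the object's stride. */
	if (offset < s->red_left_pad)
		return false;			/* points into the left red zone */
	offset -= s->red_left_pad;

	/* Whitelisted window check, as in __check_heap_object(). */
	return offset >= s->useroffset &&
	       offset - s->useroffset <= s->usersize &&
	       n <= s->useroffset - offset + s->usersize;
}

int main(void)
{
	struct fake_cache s = { .useroffset = 16, .usersize = 32, .red_left_pad = 0 };

	printf("%d\n", usercopy_ok(&s, 16, 32));	/* 1: exactly the window */
	printf("%d\n", usercopy_ok(&s, 40, 16));	/* 0: would run past it */
	return 0;
}
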
4215 int __kmem_cache_shrink(struct kmem_cache *s) in __kmem_cache_shrink() argument
4227 flush_all(s); in __kmem_cache_shrink()
4228 for_each_kmem_cache_node(s, node, n) { in __kmem_cache_shrink()
4268 discard_slab(s, page); in __kmem_cache_shrink()
4270 if (slabs_node(s, node)) in __kmem_cache_shrink()
4279 struct kmem_cache *s; in slab_mem_going_offline_callback() local
4282 list_for_each_entry(s, &slab_caches, list) in slab_mem_going_offline_callback()
4283 __kmem_cache_shrink(s); in slab_mem_going_offline_callback()
4292 struct kmem_cache *s; in slab_mem_offline_callback() local
4306 list_for_each_entry(s, &slab_caches, list) { in slab_mem_offline_callback()
4307 n = get_node(s, offline_node); in slab_mem_offline_callback()
4315 BUG_ON(slabs_node(s, offline_node)); in slab_mem_offline_callback()
4317 s->node[offline_node] = NULL; in slab_mem_offline_callback()
4327 struct kmem_cache *s; in slab_mem_going_online_callback() local
4345 list_for_each_entry(s, &slab_caches, list) { in slab_mem_going_online_callback()
4357 s->node[nid] = n; in slab_mem_going_online_callback()
4409 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); in bootstrap() local
4412 memcpy(s, static_cache, kmem_cache->object_size); in bootstrap()
4419 __flush_cpu_slab(s, smp_processor_id()); in bootstrap()
4420 for_each_kmem_cache_node(s, node, n) { in bootstrap()
4424 p->slab_cache = s; in bootstrap()
4428 p->slab_cache = s; in bootstrap()
4431 list_add(&s->list, &slab_caches); in bootstrap()
4432 return s; in bootstrap()
4486 struct kmem_cache *s; in __kmem_cache_alias() local
4488 s = find_mergeable(size, align, flags, name, ctor); in __kmem_cache_alias()
4489 if (s) { in __kmem_cache_alias()
4490 s->refcount++; in __kmem_cache_alias()
4496 s->object_size = max(s->object_size, size); in __kmem_cache_alias()
4497 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); in __kmem_cache_alias()
4499 if (sysfs_slab_alias(s, name)) { in __kmem_cache_alias()
4500 s->refcount--; in __kmem_cache_alias()
4501 s = NULL; in __kmem_cache_alias()
4505 return s; in __kmem_cache_alias()
4508 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags) in __kmem_cache_create() argument
4512 err = kmem_cache_open(s, flags); in __kmem_cache_create()
4520 err = sysfs_slab_add(s); in __kmem_cache_create()
4522 __kmem_cache_release(s); in __kmem_cache_create()
4526 if (s->flags & SLAB_STORE_USER) in __kmem_cache_create()
4527 debugfs_slab_add(s); in __kmem_cache_create()
4534 struct kmem_cache *s; in __kmalloc_track_caller() local
4540 s = kmalloc_slab(size, gfpflags); in __kmalloc_track_caller()
4542 if (unlikely(ZERO_OR_NULL_PTR(s))) in __kmalloc_track_caller()
4543 return s; in __kmalloc_track_caller()
4545 ret = slab_alloc(s, gfpflags, caller, size); in __kmalloc_track_caller()
4548 trace_kmalloc(caller, ret, size, s->size, gfpflags); in __kmalloc_track_caller()
4558 struct kmem_cache *s; in __kmalloc_node_track_caller() local
4571 s = kmalloc_slab(size, gfpflags); in __kmalloc_node_track_caller()
4573 if (unlikely(ZERO_OR_NULL_PTR(s))) in __kmalloc_node_track_caller()
4574 return s; in __kmalloc_node_track_caller()
4576 ret = slab_alloc_node(s, gfpflags, node, caller, size); in __kmalloc_node_track_caller()
4579 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); in __kmalloc_node_track_caller()
4599 static void validate_slab(struct kmem_cache *s, struct page *page) in validate_slab() argument
4607 if (!check_slab(s, page) || !on_freelist(s, page, NULL)) in validate_slab()
4611 map = get_map(s, page); in validate_slab()
4612 for_each_object(p, s, addr, page->objects) { in validate_slab()
4613 u8 val = test_bit(__obj_to_index(s, addr, p), map) ? in validate_slab()
4616 if (!check_object(s, page, p, val)) in validate_slab()
4624 static int validate_slab_node(struct kmem_cache *s, in validate_slab_node() argument
4634 validate_slab(s, page); in validate_slab_node()
4639 s->name, count, n->nr_partial); in validate_slab_node()
4641 if (!(s->flags & SLAB_STORE_USER)) in validate_slab_node()
4645 validate_slab(s, page); in validate_slab_node()
4650 s->name, count, atomic_long_read(&n->nr_slabs)); in validate_slab_node()
4657 static long validate_slab_cache(struct kmem_cache *s) in validate_slab_cache() argument
4663 flush_all(s); in validate_slab_cache()
4664 for_each_kmem_cache_node(s, node, n) in validate_slab_cache()
4665 count += validate_slab_node(s, n); in validate_slab_cache()
4724 static int add_location(struct loc_track *t, struct kmem_cache *s, in add_location() argument
4800 static void process_slab(struct loc_track *t, struct kmem_cache *s, in process_slab() argument
4807 __fill_map(obj_map, s, page); in process_slab()
4809 for_each_object(p, s, addr, page->objects) in process_slab()
4810 if (!test_bit(__obj_to_index(s, addr, p), obj_map)) in process_slab()
4811 add_location(t, s, get_track(s, p, alloc)); in process_slab()
4907 static ssize_t show_slab_objects(struct kmem_cache *s, in show_slab_objects() argument
4923 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, in show_slab_objects()
4973 for_each_kmem_cache_node(s, node, n) { in show_slab_objects()
4991 for_each_kmem_cache_node(s, node, n) { in show_slab_objects()
5018 ssize_t (*show)(struct kmem_cache *s, char *buf);
5019 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
5030 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) in slab_size_show() argument
5032 return sprintf(buf, "%u\n", s->size); in slab_size_show()
5036 static ssize_t align_show(struct kmem_cache *s, char *buf) in align_show() argument
5038 return sprintf(buf, "%u\n", s->align); in align_show()
5042 static ssize_t object_size_show(struct kmem_cache *s, char *buf) in object_size_show() argument
5044 return sprintf(buf, "%u\n", s->object_size); in object_size_show()
5048 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) in objs_per_slab_show() argument
5050 return sprintf(buf, "%u\n", oo_objects(s->oo)); in objs_per_slab_show()
5054 static ssize_t order_show(struct kmem_cache *s, char *buf) in order_show() argument
5056 return sprintf(buf, "%u\n", oo_order(s->oo)); in order_show()
5060 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) in min_partial_show() argument
5062 return sprintf(buf, "%lu\n", s->min_partial); in min_partial_show()
5065 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, in min_partial_store() argument
5075 set_min_partial(s, min); in min_partial_store()
5080 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) in cpu_partial_show() argument
5082 return sprintf(buf, "%u\n", slub_cpu_partial(s)); in cpu_partial_show()
5085 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, in cpu_partial_store() argument
5094 if (objects && !kmem_cache_has_cpu_partial(s)) in cpu_partial_store()
5097 slub_set_cpu_partial(s, objects); in cpu_partial_store()
5098 flush_all(s); in cpu_partial_store()
5103 static ssize_t ctor_show(struct kmem_cache *s, char *buf) in ctor_show() argument
5105 if (!s->ctor) in ctor_show()
5107 return sprintf(buf, "%pS\n", s->ctor); in ctor_show()
5111 static ssize_t aliases_show(struct kmem_cache *s, char *buf) in aliases_show() argument
5113 return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); in aliases_show()
5117 static ssize_t partial_show(struct kmem_cache *s, char *buf) in partial_show() argument
5119 return show_slab_objects(s, buf, SO_PARTIAL); in partial_show()
5123 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) in cpu_slabs_show() argument
5125 return show_slab_objects(s, buf, SO_CPU); in cpu_slabs_show()
5129 static ssize_t objects_show(struct kmem_cache *s, char *buf) in objects_show() argument
5131 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); in objects_show()
5135 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) in objects_partial_show() argument
5137 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); in objects_partial_show()
5141 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) in slabs_cpu_partial_show() argument
5151 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); in slabs_cpu_partial_show()
5165 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); in slabs_cpu_partial_show()
5176 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) in reclaim_account_show() argument
5178 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); in reclaim_account_show()
5182 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) in hwcache_align_show() argument
5184 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); in hwcache_align_show()
5189 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) in cache_dma_show() argument
5191 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); in cache_dma_show()
5196 static ssize_t usersize_show(struct kmem_cache *s, char *buf) in usersize_show() argument
5198 return sprintf(buf, "%u\n", s->usersize); in usersize_show()
5202 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) in destroy_by_rcu_show() argument
5204 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); in destroy_by_rcu_show()
5209 static ssize_t slabs_show(struct kmem_cache *s, char *buf) in slabs_show() argument
5211 return show_slab_objects(s, buf, SO_ALL); in slabs_show()
5215 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) in total_objects_show() argument
5217 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); in total_objects_show()
5221 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) in sanity_checks_show() argument
5223 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); in sanity_checks_show()
5227 static ssize_t trace_show(struct kmem_cache *s, char *buf) in trace_show() argument
5229 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); in trace_show()
5233 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) in red_zone_show() argument
5235 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); in red_zone_show()
5240 static ssize_t poison_show(struct kmem_cache *s, char *buf) in poison_show() argument
5242 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON)); in poison_show()
5247 static ssize_t store_user_show(struct kmem_cache *s, char *buf) in store_user_show() argument
5249 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); in store_user_show()
5254 static ssize_t validate_show(struct kmem_cache *s, char *buf) in validate_show() argument
5259 static ssize_t validate_store(struct kmem_cache *s, in validate_store() argument
5265 ret = validate_slab_cache(s); in validate_store()
5276 static ssize_t failslab_show(struct kmem_cache *s, char *buf) in failslab_show() argument
5278 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); in failslab_show()
5283 static ssize_t shrink_show(struct kmem_cache *s, char *buf) in shrink_show() argument
5288 static ssize_t shrink_store(struct kmem_cache *s, in shrink_store() argument
5292 kmem_cache_shrink(s); in shrink_store()
5300 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) in remote_node_defrag_ratio_show() argument
5302 return sprintf(buf, "%u\n", s->remote_node_defrag_ratio / 10); in remote_node_defrag_ratio_show()
5305 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, in remote_node_defrag_ratio_store() argument
5317 s->remote_node_defrag_ratio = ratio * 10; in remote_node_defrag_ratio_store()
5325 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) in show_stat() argument
5336 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; in show_stat()
5354 static void clear_stat(struct kmem_cache *s, enum stat_item si) in clear_stat() argument
5359 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; in clear_stat()
5363 static ssize_t text##_show(struct kmem_cache *s, char *buf) \
5365 return show_stat(s, buf, si); \
5367 static ssize_t text##_store(struct kmem_cache *s, \
5372 clear_stat(s, si); \
5485 struct kmem_cache *s; in slab_attr_show() local
5489 s = to_slab(kobj); in slab_attr_show()
5494 err = attribute->show(s, buf); in slab_attr_show()
5504 struct kmem_cache *s; in slab_attr_store() local
5508 s = to_slab(kobj); in slab_attr_store()
5513 err = attribute->store(s, buf, len); in slab_attr_store()
5534 static inline struct kset *cache_kset(struct kmem_cache *s) in cache_kset() argument
5545 static char *create_unique_id(struct kmem_cache *s) in create_unique_id() argument
5561 if (s->flags & SLAB_CACHE_DMA) in create_unique_id()
5563 if (s->flags & SLAB_CACHE_DMA32) in create_unique_id()
5565 if (s->flags & SLAB_RECLAIM_ACCOUNT) in create_unique_id()
5567 if (s->flags & SLAB_CONSISTENCY_CHECKS) in create_unique_id()
5569 if (s->flags & SLAB_ACCOUNT) in create_unique_id()
5573 p += sprintf(p, "%07u", s->size); in create_unique_id()
5579 static int sysfs_slab_add(struct kmem_cache *s) in sysfs_slab_add() argument
5583 struct kset *kset = cache_kset(s); in sysfs_slab_add()
5584 int unmergeable = slab_unmergeable(s); in sysfs_slab_add()
5587 kobject_init(&s->kobj, &slab_ktype); in sysfs_slab_add()
5601 sysfs_remove_link(&slab_kset->kobj, s->name); in sysfs_slab_add()
5602 name = s->name; in sysfs_slab_add()
5608 name = create_unique_id(s); in sysfs_slab_add()
5613 s->kobj.kset = kset; in sysfs_slab_add()
5614 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); in sysfs_slab_add()
5618 err = sysfs_create_group(&s->kobj, &slab_attr_group); in sysfs_slab_add()
5624 sysfs_slab_alias(s, s->name); in sysfs_slab_add()
5631 kobject_del(&s->kobj); in sysfs_slab_add()
5635 void sysfs_slab_unlink(struct kmem_cache *s) in sysfs_slab_unlink() argument
5638 kobject_del(&s->kobj); in sysfs_slab_unlink()
5641 void sysfs_slab_release(struct kmem_cache *s) in sysfs_slab_release() argument
5644 kobject_put(&s->kobj); in sysfs_slab_release()
5652 struct kmem_cache *s; member
5659 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) in sysfs_slab_alias() argument
5668 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); in sysfs_slab_alias()
5675 al->s = s; in sysfs_slab_alias()
5684 struct kmem_cache *s; in slab_sysfs_init() local
5698 list_for_each_entry(s, &slab_caches, list) { in slab_sysfs_init()
5699 err = sysfs_slab_add(s); in slab_sysfs_init()
5702 s->name); in slab_sysfs_init()
5709 err = sysfs_slab_alias(al->s, al->name); in slab_sysfs_init()
5810 struct kmem_cache *s = file_inode(filep)->i_private; in slab_debug_trace_open() local
5816 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); in slab_debug_trace_open()
5834 flush_all(s); in slab_debug_trace_open()
5836 for_each_kmem_cache_node(s, node, n) { in slab_debug_trace_open()
5845 process_slab(t, s, page, alloc, obj_map); in slab_debug_trace_open()
5847 process_slab(t, s, page, alloc, obj_map); in slab_debug_trace_open()
5871 static void debugfs_slab_add(struct kmem_cache *s) in debugfs_slab_add() argument
5878 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root); in debugfs_slab_add()
5881 slab_cache_dir, s, &slab_debugfs_fops); in debugfs_slab_add()
5884 slab_cache_dir, s, &slab_debugfs_fops); in debugfs_slab_add()
5887 void debugfs_slab_release(struct kmem_cache *s) in debugfs_slab_release() argument
5889 debugfs_remove_recursive(debugfs_lookup(s->name, slab_debugfs_root)); in debugfs_slab_release()
5894 struct kmem_cache *s; in slab_debugfs_init() local
5898 list_for_each_entry(s, &slab_caches, list) in slab_debugfs_init()
5899 if (s->flags & SLAB_STORE_USER) in slab_debugfs_init()
5900 debugfs_slab_add(s); in slab_debugfs_init()
5911 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) in get_slabinfo() argument
5919 for_each_kmem_cache_node(s, node, n) { in get_slabinfo()
5929 sinfo->objects_per_slab = oo_objects(s->oo); in get_slabinfo()
5930 sinfo->cache_order = oo_order(s->oo); in get_slabinfo()
5934 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) in slabinfo_show_stats() argument