
Lines Matching +full:page +full:- +full:size

1 // SPDX-License-Identifier: GPL-2.0
33 #include <linux/fault-inject.h>
46 * 2. node->list_lock
47 * 3. slab_lock(page) (Only on some arches and for debugging)
56 * A. page->freelist -> List of free objects in a page
57 * B. page->inuse -> Number of objects in use
58 * C. page->objects -> Number of objects in page
59 * D. page->frozen -> frozen state
63 * slab is the one who can perform list operations on the page. Other
66 * page's freelist.
94 * minimal so we rely on the page allocators per cpu caches for
97 * page->frozen The slab is frozen and exempt from list processing.
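
For orientation, the fields named in A-D above live in the slab-specific part of struct page. The following is a rough illustrative sketch of their shape only, not the kernel's actual definition: in mainline, inuse/objects/frozen are bitfields sharing a word with page->counters so they can be swapped together with the freelist by a double-word cmpxchg.

struct slab_page_meta {			/* illustration only */
	void *freelist;			/* A: first free object in the page */
	unsigned inuse:16;		/* B: objects currently allocated */
	unsigned objects:15;		/* C: total objects in the page (hence MAX_OBJS_PER_PAGE = 32767) */
	unsigned frozen:1;		/* D: page is a cpu slab, exempt from list processing */
};
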
134 p += s->red_left_pad; in fixup_red_left()
151 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
153 * - Variable sizing of the per node arrays
194 #define OO_MASK ((1 << OO_SHIFT) - 1)
195 #define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
233 * avoid this_cpu_add()'s irq-disable overhead. in stat()
235 raw_cpu_inc(s->cpu_slab->stat[si]); in stat()
245 * with an XOR of the address where the pointer is held and a per-cache
262 return (void *)((unsigned long)ptr ^ s->random ^ in freelist_ptr()
279 return freelist_dereference(s, object + s->offset); in get_freepointer()
284 prefetch(object + s->offset); in prefetch_freepointer()
295 freepointer_addr = (unsigned long)object + s->offset; in get_freepointer_safe()
302 unsigned long freeptr_addr = (unsigned long)object + s->offset; in set_freepointer()
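
The freelist_ptr()/set_freepointer() pair above implements CONFIG_SLAB_FREELIST_HARDENED: the stored free pointer is XORed with a per-cache secret (s->random) and, in recent mainline, with a byte-swapped copy of the address it is stored at, so applying the same transform twice recovers the pointer. Below is a minimal userspace sketch of that round trip; all names and values are made up.

#include <stdint.h>
#include <stdio.h>

/* Toy model of the hardened free-pointer encoding: XOR the pointer with a
 * per-cache secret and a byte-swapped copy of the address it is stored at,
 * so a leaked freelist word is not directly usable as a kernel pointer. */
static uint64_t obfuscate(uint64_t ptr, uint64_t secret, uint64_t stored_at)
{
	return ptr ^ secret ^ __builtin_bswap64(stored_at);
}

int main(void)
{
	uint64_t obj = 0xffff888012345678ull;		/* hypothetical object address */
	uint64_t secret = 0xdeadbeefcafef00dull;	/* stands in for s->random */
	uint64_t slot = obj + 0x20;			/* hypothetical s->offset slot */

	uint64_t stored = obfuscate(obj, secret, slot);
	uint64_t decoded = obfuscate(stored, secret, slot);	/* XOR is its own inverse */

	printf("stored=%#llx round-trip ok=%d\n",
	       (unsigned long long)stored, decoded == obj);
	return 0;
}
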
314 __p < (__addr) + (__objects) * (__s)->size; \
315 __p += (__s)->size)
317 static inline unsigned int order_objects(unsigned int order, unsigned int size) in order_objects() argument
319 return ((unsigned int)PAGE_SIZE << order) / size; in order_objects()
323 unsigned int size) in oo_make() argument
326 (order << OO_SHIFT) + order_objects(order, size) in oo_make()
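
order_objects() and oo_make() above pack the page order and the object count into a single word; oo_order()/oo_objects(), used elsewhere in this file, unpack it. A small standalone sketch of the encoding, assuming OO_SHIFT is 16 as in mainline:

#include <assert.h>

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)

struct kmem_cache_order_objects { unsigned int x; };

static unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;		/* page order in the high bits */
}

static unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;		/* object count in the low bits */
}

int main(void)
{
	/* e.g. an order-1 slab (8 KiB with 4 KiB pages) holding 42 objects */
	struct kmem_cache_order_objects oo = { (1 << OO_SHIFT) + 42 };

	assert(oo_order(oo) == 1 && oo_objects(oo) == 42);
	return 0;
}
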
345 static __always_inline void slab_lock(struct page *page) in slab_lock() argument
347 VM_BUG_ON_PAGE(PageTail(page), page); in slab_lock()
348 bit_spin_lock(PG_locked, &page->flags); in slab_lock()
351 static __always_inline void slab_unlock(struct page *page) in slab_unlock() argument
353 VM_BUG_ON_PAGE(PageTail(page), page); in slab_unlock()
354 __bit_spin_unlock(PG_locked, &page->flags); in slab_unlock()
358 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page, in __cmpxchg_double_slab() argument
366 if (s->flags & __CMPXCHG_DOUBLE) { in __cmpxchg_double_slab()
367 if (cmpxchg_double(&page->freelist, &page->counters, in __cmpxchg_double_slab()
374 slab_lock(page); in __cmpxchg_double_slab()
375 if (page->freelist == freelist_old && in __cmpxchg_double_slab()
376 page->counters == counters_old) { in __cmpxchg_double_slab()
377 page->freelist = freelist_new; in __cmpxchg_double_slab()
378 page->counters = counters_new; in __cmpxchg_double_slab()
379 slab_unlock(page); in __cmpxchg_double_slab()
382 slab_unlock(page); in __cmpxchg_double_slab()
389 pr_info("%s %s: cmpxchg double redo ", n, s->name); in __cmpxchg_double_slab()
395 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, in cmpxchg_double_slab() argument
402 if (s->flags & __CMPXCHG_DOUBLE) { in cmpxchg_double_slab()
403 if (cmpxchg_double(&page->freelist, &page->counters, in cmpxchg_double_slab()
413 slab_lock(page); in cmpxchg_double_slab()
414 if (page->freelist == freelist_old && in cmpxchg_double_slab()
415 page->counters == counters_old) { in cmpxchg_double_slab()
416 page->freelist = freelist_new; in cmpxchg_double_slab()
417 page->counters = counters_new; in cmpxchg_double_slab()
418 slab_unlock(page); in cmpxchg_double_slab()
422 slab_unlock(page); in cmpxchg_double_slab()
430 pr_info("%s %s: cmpxchg double redo ", n, s->name); in cmpxchg_double_slab()
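
Both helpers above implement the same contract: page->freelist and page->counters must be observed and replaced as one unit, either with a hardware double-word cmpxchg or under slab_lock(). A userspace analogue of that pattern using C11 atomics is sketched below; it is illustrative only, and a 16-byte compare-exchange may need -mcx16 or -latomic.

#include <stdatomic.h>
#include <stdio.h>

/* The freelist pointer and the packed counters must change together, so
 * both live in one value updated by a single compare-and-swap, retried on
 * contention. */
struct slab_state {
	void *freelist;
	unsigned long counters;
};

int main(void)
{
	_Atomic struct slab_state slab = { NULL, 0 };
	struct slab_state old, new;

	do {
		old = atomic_load(&slab);
		new.freelist = (void *)0x1000;	/* pretend we refilled the list */
		new.counters = old.counters + 1;
	} while (!atomic_compare_exchange_weak(&slab, &old, new));

	printf("counters now %lu\n", atomic_load(&slab).counters);
	return 0;
}
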
441 * Determine a map of objects in use on a page.
443 * Node listlock must be held to guarantee that the page does
446 static unsigned long *get_map(struct kmem_cache *s, struct page *page) in get_map() argument
450 void *addr = page_address(page); in get_map()
456 bitmap_zero(object_map, page->objects); in get_map()
458 for (p = page->freelist; p; p = get_freepointer(s, p)) in get_map()
472 if (s->flags & SLAB_RED_ZONE) in size_from_object()
473 return s->size - s->red_left_pad; in size_from_object()
475 return s->size; in size_from_object()
480 if (s->flags & SLAB_RED_ZONE) in restore_red_left()
481 p -= s->red_left_pad; in restore_red_left()
518 /* Verify that a pointer has an address that is valid within a slab page */
520 struct page *page, void *object) in check_valid_pointer() argument
527 base = page_address(page); in check_valid_pointer()
530 if (object < base || object >= base + page->objects * s->size || in check_valid_pointer()
531 (object - base) % s->size) { in check_valid_pointer()
552 return s->offset >= s->inuse; in freeptr_outside_object()
562 return s->inuse + sizeof(void *); in get_info_end()
564 return s->inuse; in get_info_end()
587 nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3); in set_track()
591 p->addrs[nr_entries] = 0; in set_track()
593 p->addr = addr; in set_track()
594 p->cpu = smp_processor_id(); in set_track()
595 p->pid = current->pid; in set_track()
596 p->when = jiffies; in set_track()
604 if (!(s->flags & SLAB_STORE_USER)) in init_tracking()
613 if (!t->addr) in print_track()
617 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid); in print_track()
622 if (t->addrs[i]) in print_track()
623 pr_err("\t%pS\n", (void *)t->addrs[i]); in print_track()
633 if (!(s->flags & SLAB_STORE_USER)) in print_tracking()
640 static void print_page_info(struct page *page) in print_page_info() argument
643 page, page->objects, page->inuse, page->freelist, page->flags); in print_page_info()
656 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf); in slab_bug()
657 pr_err("-----------------------------------------------------------------------------\n\n"); in slab_bug()
671 pr_err("FIX %s: %pV\n", s->name, &vaf); in slab_fix()
675 static bool freelist_corrupted(struct kmem_cache *s, struct page *page, in freelist_corrupted() argument
678 if ((s->flags & SLAB_CONSISTENCY_CHECKS) && in freelist_corrupted()
679 !check_valid_pointer(s, page, nextfree) && freelist) { in freelist_corrupted()
680 object_err(s, page, *freelist, "Freechain corrupt"); in freelist_corrupted()
689 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) in print_trailer() argument
692 u8 *addr = page_address(page); in print_trailer()
696 print_page_info(page); in print_trailer()
699 p, p - addr, get_freepointer(s, p)); in print_trailer()
701 if (s->flags & SLAB_RED_ZONE) in print_trailer()
702 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad, in print_trailer()
703 s->red_left_pad); in print_trailer()
705 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16); in print_trailer()
708 min_t(unsigned int, s->object_size, PAGE_SIZE)); in print_trailer()
709 if (s->flags & SLAB_RED_ZONE) in print_trailer()
710 print_section(KERN_ERR, "Redzone ", p + s->object_size, in print_trailer()
711 s->inuse - s->object_size); in print_trailer()
715 if (s->flags & SLAB_STORE_USER) in print_trailer()
723 size_from_object(s) - off); in print_trailer()
728 void object_err(struct kmem_cache *s, struct page *page, in object_err() argument
732 print_trailer(s, page, object); in object_err()
735 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page, in slab_err() argument
745 print_page_info(page); in slab_err()
753 if (s->flags & SLAB_RED_ZONE) in init_object()
754 memset(p - s->red_left_pad, val, s->red_left_pad); in init_object()
756 if (s->flags & __OBJECT_POISON) { in init_object()
757 memset(p, POISON_FREE, s->object_size - 1); in init_object()
758 p[s->object_size - 1] = POISON_END; in init_object()
761 if (s->flags & SLAB_RED_ZONE) in init_object()
762 memset(p + s->object_size, val, s->inuse - s->object_size); in init_object()
768 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data); in restore_bytes()
769 memset(from, data, to - from); in restore_bytes()
772 static int check_bytes_and_report(struct kmem_cache *s, struct page *page, in check_bytes_and_report() argument
778 u8 *addr = page_address(page); in check_bytes_and_report()
787 while (end > fault && end[-1] == value) in check_bytes_and_report()
788 end--; in check_bytes_and_report()
791 pr_err("INFO: 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n", in check_bytes_and_report()
792 fault, end - 1, fault - addr, in check_bytes_and_report()
794 print_trailer(s, page, object); in check_bytes_and_report()
811 * object + s->object_size
819 * object + s->inuse
830 * object + s->size
831 * Nothing is used beyond s->size.
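
As a worked example of the layout described above, take hypothetical numbers: a 24-byte object on a 64-bit build with SLAB_POISON, SLAB_RED_ZONE and SLAB_STORE_USER, 8-byte alignment, and a 24-byte struct track without stored stack traces. Real values depend on the kernel configuration.

#include <stdio.h>

int main(void)
{
	unsigned int object_size = 24;			/* payload, already word aligned  */
	unsigned int inuse = object_size + 8;		/* + right red zone word          */
	unsigned int offset = inuse;			/* free pointer kept off-object   */
	unsigned int size = offset + 8			/* + free pointer                 */
			  + 2 * 24			/* + alloc and free struct track  */
			  + 8				/* + right padding word           */
			  + 8;				/* + left red zone pad            */

	printf("object_size=%u inuse=%u offset=%u size=%u\n",
	       object_size, inuse, offset, size);	/* prints 24 32 32 104 */
	return 0;
}
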
838 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) in check_pad_bytes() argument
842 if (s->flags & SLAB_STORE_USER) in check_pad_bytes()
851 return check_bytes_and_report(s, page, p, "Object padding", in check_pad_bytes()
852 p + off, POISON_INUSE, size_from_object(s) - off); in check_pad_bytes()
855 /* Check the pad bytes at the end of a slab page */
856 static int slab_pad_check(struct kmem_cache *s, struct page *page) in slab_pad_check() argument
865 if (!(s->flags & SLAB_POISON)) in slab_pad_check()
868 start = page_address(page); in slab_pad_check()
869 length = page_size(page); in slab_pad_check()
871 remainder = length % s->size; in slab_pad_check()
875 pad = end - remainder; in slab_pad_check()
881 while (end > fault && end[-1] == POISON_INUSE) in slab_pad_check()
882 end--; in slab_pad_check()
884 slab_err(s, page, "Padding overwritten. 0x%p-0x%p @offset=%tu", in slab_pad_check()
885 fault, end - 1, fault - start); in slab_pad_check()
892 static int check_object(struct kmem_cache *s, struct page *page, in check_object() argument
896 u8 *endobject = object + s->object_size; in check_object()
898 if (s->flags & SLAB_RED_ZONE) { in check_object()
899 if (!check_bytes_and_report(s, page, object, "Left Redzone", in check_object()
900 object - s->red_left_pad, val, s->red_left_pad)) in check_object()
903 if (!check_bytes_and_report(s, page, object, "Right Redzone", in check_object()
904 endobject, val, s->inuse - s->object_size)) in check_object()
907 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { in check_object()
908 check_bytes_and_report(s, page, p, "Alignment padding", in check_object()
910 s->inuse - s->object_size); in check_object()
914 if (s->flags & SLAB_POISON) { in check_object()
915 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) && in check_object()
916 (!check_bytes_and_report(s, page, p, "Poison", p, in check_object()
917 POISON_FREE, s->object_size - 1) || in check_object()
918 !check_bytes_and_report(s, page, p, "End Poison", in check_object()
919 p + s->object_size - 1, POISON_END, 1))) in check_object()
924 check_pad_bytes(s, page, p); in check_object()
935 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { in check_object()
936 object_err(s, page, p, "Freepointer corrupt"); in check_object()
948 static int check_slab(struct kmem_cache *s, struct page *page) in check_slab() argument
954 if (!PageSlab(page)) { in check_slab()
955 slab_err(s, page, "Not a valid slab page"); in check_slab()
959 maxobj = order_objects(compound_order(page), s->size); in check_slab()
960 if (page->objects > maxobj) { in check_slab()
961 slab_err(s, page, "objects %u > max %u", in check_slab()
962 page->objects, maxobj); in check_slab()
965 if (page->inuse > page->objects) { in check_slab()
966 slab_err(s, page, "inuse %u > max %u", in check_slab()
967 page->inuse, page->objects); in check_slab()
971 slab_pad_check(s, page); in check_slab()
976 * Determine if a certain object on a page is on the freelist. Must hold the
979 static int on_freelist(struct kmem_cache *s, struct page *page, void *search) in on_freelist() argument
986 fp = page->freelist; in on_freelist()
987 while (fp && nr <= page->objects) { in on_freelist()
990 if (!check_valid_pointer(s, page, fp)) { in on_freelist()
992 object_err(s, page, object, in on_freelist()
996 slab_err(s, page, "Freepointer corrupt"); in on_freelist()
997 page->freelist = NULL; in on_freelist()
998 page->inuse = page->objects; in on_freelist()
1009 max_objects = order_objects(compound_order(page), s->size); in on_freelist()
1013 if (page->objects != max_objects) { in on_freelist()
1014 slab_err(s, page, "Wrong number of objects. Found %d but should be %d", in on_freelist()
1015 page->objects, max_objects); in on_freelist()
1016 page->objects = max_objects; in on_freelist()
1019 if (page->inuse != page->objects - nr) { in on_freelist()
1020 slab_err(s, page, "Wrong object count. Counter is %d but counted were %d", in on_freelist()
1021 page->inuse, page->objects - nr); in on_freelist()
1022 page->inuse = page->objects - nr; in on_freelist()
1028 static void trace(struct kmem_cache *s, struct page *page, void *object, in trace() argument
1031 if (s->flags & SLAB_TRACE) { in trace()
1033 s->name, in trace()
1035 object, page->inuse, in trace()
1036 page->freelist); in trace()
1040 s->object_size); in trace()
1050 struct kmem_cache_node *n, struct page *page) in add_full() argument
1052 if (!(s->flags & SLAB_STORE_USER)) in add_full()
1055 lockdep_assert_held(&n->list_lock); in add_full()
1056 list_add(&page->slab_list, &n->full); in add_full()
1059 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) in remove_full() argument
1061 if (!(s->flags & SLAB_STORE_USER)) in remove_full()
1064 lockdep_assert_held(&n->list_lock); in remove_full()
1065 list_del(&page->slab_list); in remove_full()
1073 return atomic_long_read(&n->nr_slabs); in slabs_node()
1078 return atomic_long_read(&n->nr_slabs); in node_nr_slabs()
1087 * kmem_cache_node structure. Solve the chicken-egg in inc_slabs_node()
1092 atomic_long_inc(&n->nr_slabs); in inc_slabs_node()
1093 atomic_long_add(objects, &n->total_objects); in inc_slabs_node()
1100 atomic_long_dec(&n->nr_slabs); in dec_slabs_node()
1101 atomic_long_sub(objects, &n->total_objects); in dec_slabs_node()
1105 static void setup_object_debug(struct kmem_cache *s, struct page *page, in setup_object_debug() argument
1116 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) in setup_page_debug() argument
1122 memset(addr, POISON_INUSE, page_size(page)); in setup_page_debug()
1127 struct page *page, void *object) in alloc_consistency_checks() argument
1129 if (!check_slab(s, page)) in alloc_consistency_checks()
1132 if (!check_valid_pointer(s, page, object)) { in alloc_consistency_checks()
1133 object_err(s, page, object, "Freelist Pointer check fails"); in alloc_consistency_checks()
1137 if (!check_object(s, page, object, SLUB_RED_INACTIVE)) in alloc_consistency_checks()
1144 struct page *page, in alloc_debug_processing() argument
1147 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in alloc_debug_processing()
1148 if (!alloc_consistency_checks(s, page, object)) in alloc_debug_processing()
1153 if (s->flags & SLAB_STORE_USER) in alloc_debug_processing()
1155 trace(s, page, object, 1); in alloc_debug_processing()
1160 if (PageSlab(page)) { in alloc_debug_processing()
1162 * If this is a slab page then lets do the best we can in alloc_debug_processing()
1167 page->inuse = page->objects; in alloc_debug_processing()
1168 page->freelist = NULL; in alloc_debug_processing()
1174 struct page *page, void *object, unsigned long addr) in free_consistency_checks() argument
1176 if (!check_valid_pointer(s, page, object)) { in free_consistency_checks()
1177 slab_err(s, page, "Invalid object pointer 0x%p", object); in free_consistency_checks()
1181 if (on_freelist(s, page, object)) { in free_consistency_checks()
1182 object_err(s, page, object, "Object already free"); in free_consistency_checks()
1186 if (!check_object(s, page, object, SLUB_RED_ACTIVE)) in free_consistency_checks()
1189 if (unlikely(s != page->slab_cache)) { in free_consistency_checks()
1190 if (!PageSlab(page)) { in free_consistency_checks()
1191 slab_err(s, page, "Attempt to free object(0x%p) outside of slab", in free_consistency_checks()
1193 } else if (!page->slab_cache) { in free_consistency_checks()
1198 object_err(s, page, object, in free_consistency_checks()
1199 "page slab pointer corrupt."); in free_consistency_checks()
1207 struct kmem_cache *s, struct page *page, in free_debug_processing() argument
1211 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in free_debug_processing()
1217 spin_lock_irqsave(&n->list_lock, flags); in free_debug_processing()
1218 slab_lock(page); in free_debug_processing()
1220 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in free_debug_processing()
1221 if (!check_slab(s, page)) in free_debug_processing()
1228 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in free_debug_processing()
1229 if (!free_consistency_checks(s, page, object, addr)) in free_debug_processing()
1233 if (s->flags & SLAB_STORE_USER) in free_debug_processing()
1235 trace(s, page, object, 0); in free_debug_processing()
1248 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n", in free_debug_processing()
1251 slab_unlock(page); in free_debug_processing()
1252 spin_unlock_irqrestore(&n->list_lock, flags); in free_debug_processing()
1264 * @init: assume this is initial parsing and not per-kmem-create parsing
1290 case '-': in parse_slub_debug_flags()
1393 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n"); in setup_slub_debug()
1400 * kmem_cache_flags - apply debugging options to the cache
1401 * @object_size: the size of an object without meta data
1407 * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1432 end = next_block - 1; in kmem_cache_flags()
1434 glob = strnchr(iter, end - iter, '*'); in kmem_cache_flags()
1436 cmplen = glob - iter; in kmem_cache_flags()
1438 cmplen = max_t(size_t, len, (end - iter)); in kmem_cache_flags()
1455 struct page *page, void *object) {} in setup_object_debug() argument
1457 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {} in setup_page_debug() argument
1460 struct page *page, void *object, unsigned long addr) { return 0; } in alloc_debug_processing() argument
1463 struct kmem_cache *s, struct page *page, in free_debug_processing() argument
1467 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) in slab_pad_check() argument
1469 static inline int check_object(struct kmem_cache *s, struct page *page, in check_object() argument
1472 struct page *page) {} in add_full() argument
1474 struct page *page) {} in remove_full() argument
1493 static bool freelist_corrupted(struct kmem_cache *s, struct page *page, in freelist_corrupted() argument
1504 static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) in kmalloc_large_node_hook() argument
1506 ptr = kasan_kmalloc_large(ptr, size, flags); in kmalloc_large_node_hook()
1508 kmemleak_alloc(ptr, size, 1, flags); in kmalloc_large_node_hook()
1520 kmemleak_free_recursive(x, s->flags); in slab_free_hook()
1532 debug_check_no_locks_freed(x, s->object_size); in slab_free_hook()
1536 if (!(s->flags & SLAB_DEBUG_OBJECTS)) in slab_free_hook()
1537 debug_check_no_obj_freed(x, s->object_size); in slab_free_hook()
1539 /* Use KCSAN to help debug racy use-after-free. */ in slab_free_hook()
1540 if (!(s->flags & SLAB_TYPESAFE_BY_RCU)) in slab_free_hook()
1541 __kcsan_check_access(x, s->object_size, in slab_free_hook()
1571 memset(object, 0, s->object_size); in slab_free_freelist_hook()
1572 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad in slab_free_freelist_hook()
1574 memset((char *)object + s->inuse, 0, in slab_free_freelist_hook()
1575 s->size - s->inuse - rsize); in slab_free_freelist_hook()
1590 --(*cnt); in slab_free_freelist_hook()
1600 static void *setup_object(struct kmem_cache *s, struct page *page, in setup_object() argument
1603 setup_object_debug(s, page, object); in setup_object()
1605 if (unlikely(s->ctor)) { in setup_object()
1607 s->ctor(object); in setup_object()
1616 static inline struct page *alloc_slab_page(struct kmem_cache *s, in alloc_slab_page()
1619 struct page *page; in alloc_slab_page() local
1623 page = alloc_pages(flags, order); in alloc_slab_page()
1625 page = __alloc_pages_node(node, flags, order); in alloc_slab_page()
1627 if (page) in alloc_slab_page()
1628 account_slab_page(page, order, s); in alloc_slab_page()
1630 return page; in alloc_slab_page()
1634 /* Pre-initialize the random sequence cache */
1637 unsigned int count = oo_objects(s->oo); in init_cache_random_seq()
1641 if (s->random_seq) in init_cache_random_seq()
1647 s->name); in init_cache_random_seq()
1652 if (s->random_seq) { in init_cache_random_seq()
1656 s->random_seq[i] *= s->size; in init_cache_random_seq()
1674 /* Get the next entry on the pre-computed freelist randomized */
1675 static void *next_freelist_entry(struct kmem_cache *s, struct page *page, in next_freelist_entry() argument
1683 * If the target page allocation failed, the number of objects on the in next_freelist_entry()
1684 * page might be smaller than the usual size defined by the cache. in next_freelist_entry()
1687 idx = s->random_seq[*pos]; in next_freelist_entry()
1696 /* Shuffle the single linked freelist based on a random pre-computed sequence */
1697 static bool shuffle_freelist(struct kmem_cache *s, struct page *page) in shuffle_freelist() argument
1704 if (page->objects < 2 || !s->random_seq) in shuffle_freelist()
1707 freelist_count = oo_objects(s->oo); in shuffle_freelist()
1710 page_limit = page->objects * s->size; in shuffle_freelist()
1711 start = fixup_red_left(s, page_address(page)); in shuffle_freelist()
1714 cur = next_freelist_entry(s, page, &pos, start, page_limit, in shuffle_freelist()
1716 cur = setup_object(s, page, cur); in shuffle_freelist()
1717 page->freelist = cur; in shuffle_freelist()
1719 for (idx = 1; idx < page->objects; idx++) { in shuffle_freelist()
1720 next = next_freelist_entry(s, page, &pos, start, page_limit, in shuffle_freelist()
1722 next = setup_object(s, page, next); in shuffle_freelist()
1736 static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page) in shuffle_freelist() argument
1742 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) in allocate_slab()
1744 struct page *page; in allocate_slab() local
1745 struct kmem_cache_order_objects oo = s->oo; in allocate_slab()
1756 flags |= s->allocflags; in allocate_slab()
1759 * Let the initial higher-order allocation fail under memory pressure in allocate_slab()
1760 * so we fall-back to the minimum order allocation. in allocate_slab()
1763 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) in allocate_slab()
1766 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1767 if (unlikely(!page)) { in allocate_slab()
1768 oo = s->min; in allocate_slab()
1774 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1775 if (unlikely(!page)) in allocate_slab()
1780 page->objects = oo_objects(oo); in allocate_slab()
1782 page->slab_cache = s; in allocate_slab()
1783 __SetPageSlab(page); in allocate_slab()
1784 if (page_is_pfmemalloc(page)) in allocate_slab()
1785 SetPageSlabPfmemalloc(page); in allocate_slab()
1787 kasan_poison_slab(page); in allocate_slab()
1789 start = page_address(page); in allocate_slab()
1791 setup_page_debug(s, page, start); in allocate_slab()
1793 shuffle = shuffle_freelist(s, page); in allocate_slab()
1797 start = setup_object(s, page, start); in allocate_slab()
1798 page->freelist = start; in allocate_slab()
1799 for (idx = 0, p = start; idx < page->objects - 1; idx++) { in allocate_slab()
1800 next = p + s->size; in allocate_slab()
1801 next = setup_object(s, page, next); in allocate_slab()
1808 page->inuse = page->objects; in allocate_slab()
1809 page->frozen = 1; in allocate_slab()
1814 if (!page) in allocate_slab()
1817 inc_slabs_node(s, page_to_nid(page), page->objects); in allocate_slab()
1819 return page; in allocate_slab()
1822 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) in new_slab()
1831 static void __free_slab(struct kmem_cache *s, struct page *page) in __free_slab() argument
1833 int order = compound_order(page); in __free_slab()
1839 slab_pad_check(s, page); in __free_slab()
1840 for_each_object(p, s, page_address(page), in __free_slab()
1841 page->objects) in __free_slab()
1842 check_object(s, page, p, SLUB_RED_INACTIVE); in __free_slab()
1845 __ClearPageSlabPfmemalloc(page); in __free_slab()
1846 __ClearPageSlab(page); in __free_slab()
1848 page->mapping = NULL; in __free_slab()
1849 if (current->reclaim_state) in __free_slab()
1850 current->reclaim_state->reclaimed_slab += pages; in __free_slab()
1851 unaccount_slab_page(page, order, s); in __free_slab()
1852 __free_pages(page, order); in __free_slab()
1857 struct page *page = container_of(h, struct page, rcu_head); in rcu_free_slab() local
1859 __free_slab(page->slab_cache, page); in rcu_free_slab()
1862 static void free_slab(struct kmem_cache *s, struct page *page) in free_slab() argument
1864 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) { in free_slab()
1865 call_rcu(&page->rcu_head, rcu_free_slab); in free_slab()
1867 __free_slab(s, page); in free_slab()
1870 static void discard_slab(struct kmem_cache *s, struct page *page) in discard_slab() argument
1872 dec_slabs_node(s, page_to_nid(page), page->objects); in discard_slab()
1873 free_slab(s, page); in discard_slab()
1880 __add_partial(struct kmem_cache_node *n, struct page *page, int tail) in __add_partial() argument
1882 n->nr_partial++; in __add_partial()
1884 list_add_tail(&page->slab_list, &n->partial); in __add_partial()
1886 list_add(&page->slab_list, &n->partial); in __add_partial()
1890 struct page *page, int tail) in add_partial() argument
1892 lockdep_assert_held(&n->list_lock); in add_partial()
1893 __add_partial(n, page, tail); in add_partial()
1897 struct page *page) in remove_partial() argument
1899 lockdep_assert_held(&n->list_lock); in remove_partial()
1900 list_del(&page->slab_list); in remove_partial()
1901 n->nr_partial--; in remove_partial()
1911 struct kmem_cache_node *n, struct page *page, in acquire_slab() argument
1916 struct page new; in acquire_slab()
1918 lockdep_assert_held(&n->list_lock); in acquire_slab()
1925 freelist = page->freelist; in acquire_slab()
1926 counters = page->counters; in acquire_slab()
1928 *objects = new.objects - new.inuse; in acquire_slab()
1930 new.inuse = page->objects; in acquire_slab()
1939 if (!__cmpxchg_double_slab(s, page, in acquire_slab()
1945 remove_partial(n, page); in acquire_slab()
1950 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1951 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
1959 struct page *page, *page2; in get_partial_node() local
1970 if (!n || !n->nr_partial) in get_partial_node()
1973 spin_lock(&n->list_lock); in get_partial_node()
1974 list_for_each_entry_safe(page, page2, &n->partial, slab_list) { in get_partial_node()
1977 if (!pfmemalloc_match(page, flags)) in get_partial_node()
1980 t = acquire_slab(s, n, page, object == NULL, &objects); in get_partial_node()
1986 c->page = page; in get_partial_node()
1990 put_cpu_partial(s, page, 0); in get_partial_node()
1998 spin_unlock(&n->list_lock); in get_partial_node()
2003 * Get a page from somewhere. Search in increasing NUMA distances.
2034 if (!s->remote_node_defrag_ratio || in get_any_partial()
2035 get_cycles() % 1024 > s->remote_node_defrag_ratio) in get_any_partial()
2047 n->nr_partial > s->min_partial) { in get_any_partial()
2052 * here - if mems_allowed was updated in in get_any_partial()
2067 * Get a partial page, lock it and return it.
2126 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); in note_cmpxchg_failure()
2128 pr_info("%s %s: cmpxchg redo ", n, s->name); in note_cmpxchg_failure()
2132 pr_warn("due to cpu change %d -> %d\n", in note_cmpxchg_failure()
2137 pr_warn("due to cpu running other code. Event %ld->%ld\n", in note_cmpxchg_failure()
2151 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu); in init_kmem_cache_cpus()
2157 static void deactivate_slab(struct kmem_cache *s, struct page *page, in deactivate_slab() argument
2161 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in deactivate_slab()
2166 struct page new; in deactivate_slab()
2167 struct page old; in deactivate_slab()
2169 if (page->freelist) { in deactivate_slab()
2176 * to the page freelist while it is still frozen. Leave the in deactivate_slab()
2179 * There is no need to take the list->lock because the page in deactivate_slab()
2191 if (freelist_corrupted(s, page, &freelist, nextfree)) in deactivate_slab()
2195 prior = page->freelist; in deactivate_slab()
2196 counters = page->counters; in deactivate_slab()
2199 new.inuse--; in deactivate_slab()
2202 } while (!__cmpxchg_double_slab(s, page, in deactivate_slab()
2211 * Stage two: Ensure that the page is unfrozen while the in deactivate_slab()
2216 * with the count. If there is a mismatch then the page in deactivate_slab()
2217 * is not unfrozen but the page is on the wrong list. in deactivate_slab()
2220 * the page from the list that we just put it on again in deactivate_slab()
2226 old.freelist = page->freelist; in deactivate_slab()
2227 old.counters = page->counters; in deactivate_slab()
2233 new.inuse--; in deactivate_slab()
2241 if (!new.inuse && n->nr_partial >= s->min_partial) in deactivate_slab()
2249 * that acquire_slab() will see a slab page that in deactivate_slab()
2252 spin_lock(&n->list_lock); in deactivate_slab()
2257 if ((s->flags & SLAB_STORE_USER) && !lock) { in deactivate_slab()
2264 spin_lock(&n->list_lock); in deactivate_slab()
2271 remove_partial(n, page); in deactivate_slab()
2273 remove_full(s, n, page); in deactivate_slab()
2276 add_partial(n, page, tail); in deactivate_slab()
2278 add_full(s, n, page); in deactivate_slab()
2282 if (!__cmpxchg_double_slab(s, page, in deactivate_slab()
2289 spin_unlock(&n->list_lock); in deactivate_slab()
2297 discard_slab(s, page); in deactivate_slab()
2301 c->page = NULL; in deactivate_slab()
2302 c->freelist = NULL; in deactivate_slab()
2317 struct page *page, *discard_page = NULL; in unfreeze_partials() local
2319 while ((page = slub_percpu_partial(c))) { in unfreeze_partials()
2320 struct page new; in unfreeze_partials()
2321 struct page old; in unfreeze_partials()
2323 slub_set_percpu_partial(c, page); in unfreeze_partials()
2325 n2 = get_node(s, page_to_nid(page)); in unfreeze_partials()
2328 spin_unlock(&n->list_lock); in unfreeze_partials()
2331 spin_lock(&n->list_lock); in unfreeze_partials()
2336 old.freelist = page->freelist; in unfreeze_partials()
2337 old.counters = page->counters; in unfreeze_partials()
2345 } while (!__cmpxchg_double_slab(s, page, in unfreeze_partials()
2350 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) { in unfreeze_partials()
2351 page->next = discard_page; in unfreeze_partials()
2352 discard_page = page; in unfreeze_partials()
2354 add_partial(n, page, DEACTIVATE_TO_TAIL); in unfreeze_partials()
2360 spin_unlock(&n->list_lock); in unfreeze_partials()
2363 page = discard_page; in unfreeze_partials()
2364 discard_page = discard_page->next; in unfreeze_partials()
2367 discard_slab(s, page); in unfreeze_partials()
2374 * Put a page that was just frozen (in __slab_free|get_partial_node) into a
2375 * partial page slot if available.
2380 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) in put_cpu_partial() argument
2383 struct page *oldpage; in put_cpu_partial()
2391 oldpage = this_cpu_read(s->cpu_slab->partial); in put_cpu_partial()
2394 pobjects = oldpage->pobjects; in put_cpu_partial()
2395 pages = oldpage->pages; in put_cpu_partial()
2403 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); in put_cpu_partial()
2413 pobjects += page->objects - page->inuse; in put_cpu_partial()
2415 page->pages = pages; in put_cpu_partial()
2416 page->pobjects = pobjects; in put_cpu_partial()
2417 page->next = oldpage; in put_cpu_partial()
2419 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) in put_cpu_partial()
2425 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); in put_cpu_partial()
2435 deactivate_slab(s, c->page, c->freelist, c); in flush_slab()
2437 c->tid = next_tid(c->tid); in flush_slab()
2447 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in __flush_cpu_slab()
2449 if (c->page) in __flush_cpu_slab()
2465 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in has_cpu_slab()
2467 return c->page || slub_percpu_partial(c); in has_cpu_slab()
2498 static inline int node_match(struct page *page, int node) in node_match() argument
2501 if (node != NUMA_NO_NODE && page_to_nid(page) != node) in node_match()
2508 static int count_free(struct page *page) in count_free() argument
2510 return page->objects - page->inuse; in count_free()
2515 return atomic_long_read(&n->total_objects); in node_nr_objs()
2521 int (*get_count)(struct page *)) in count_partial() argument
2525 struct page *page; in count_partial() local
2527 spin_lock_irqsave(&n->list_lock, flags); in count_partial()
2528 list_for_each_entry(page, &n->partial, slab_list) in count_partial()
2529 x += get_count(page); in count_partial()
2530 spin_unlock_irqrestore(&n->list_lock, flags); in count_partial()
2549 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n", in slab_out_of_memory()
2550 s->name, s->object_size, s->size, oo_order(s->oo), in slab_out_of_memory()
2551 oo_order(s->min)); in slab_out_of_memory()
2553 if (oo_order(s->min) > get_order(s->object_size)) in slab_out_of_memory()
2555 s->name); in slab_out_of_memory()
2577 struct page *page; in new_slab_objects() local
2579 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); in new_slab_objects()
2586 page = new_slab(s, flags, node); in new_slab_objects()
2587 if (page) { in new_slab_objects()
2588 c = raw_cpu_ptr(s->cpu_slab); in new_slab_objects()
2589 if (c->page) in new_slab_objects()
2593 * No other reference to the page yet so we can in new_slab_objects()
2596 freelist = page->freelist; in new_slab_objects()
2597 page->freelist = NULL; in new_slab_objects()
2600 c->page = page; in new_slab_objects()
2607 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags) in pfmemalloc_match() argument
2609 if (unlikely(PageSlabPfmemalloc(page))) in pfmemalloc_match()
2616 * Check the page->freelist of a page and either transfer the freelist to the
2617 * per cpu freelist or deactivate the page.
2619 * The page is still frozen if the return value is not NULL.
2621 * If this function returns NULL then the page has been unfrozen.
2625 static inline void *get_freelist(struct kmem_cache *s, struct page *page) in get_freelist() argument
2627 struct page new; in get_freelist()
2632 freelist = page->freelist; in get_freelist()
2633 counters = page->counters; in get_freelist()
2638 new.inuse = page->objects; in get_freelist()
2641 } while (!__cmpxchg_double_slab(s, page, in get_freelist()
2663 * a call to the page allocator and the setup of a new slab.
2672 struct page *page; in ___slab_alloc() local
2676 page = c->page; in ___slab_alloc()
2677 if (!page) { in ___slab_alloc()
2689 if (unlikely(!node_match(page, node))) { in ___slab_alloc()
2699 deactivate_slab(s, page, c->freelist, c); in ___slab_alloc()
2705 * By rights, we should be searching for a slab page that was in ___slab_alloc()
2707 * information when the page leaves the per-cpu allocator in ___slab_alloc()
2709 if (unlikely(!pfmemalloc_match(page, gfpflags))) { in ___slab_alloc()
2710 deactivate_slab(s, page, c->freelist, c); in ___slab_alloc()
2714 /* must check again c->freelist in case of cpu migration or IRQ */ in ___slab_alloc()
2715 freelist = c->freelist; in ___slab_alloc()
2719 freelist = get_freelist(s, page); in ___slab_alloc()
2722 c->page = NULL; in ___slab_alloc()
2732 * page is pointing to the page from which the objects are obtained. in ___slab_alloc()
2733 * That page must be frozen for per cpu allocations to work. in ___slab_alloc()
2735 VM_BUG_ON(!c->page->frozen); in ___slab_alloc()
2736 c->freelist = get_freepointer(s, freelist); in ___slab_alloc()
2737 c->tid = next_tid(c->tid); in ___slab_alloc()
2743 page = c->page = slub_percpu_partial(c); in ___slab_alloc()
2744 slub_set_percpu_partial(c, page); in ___slab_alloc()
2756 page = c->page; in ___slab_alloc()
2757 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags))) in ___slab_alloc()
2762 !alloc_debug_processing(s, page, freelist, addr)) in ___slab_alloc()
2765 deactivate_slab(s, page, get_freepointer(s, freelist), c); in ___slab_alloc()
2786 c = this_cpu_ptr(s->cpu_slab); in __slab_alloc()
2802 memset((void *)((char *)obj + s->offset), 0, sizeof(void *)); in maybe_wipe_obj_freeptr()
2820 struct page *page; in slab_alloc_node() local
2839 tid = this_cpu_read(s->cpu_slab->tid); in slab_alloc_node()
2840 c = raw_cpu_ptr(s->cpu_slab); in slab_alloc_node()
2842 unlikely(tid != READ_ONCE(c->tid))); in slab_alloc_node()
2847 * on c to guarantee that object and page associated with previous tid in slab_alloc_node()
2849 * page could be one associated with next tid and our alloc/free in slab_alloc_node()
2861 object = c->freelist; in slab_alloc_node()
2862 page = c->page; in slab_alloc_node()
2863 if (unlikely(!object || !page || !node_match(page, node))) { in slab_alloc_node()
2883 s->cpu_slab->freelist, s->cpu_slab->tid, in slab_alloc_node()
2897 memset(object, 0, s->object_size); in slab_alloc_node()
2914 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, in kmem_cache_alloc()
2915 s->size, gfpflags); in kmem_cache_alloc()
2922 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) in kmem_cache_alloc_trace() argument
2925 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); in kmem_cache_alloc_trace()
2926 ret = kasan_kmalloc(s, ret, size, gfpflags); in kmem_cache_alloc_trace()
2938 s->object_size, s->size, gfpflags, node); in kmem_cache_alloc_node()
2947 int node, size_t size) in kmem_cache_alloc_node_trace() argument
2952 size, s->size, gfpflags, node); in kmem_cache_alloc_node_trace()
2954 ret = kasan_kmalloc(s, ret, size, gfpflags); in kmem_cache_alloc_node_trace()
2966 * lock and free the item. If there is no additional partial page
2969 static void __slab_free(struct kmem_cache *s, struct page *page, in __slab_free() argument
2976 struct page new; in __slab_free()
2984 !free_debug_processing(s, page, head, tail, cnt, addr)) in __slab_free()
2989 spin_unlock_irqrestore(&n->list_lock, flags); in __slab_free()
2992 prior = page->freelist; in __slab_free()
2993 counters = page->counters; in __slab_free()
2997 new.inuse -= cnt; in __slab_free()
3012 n = get_node(s, page_to_nid(page)); in __slab_free()
3021 spin_lock_irqsave(&n->list_lock, flags); in __slab_free()
3026 } while (!cmpxchg_double_slab(s, page, in __slab_free()
3041 * If we just froze the page then put it onto the in __slab_free()
3044 put_cpu_partial(s, page, 1); in __slab_free()
3051 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) in __slab_free()
3059 remove_full(s, n, page); in __slab_free()
3060 add_partial(n, page, DEACTIVATE_TO_TAIL); in __slab_free()
3063 spin_unlock_irqrestore(&n->list_lock, flags); in __slab_free()
3071 remove_partial(n, page); in __slab_free()
3075 remove_full(s, n, page); in __slab_free()
3078 spin_unlock_irqrestore(&n->list_lock, flags); in __slab_free()
3080 discard_slab(s, page); in __slab_free()
3095 * same page) possible by specifying head and tail ptr, plus objects
3099 struct page *page, void *head, void *tail, in do_slab_free() argument
3117 tid = this_cpu_read(s->cpu_slab->tid); in do_slab_free()
3118 c = raw_cpu_ptr(s->cpu_slab); in do_slab_free()
3120 unlikely(tid != READ_ONCE(c->tid))); in do_slab_free()
3125 if (likely(page == c->page)) { in do_slab_free()
3126 void **freelist = READ_ONCE(c->freelist); in do_slab_free()
3131 s->cpu_slab->freelist, s->cpu_slab->tid, in do_slab_free()
3140 __slab_free(s, page, head, tail_obj, cnt, addr); in do_slab_free()
3144 static __always_inline void slab_free(struct kmem_cache *s, struct page *page, in slab_free() argument
3153 do_slab_free(s, page, head, tail, cnt, addr); in slab_free()
3174 struct page *page; member
3184 * page. It builds a detached freelist directly within the given
3185 * page/objects. This can happen without any need for
3194 int build_detached_freelist(struct kmem_cache *s, size_t size, in build_detached_freelist() argument
3200 struct page *page; in build_detached_freelist() local
3202 /* Always re-init detached_freelist */ in build_detached_freelist()
3203 df->page = NULL; in build_detached_freelist()
3206 object = p[--size]; in build_detached_freelist()
3208 } while (!object && size); in build_detached_freelist()
3213 page = virt_to_head_page(object); in build_detached_freelist()
3216 if (unlikely(!PageSlab(page))) { in build_detached_freelist()
3217 BUG_ON(!PageCompound(page)); in build_detached_freelist()
3219 __free_pages(page, compound_order(page)); in build_detached_freelist()
3220 p[size] = NULL; /* mark object processed */ in build_detached_freelist()
3221 return size; in build_detached_freelist()
3224 df->s = page->slab_cache; in build_detached_freelist()
3226 df->s = cache_from_obj(s, object); /* Support for memcg */ in build_detached_freelist()
3230 df->page = page; in build_detached_freelist()
3231 set_freepointer(df->s, object, NULL); in build_detached_freelist()
3232 df->tail = object; in build_detached_freelist()
3233 df->freelist = object; in build_detached_freelist()
3234 p[size] = NULL; /* mark object processed */ in build_detached_freelist()
3235 df->cnt = 1; in build_detached_freelist()
3237 while (size) { in build_detached_freelist()
3238 object = p[--size]; in build_detached_freelist()
3242 /* df->page is always set at this point */ in build_detached_freelist()
3243 if (df->page == virt_to_head_page(object)) { in build_detached_freelist()
3245 set_freepointer(df->s, object, df->freelist); in build_detached_freelist()
3246 df->freelist = object; in build_detached_freelist()
3247 df->cnt++; in build_detached_freelist()
3248 p[size] = NULL; /* mark object processed */ in build_detached_freelist()
3254 if (!--lookahead) in build_detached_freelist()
3258 first_skipped_index = size + 1; in build_detached_freelist()
3265 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) in kmem_cache_free_bulk() argument
3267 if (WARN_ON(!size)) in kmem_cache_free_bulk()
3270 memcg_slab_free_hook(s, p, size); in kmem_cache_free_bulk()
3274 size = build_detached_freelist(s, size, p, &df); in kmem_cache_free_bulk()
3275 if (!df.page) in kmem_cache_free_bulk()
3278 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_); in kmem_cache_free_bulk()
3279 } while (likely(size)); in kmem_cache_free_bulk()
3284 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, in kmem_cache_alloc_bulk() argument
3292 s = slab_pre_alloc_hook(s, &objcg, size, flags); in kmem_cache_alloc_bulk()
3301 c = this_cpu_ptr(s->cpu_slab); in kmem_cache_alloc_bulk()
3303 for (i = 0; i < size; i++) { in kmem_cache_alloc_bulk()
3304 void *object = c->freelist; in kmem_cache_alloc_bulk()
3308 * We may have removed an object from c->freelist using in kmem_cache_alloc_bulk()
3310 * c->tid has not been bumped yet. in kmem_cache_alloc_bulk()
3312 * allocating memory, we should bump c->tid now. in kmem_cache_alloc_bulk()
3314 c->tid = next_tid(c->tid); in kmem_cache_alloc_bulk()
3317 * Invoking slow path likely have side-effect in kmem_cache_alloc_bulk()
3318 * of re-populating per CPU c->freelist in kmem_cache_alloc_bulk()
3325 c = this_cpu_ptr(s->cpu_slab); in kmem_cache_alloc_bulk()
3328 continue; /* goto for-loop */ in kmem_cache_alloc_bulk()
3330 c->freelist = get_freepointer(s, object); in kmem_cache_alloc_bulk()
3334 c->tid = next_tid(c->tid); in kmem_cache_alloc_bulk()
3342 memset(p[j], 0, s->object_size); in kmem_cache_alloc_bulk()
3346 slab_post_alloc_hook(s, objcg, flags, size, p); in kmem_cache_alloc_bulk()
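
Taken together, the two bulk entry points shown above are used roughly as follows. This is a kernel-context sketch, not standalone code; my_cache stands for a hypothetical kmem_cache created elsewhere.

void *objs[16];
int got;

/* Returns the number of objects allocated, or 0 on failure (partial
 * allocations are rolled back internally). */
got = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
if (got) {
	/* ... use objs[0 .. got-1] ... */
	kmem_cache_free_bulk(my_cache, got, objs);
}
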
3359 * offset 0. If we tune the size of the object to the alignment then we can
3381 * Calculate the order of allocation given an slab object size.
3385 * order 0 does not cause fragmentation in the page allocator. Larger objects
3397 * we try to keep the page order as low as possible. So we accept more waste
3398 * of space in favor of a small page order.
3405 static inline unsigned int slab_order(unsigned int size, in slab_order() argument
3412 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE) in slab_order()
3413 return get_order(size * MAX_OBJS_PER_PAGE) - 1; in slab_order()
3415 for (order = max(min_order, (unsigned int)get_order(min_objects * size)); in slab_order()
3421 rem = slab_size % size; in slab_order()
3430 static inline int calculate_order(unsigned int size) in calculate_order() argument
3447 max_objects = order_objects(slub_max_order, size); in calculate_order()
3455 order = slab_order(size, min_objects, in calculate_order()
3461 min_objects--; in calculate_order()
3468 order = slab_order(size, 1, slub_max_order, 1); in calculate_order()
3475 order = slab_order(size, 1, MAX_ORDER, 1); in calculate_order()
3478 return -ENOSYS; in calculate_order()
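
The trade-off that slab_order()/calculate_order() above is balancing can be seen with a hypothetical 700-byte object and 4 KiB pages: larger orders fit more objects and generally waste a smaller fraction of the slab, at the cost of higher-order allocations.

#include <stdio.h>

int main(void)
{
	unsigned int size = 700, page = 4096;	/* hypothetical object, 4 KiB pages */

	for (unsigned int order = 0; order <= 3; order++) {
		unsigned int slab = page << order;
		unsigned int objs = slab / size;

		printf("order %u: %2u objects, %4u bytes left over\n",
		       order, objs, slab - objs * size);
	}
	return 0;
}
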
3484 n->nr_partial = 0; in init_kmem_cache_node()
3485 spin_lock_init(&n->list_lock); in init_kmem_cache_node()
3486 INIT_LIST_HEAD(&n->partial); in init_kmem_cache_node()
3488 atomic_long_set(&n->nr_slabs, 0); in init_kmem_cache_node()
3489 atomic_long_set(&n->total_objects, 0); in init_kmem_cache_node()
3490 INIT_LIST_HEAD(&n->full); in init_kmem_cache_node()
3503 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), in alloc_kmem_cache_cpus()
3506 if (!s->cpu_slab) in alloc_kmem_cache_cpus()
3527 struct page *page; in early_kmem_cache_node_alloc() local
3530 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); in early_kmem_cache_node_alloc()
3532 page = new_slab(kmem_cache_node, GFP_NOWAIT, node); in early_kmem_cache_node_alloc()
3534 BUG_ON(!page); in early_kmem_cache_node_alloc()
3535 if (page_to_nid(page) != node) { in early_kmem_cache_node_alloc()
3540 n = page->freelist; in early_kmem_cache_node_alloc()
3548 page->freelist = get_freepointer(kmem_cache_node, n); in early_kmem_cache_node_alloc()
3549 page->inuse = 1; in early_kmem_cache_node_alloc()
3550 page->frozen = 0; in early_kmem_cache_node_alloc()
3551 kmem_cache_node->node[node] = n; in early_kmem_cache_node_alloc()
3553 inc_slabs_node(kmem_cache_node, node, page->objects); in early_kmem_cache_node_alloc()
3559 __add_partial(n, page, DEACTIVATE_TO_HEAD); in early_kmem_cache_node_alloc()
3568 s->node[node] = NULL; in free_kmem_cache_nodes()
3576 free_percpu(s->cpu_slab); in __kmem_cache_release()
3600 s->node[node] = n; in init_kmem_cache_nodes()
3611 s->min_partial = min; in set_min_partial()
3636 else if (s->size >= PAGE_SIZE) in set_cpu_partial()
3638 else if (s->size >= 1024) in set_cpu_partial()
3640 else if (s->size >= 256) in set_cpu_partial()
3653 slab_flags_t flags = s->flags; in calculate_sizes()
3654 unsigned int size = s->object_size; in calculate_sizes() local
3658 * Round up object size to the next word boundary. We can only in calculate_sizes()
3662 size = ALIGN(size, sizeof(void *)); in calculate_sizes()
3671 !s->ctor) in calculate_sizes()
3672 s->flags |= __OBJECT_POISON; in calculate_sizes()
3674 s->flags &= ~__OBJECT_POISON; in calculate_sizes()
3682 if ((flags & SLAB_RED_ZONE) && size == s->object_size) in calculate_sizes()
3683 size += sizeof(void *); in calculate_sizes()
3690 s->inuse = size; in calculate_sizes()
3693 ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) || in calculate_sizes()
3694 s->ctor) { in calculate_sizes()
3704 * The assumption that s->offset >= s->inuse means free in calculate_sizes()
3709 s->offset = size; in calculate_sizes()
3710 size += sizeof(void *); in calculate_sizes()
3717 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *)); in calculate_sizes()
3726 size += 2 * sizeof(struct track); in calculate_sizes()
3729 kasan_cache_create(s, &size, &s->flags); in calculate_sizes()
3739 size += sizeof(void *); in calculate_sizes()
3741 s->red_left_pad = sizeof(void *); in calculate_sizes()
3742 s->red_left_pad = ALIGN(s->red_left_pad, s->align); in calculate_sizes()
3743 size += s->red_left_pad; in calculate_sizes()
3749 * offset 0. In order to align the objects we have to simply size in calculate_sizes()
3752 size = ALIGN(size, s->align); in calculate_sizes()
3753 s->size = size; in calculate_sizes()
3754 s->reciprocal_size = reciprocal_value(size); in calculate_sizes()
3758 order = calculate_order(size); in calculate_sizes()
3763 s->allocflags = 0; in calculate_sizes()
3765 s->allocflags |= __GFP_COMP; in calculate_sizes()
3767 if (s->flags & SLAB_CACHE_DMA) in calculate_sizes()
3768 s->allocflags |= GFP_DMA; in calculate_sizes()
3770 if (s->flags & SLAB_CACHE_DMA32) in calculate_sizes()
3771 s->allocflags |= GFP_DMA32; in calculate_sizes()
3773 if (s->flags & SLAB_RECLAIM_ACCOUNT) in calculate_sizes()
3774 s->allocflags |= __GFP_RECLAIMABLE; in calculate_sizes()
3779 s->oo = oo_make(order, size); in calculate_sizes()
3780 s->min = oo_make(get_order(size), size); in calculate_sizes()
3781 if (oo_objects(s->oo) > oo_objects(s->max)) in calculate_sizes()
3782 s->max = s->oo; in calculate_sizes()
3784 return !!oo_objects(s->oo); in calculate_sizes()
3789 s->flags = kmem_cache_flags(s->size, flags, s->name); in kmem_cache_open()
3791 s->random = get_random_long(); in kmem_cache_open()
3794 if (!calculate_sizes(s, -1)) in kmem_cache_open()
3801 if (get_order(s->size) > get_order(s->object_size)) { in kmem_cache_open()
3802 s->flags &= ~DEBUG_METADATA_FLAGS; in kmem_cache_open()
3803 s->offset = 0; in kmem_cache_open()
3804 if (!calculate_sizes(s, -1)) in kmem_cache_open()
3811 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0) in kmem_cache_open()
3813 s->flags |= __CMPXCHG_DOUBLE; in kmem_cache_open()
3817 * The larger the object size is, the more pages we want on the partial in kmem_cache_open()
3818 * list to avoid pounding the page allocator excessively. in kmem_cache_open()
3820 set_min_partial(s, ilog2(s->size) / 2); in kmem_cache_open()
3825 s->remote_node_defrag_ratio = 1000; in kmem_cache_open()
3828 /* Initialize the pre-computed randomized freelist if slab is up */ in kmem_cache_open()
3842 return -EINVAL; in kmem_cache_open()
3845 static void list_slab_objects(struct kmem_cache *s, struct page *page, in list_slab_objects() argument
3849 void *addr = page_address(page); in list_slab_objects()
3853 slab_err(s, page, text, s->name); in list_slab_objects()
3854 slab_lock(page); in list_slab_objects()
3856 map = get_map(s, page); in list_slab_objects()
3857 for_each_object(p, s, addr, page->objects) { in list_slab_objects()
3860 pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr); in list_slab_objects()
3865 slab_unlock(page); in list_slab_objects()
3877 struct page *page, *h; in free_partial() local
3880 spin_lock_irq(&n->list_lock); in free_partial()
3881 list_for_each_entry_safe(page, h, &n->partial, slab_list) { in free_partial()
3882 if (!page->inuse) { in free_partial()
3883 remove_partial(n, page); in free_partial()
3884 list_add(&page->slab_list, &discard); in free_partial()
3886 list_slab_objects(s, page, in free_partial()
3890 spin_unlock_irq(&n->list_lock); in free_partial()
3892 list_for_each_entry_safe(page, h, &discard, slab_list) in free_partial()
3893 discard_slab(s, page); in free_partial()
3902 if (n->nr_partial || slabs_node(s, node)) in __kmem_cache_empty()
3919 if (n->nr_partial || slabs_node(s, node)) in __kmem_cache_shutdown()
3941 slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1); in setup_slub_max_order()
3957 void *__kmalloc(size_t size, gfp_t flags) in __kmalloc() argument
3962 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) in __kmalloc()
3963 return kmalloc_large(size, flags); in __kmalloc()
3965 s = kmalloc_slab(size, flags); in __kmalloc()
3972 trace_kmalloc(_RET_IP_, ret, size, s->size, flags); in __kmalloc()
3974 ret = kasan_kmalloc(s, ret, size, flags); in __kmalloc()
3981 static void *kmalloc_large_node(size_t size, gfp_t flags, int node) in kmalloc_large_node() argument
3983 struct page *page; in kmalloc_large_node() local
3985 unsigned int order = get_order(size); in kmalloc_large_node()
3988 page = alloc_pages_node(node, flags, order); in kmalloc_large_node()
3989 if (page) { in kmalloc_large_node()
3990 ptr = page_address(page); in kmalloc_large_node()
3991 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, in kmalloc_large_node()
3995 return kmalloc_large_node_hook(ptr, size, flags); in kmalloc_large_node()
3998 void *__kmalloc_node(size_t size, gfp_t flags, int node) in __kmalloc_node() argument
4003 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { in __kmalloc_node()
4004 ret = kmalloc_large_node(size, flags, node); in __kmalloc_node()
4007 size, PAGE_SIZE << get_order(size), in __kmalloc_node()
4013 s = kmalloc_slab(size, flags); in __kmalloc_node()
4020 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); in __kmalloc_node()
4022 ret = kasan_kmalloc(s, ret, size, flags); in __kmalloc_node()
4038 void __check_heap_object(const void *ptr, unsigned long n, struct page *page, in __check_heap_object() argument
4047 /* Find object and usable object size. */ in __check_heap_object()
4048 s = page->slab_cache; in __check_heap_object()
4051 if (ptr < page_address(page)) in __check_heap_object()
4052 usercopy_abort("SLUB object not in SLUB page?!", NULL, in __check_heap_object()
4056 offset = (ptr - page_address(page)) % s->size; in __check_heap_object()
4060 if (offset < s->red_left_pad) in __check_heap_object()
4062 s->name, to_user, offset, n); in __check_heap_object()
4063 offset -= s->red_left_pad; in __check_heap_object()
4067 if (offset >= s->useroffset && in __check_heap_object()
4068 offset - s->useroffset <= s->usersize && in __check_heap_object()
4069 n <= s->useroffset - offset + s->usersize) in __check_heap_object()
4080 offset <= object_size && n <= object_size - offset) { in __check_heap_object()
4081 usercopy_warn("SLUB object", s->name, to_user, offset, n); in __check_heap_object()
4085 usercopy_abort("SLUB object", s->name, to_user, offset, n); in __check_heap_object()
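
The usercopy whitelist test above reduces to simple interval arithmetic: the copy must lie entirely within [useroffset, useroffset + usersize) of the object. A standalone sketch with made-up numbers:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the window check in __check_heap_object(): offset is the copy's
 * start within the object, n its length. */
static bool copy_allowed(unsigned long offset, unsigned long n,
			 unsigned long useroffset, unsigned long usersize)
{
	return offset >= useroffset &&
	       offset - useroffset <= usersize &&
	       n <= useroffset - offset + usersize;
}

int main(void)
{
	printf("inside window: %d\n", copy_allowed(16, 32, 16, 64));	/* 1 */
	printf("starts too early: %d\n", copy_allowed(8, 16, 16, 64));	/* 0 */
	printf("runs past end: %d\n", copy_allowed(32, 64, 16, 64));	/* 0 */
	return 0;
}
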
4091 struct page *page; in __ksize() local
4096 page = virt_to_head_page(object); in __ksize()
4098 if (unlikely(!PageSlab(page))) { in __ksize()
4099 WARN_ON(!PageCompound(page)); in __ksize()
4100 return page_size(page); in __ksize()
4103 return slab_ksize(page->slab_cache); in __ksize()
4109 struct page *page; in kfree() local
4117 page = virt_to_head_page(x); in kfree()
4118 if (unlikely(!PageSlab(page))) { in kfree()
4119 unsigned int order = compound_order(page); in kfree()
4121 BUG_ON(!PageCompound(page)); in kfree()
4123 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, in kfree()
4124 -(PAGE_SIZE << order)); in kfree()
4125 __free_pages(page, order); in kfree()
4128 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_); in kfree()
4148 struct page *page; in __kmem_cache_shrink() local
4149 struct page *t; in __kmem_cache_shrink()
4161 spin_lock_irqsave(&n->list_lock, flags); in __kmem_cache_shrink()
4167 * list_lock. page->inuse here is the upper limit. in __kmem_cache_shrink()
4169 list_for_each_entry_safe(page, t, &n->partial, slab_list) { in __kmem_cache_shrink()
4170 int free = page->objects - page->inuse; in __kmem_cache_shrink()
4172 /* Do not reread page->inuse */ in __kmem_cache_shrink()
4178 if (free == page->objects) { in __kmem_cache_shrink()
4179 list_move(&page->slab_list, &discard); in __kmem_cache_shrink()
4180 n->nr_partial--; in __kmem_cache_shrink()
4182 list_move(&page->slab_list, promote + free - 1); in __kmem_cache_shrink()
4189 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) in __kmem_cache_shrink()
4190 list_splice(promote + i, &n->partial); in __kmem_cache_shrink()
4192 spin_unlock_irqrestore(&n->list_lock, flags); in __kmem_cache_shrink()
4195 list_for_each_entry_safe(page, t, &discard, slab_list) in __kmem_cache_shrink()
4196 discard_slab(s, page); in __kmem_cache_shrink()
4224 offline_node = marg->status_change_nid_normal; in slab_mem_offline_callback()
4238 * if n->nr_slabs > 0, slabs still exist on the node in slab_mem_offline_callback()
4245 s->node[offline_node] = NULL; in slab_mem_offline_callback()
4257 int nid = marg->status_change_nid_normal; in slab_mem_going_online_callback()
4281 ret = -ENOMEM; in slab_mem_going_online_callback()
4285 s->node[nid] = n; in slab_mem_going_online_callback()
4330 * the page allocator. Allocate them properly then fix up the pointers
4340 memcpy(s, static_cache, kmem_cache->object_size); in bootstrap()
4349 struct page *p; in bootstrap()
4351 list_for_each_entry(p, &n->partial, slab_list) in bootstrap()
4352 p->slab_cache = s; in bootstrap()
4355 list_for_each_entry(p, &n->full, slab_list) in bootstrap()
4356 p->slab_cache = s; in bootstrap()
4359 list_add(&s->list, &slab_caches); in bootstrap()
4400 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", in kmem_cache_init()
4411 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, in __kmem_cache_alias() argument
4416 s = find_mergeable(size, align, flags, name, ctor); in __kmem_cache_alias()
4418 s->refcount++; in __kmem_cache_alias()
4424 s->object_size = max(s->object_size, size); in __kmem_cache_alias()
4425 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); in __kmem_cache_alias()
4428 s->refcount--; in __kmem_cache_alias()
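When an alias is taken on an existing mergeable cache, the excerpt above widens the survivor so it can serve the new user too. A small sketch of that adjustment, with toy_cache and TOY_ALIGN as illustrative stand-ins rather than kernel definitions:

#include <stdio.h>

#define TOY_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

struct toy_cache {
	unsigned int object_size;  /* largest object size requested so far */
	unsigned int inuse;        /* bytes used before the free pointer   */
	int refcount;
};

static void toy_alias(struct toy_cache *s, unsigned int size)
{
	s->refcount++;                       /* one more user of this cache */
	if (size > s->object_size)
		s->object_size = size;
	if (TOY_ALIGN(size, sizeof(void *)) > s->inuse)
		s->inuse = TOY_ALIGN(size, sizeof(void *));
}

int main(void)
{
	struct toy_cache s = { .object_size = 104, .inuse = 104, .refcount = 1 };

	toy_alias(&s, 112);   /* second user with slightly larger objects */
	printf("object_size=%u inuse=%u refcount=%d\n",
	       s.object_size, s.inuse, s.refcount);
	return 0;
}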
4455 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) in __kmalloc_track_caller() argument
4460 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) in __kmalloc_track_caller()
4461 return kmalloc_large(size, gfpflags); in __kmalloc_track_caller()
4463 s = kmalloc_slab(size, gfpflags); in __kmalloc_track_caller()
4471 trace_kmalloc(caller, ret, size, s->size, gfpflags); in __kmalloc_track_caller()
4478 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, in __kmalloc_node_track_caller() argument
4484 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { in __kmalloc_node_track_caller()
4485 ret = kmalloc_large_node(size, gfpflags, node); in __kmalloc_node_track_caller()
4488 size, PAGE_SIZE << get_order(size), in __kmalloc_node_track_caller()
4494 s = kmalloc_slab(size, gfpflags); in __kmalloc_node_track_caller()
4502 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); in __kmalloc_node_track_caller()
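Both track_caller variants above split requests at KMALLOC_MAX_CACHE_SIZE: larger ones go straight to the page allocator, smaller ones to a size-class cache. The sketch below approximates that dispatch; the class picker just rounds up to a power of two, whereas the real kmalloc_slab() also has 96- and 192-byte classes and a lookup table, so treat the toy_* names and constants as assumptions.

#include <stdio.h>

#define TOY_PAGE_SIZE      4096u
#define TOY_MAX_CACHE_SIZE 8192u   /* stand-in for KMALLOC_MAX_CACHE_SIZE */

static unsigned int toy_kmalloc_class(unsigned int size)
{
	unsigned int c = 8;             /* smallest class in this toy model */

	while (c < size)
		c <<= 1;
	return c;
}

static unsigned int toy_get_order(unsigned int size)
{
	unsigned int order = 0;         /* pages needed, as a power of two */

	while ((TOY_PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned int sizes[] = { 24, 200, 4000, 65536 };

	for (int i = 0; i < 4; i++) {
		if (sizes[i] > TOY_MAX_CACHE_SIZE)
			printf("%u -> page allocator, %u bytes\n", sizes[i],
			       TOY_PAGE_SIZE << toy_get_order(sizes[i]));
		else
			printf("%u -> kmalloc-%u cache\n", sizes[i],
			       toy_kmalloc_class(sizes[i]));
	}
	return 0;
}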
4510 static int count_inuse(struct page *page) in count_inuse() argument
4512 return page->inuse; in count_inuse()
4515 static int count_total(struct page *page) in count_total() argument
4517 return page->objects; in count_total()
4522 static void validate_slab(struct kmem_cache *s, struct page *page) in validate_slab() argument
4525 void *addr = page_address(page); in validate_slab()
4528 slab_lock(page); in validate_slab()
4530 if (!check_slab(s, page) || !on_freelist(s, page, NULL)) in validate_slab()
4534 map = get_map(s, page); in validate_slab()
4535 for_each_object(p, s, addr, page->objects) { in validate_slab()
4539 if (!check_object(s, page, p, val)) in validate_slab()
4544 slab_unlock(page); in validate_slab()
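A reduced model of the per-slab walk in validate_slab(): a bitmap records which slots are on the freelist, and each object is then checked with the expectation matching its state (free slots against the inactive pattern, allocated slots against the active one). The arrays below are made-up inputs; the kernel's check_object() verifies redzones and poison bytes rather than a precomputed flag.

#include <stdbool.h>
#include <stdio.h>

#define NOBJ 8

int main(void)
{
	/* bitmap of slots on the freelist, as built by get_map()            */
	bool on_freelist[NOBJ] = { 0, 1, 0, 0, 1, 1, 0, 0 };
	/* simulated result of checking each object's redzone/poison pattern */
	bool looks_free[NOBJ]  = { 0, 1, 0, 1, 1, 1, 0, 0 };

	for (int i = 0; i < NOBJ; i++)
		if (on_freelist[i] != looks_free[i])
			printf("object %d: corruption suspected (freelist=%d, object state=%d)\n",
			       i, on_freelist[i], looks_free[i]);
	return 0;
}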
4551 struct page *page; in validate_slab_node() local
4554 spin_lock_irqsave(&n->list_lock, flags); in validate_slab_node()
4556 list_for_each_entry(page, &n->partial, slab_list) { in validate_slab_node()
4557 validate_slab(s, page); in validate_slab_node()
4560 if (count != n->nr_partial) in validate_slab_node()
4562 s->name, count, n->nr_partial); in validate_slab_node()
4564 if (!(s->flags & SLAB_STORE_USER)) in validate_slab_node()
4567 list_for_each_entry(page, &n->full, slab_list) { in validate_slab_node()
4568 validate_slab(s, page); in validate_slab_node()
4571 if (count != atomic_long_read(&n->nr_slabs)) in validate_slab_node()
4573 s->name, count, atomic_long_read(&n->nr_slabs)); in validate_slab_node()
4576 spin_unlock_irqrestore(&n->list_lock, flags); in validate_slab_node()
4617 if (t->max) in free_loc_track()
4618 free_pages((unsigned long)t->loc, in free_loc_track()
4619 get_order(sizeof(struct location) * t->max)); in free_loc_track()
4633 if (t->count) { in alloc_loc_track()
4634 memcpy(l, t->loc, sizeof(struct location) * t->count); in alloc_loc_track()
4637 t->max = max; in alloc_loc_track()
4638 t->loc = l; in alloc_loc_track()
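A userspace sketch of the growth strategy behind alloc_loc_track(): when the table fills, a new twice-as-large buffer is allocated, existing entries are copied across, and the old buffer is released. The kernel version allocates whole pages (and may run with GFP_ATOMIC); this toy just uses malloc()/free(), and the toy_* types are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_location { unsigned long addr; long count; };

struct toy_loc_track {
	unsigned long max, count;
	struct toy_location *loc;
};

static int toy_alloc_loc_track(struct toy_loc_track *t, unsigned long max)
{
	struct toy_location *l = malloc(max * sizeof(*l));

	if (!l)
		return 0;
	if (t->count)
		memcpy(l, t->loc, t->count * sizeof(*l));
	free(t->loc);                    /* no-op on the first call (NULL) */
	t->max = max;
	t->loc = l;
	return 1;
}

int main(void)
{
	struct toy_loc_track t = { 0, 0, NULL };

	toy_alloc_loc_track(&t, 16);              /* initial table           */
	t.count = t.max;                          /* pretend it filled up    */
	toy_alloc_loc_track(&t, 2 * t.max);       /* double, as the caller does */
	printf("capacity=%lu entries\n", t.max);  /* 32 */
	free(t.loc);
	return 0;
}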
4648 unsigned long age = jiffies - track->when; in add_location()
4650 start = -1; in add_location()
4651 end = t->count; in add_location()
4654 pos = start + (end - start + 1) / 2; in add_location()
4663 caddr = t->loc[pos].addr; in add_location()
4664 if (track->addr == caddr) { in add_location()
4666 l = &t->loc[pos]; in add_location()
4667 l->count++; in add_location()
4668 if (track->when) { in add_location()
4669 l->sum_time += age; in add_location()
4670 if (age < l->min_time) in add_location()
4671 l->min_time = age; in add_location()
4672 if (age > l->max_time) in add_location()
4673 l->max_time = age; in add_location()
4675 if (track->pid < l->min_pid) in add_location()
4676 l->min_pid = track->pid; in add_location()
4677 if (track->pid > l->max_pid) in add_location()
4678 l->max_pid = track->pid; in add_location()
4680 cpumask_set_cpu(track->cpu, in add_location()
4681 to_cpumask(l->cpus)); in add_location()
4683 node_set(page_to_nid(virt_to_page(track)), l->nodes); in add_location()
4687 if (track->addr < caddr) in add_location()
4696 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) in add_location()
4699 l = t->loc + pos; in add_location()
4700 if (pos < t->count) in add_location()
4702 (t->count - pos) * sizeof(struct location)); in add_location()
4703 t->count++; in add_location()
4704 l->count = 1; in add_location()
4705 l->addr = track->addr; in add_location()
4706 l->sum_time = age; in add_location()
4707 l->min_time = age; in add_location()
4708 l->max_time = age; in add_location()
4709 l->min_pid = track->pid; in add_location()
4710 l->max_pid = track->pid; in add_location()
4711 cpumask_clear(to_cpumask(l->cpus)); in add_location()
4712 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); in add_location()
4713 nodes_clear(l->nodes); in add_location()
4714 node_set(page_to_nid(virt_to_page(track)), l->nodes); in add_location()
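The heart of add_location() is an ordered insert: the table stays sorted by call-site address, a binary search either finds an existing record to merge into or the insertion point, and inserting shifts the tail up with memmove(). The standalone sketch below keeps that search/insert shape but trims the statistics to a hit count; the kernel also tracks ages, pid ranges and cpu/node masks, and the toy_* names are illustrative.

#include <stdio.h>
#include <string.h>

#define MAXLOC 32

struct toy_location { unsigned long addr; long count; };

static int toy_add_location(struct toy_location *loc, int *count,
			    unsigned long addr)
{
	int start = -1, end = *count;

	for (;;) {
		int pos = start + (end - start + 1) / 2;

		/* nothing left between start and end: insert at "end" */
		if (pos == end)
			break;

		if (loc[pos].addr == addr) {      /* already tracked: merge */
			loc[pos].count++;
			return 1;
		}
		if (addr < loc[pos].addr)
			end = pos;
		else
			start = pos;
	}

	if (*count >= MAXLOC)
		return 0;

	/* shift the tail up and drop the new record into sorted position */
	memmove(loc + end + 1, loc + end, (*count - end) * sizeof(*loc));
	loc[end] = (struct toy_location){ .addr = addr, .count = 1 };
	(*count)++;
	return 1;
}

int main(void)
{
	struct toy_location loc[MAXLOC];
	int count = 0;
	unsigned long sites[] = { 0xc0de, 0xbeef, 0xc0de, 0xf00d, 0xbeef, 0xc0de };

	for (int i = 0; i < 6; i++)
		toy_add_location(loc, &count, sites[i]);

	for (int i = 0; i < count; i++)
		printf("%#lx: %ld hits\n", loc[i].addr, loc[i].count);
	return 0;
}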
4719 struct page *page, enum track_item alloc) in process_slab() argument
4721 void *addr = page_address(page); in process_slab()
4725 map = get_map(s, page); in process_slab()
4726 for_each_object(p, s, addr, page->objects) in process_slab()
4750 struct page *page; in list_locations() local
4752 if (!atomic_long_read(&n->nr_slabs)) in list_locations()
4755 spin_lock_irqsave(&n->list_lock, flags); in list_locations()
4756 list_for_each_entry(page, &n->partial, slab_list) in list_locations()
4757 process_slab(&t, s, page, alloc); in list_locations()
4758 list_for_each_entry(page, &n->full, slab_list) in list_locations()
4759 process_slab(&t, s, page, alloc); in list_locations()
4760 spin_unlock_irqrestore(&n->list_lock, flags); in list_locations()
4766 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100) in list_locations()
4768 len += sprintf(buf + len, "%7ld ", l->count); in list_locations()
4770 if (l->addr) in list_locations()
4771 len += sprintf(buf + len, "%pS", (void *)l->addr); in list_locations()
4773 len += sprintf(buf + len, "<not-available>"); in list_locations()
4775 if (l->sum_time != l->min_time) { in list_locations()
4777 l->min_time, in list_locations()
4778 (long)div_u64(l->sum_time, l->count), in list_locations()
4779 l->max_time); in list_locations()
4782 l->min_time); in list_locations()
4784 if (l->min_pid != l->max_pid) in list_locations()
4785 len += sprintf(buf + len, " pid=%ld-%ld", in list_locations()
4786 l->min_pid, l->max_pid); in list_locations()
4789 l->min_pid); in list_locations()
4792 !cpumask_empty(to_cpumask(l->cpus)) && in list_locations()
4793 len < PAGE_SIZE - 60) in list_locations()
4794 len += scnprintf(buf + len, PAGE_SIZE - len - 50, in list_locations()
4796 cpumask_pr_args(to_cpumask(l->cpus))); in list_locations()
4798 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) && in list_locations()
4799 len < PAGE_SIZE - 60) in list_locations()
4800 len += scnprintf(buf + len, PAGE_SIZE - len - 50, in list_locations()
4802 nodemask_pr_args(&l->nodes)); in list_locations()
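list_locations() accumulates its report into a single page-sized buffer, checking the remaining room before each record and using a length-capped formatter for the mask fields. The sketch below models that bounded-append pattern; toy_scnprintf() only mimics the kernel helper (return the characters actually stored), and the site names and counts are invented.

#include <stdarg.h>
#include <stdio.h>

#define BUFSZ 4096   /* one page, like the sysfs buffer */

static int toy_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int i;

	va_start(ap, fmt);
	i = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	/* vsnprintf() reports the would-be length; cap it to what was stored */
	if (i >= (int)size)
		i = size ? (int)size - 1 : 0;
	return i < 0 ? 0 : i;
}

int main(void)
{
	char buf[BUFSZ];
	int len = 0;
	const char *sites[] = { "kmem_cache_alloc+0x30", "__alloc_skb+0x88" };
	long counts[] = { 1024, 57 };

	for (int i = 0; i < 2; i++) {
		if (len > BUFSZ - 100)        /* stop before a record could truncate */
			break;
		len += toy_scnprintf(buf + len, BUFSZ - len, "%7ld %s\n",
				     counts[i], sites[i]);
	}
	fputs(buf, stdout);
	return 0;
}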
4823 pr_err("-----------------------\n"); in resiliency_test()
4828 pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n", in resiliency_test()
4836 pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> -0x%p\n", in resiliency_test()
4844 pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n", in resiliency_test()
4853 pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p); in resiliency_test()
4859 pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p); in resiliency_test()
4865 pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p); in resiliency_test()
4915 return -ENOMEM; in show_slab_objects()
4921 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, in show_slab_objects()
4924 struct page *page; in show_slab_objects() local
4926 page = READ_ONCE(c->page); in show_slab_objects()
4927 if (!page) in show_slab_objects()
4930 node = page_to_nid(page); in show_slab_objects()
4932 x = page->objects; in show_slab_objects()
4934 x = page->inuse; in show_slab_objects()
4941 page = slub_percpu_partial_read_once(c); in show_slab_objects()
4942 if (page) { in show_slab_objects()
4943 node = page_to_nid(page); in show_slab_objects()
4949 x = page->pages; in show_slab_objects()
4960 * mem_hotplug_lock->slab_mutex->kernfs_mutex in show_slab_objects()
4964 * unplug code doesn't destroy the kmem_cache->node[] data. in show_slab_objects()
4974 x = atomic_long_read(&n->total_objects); in show_slab_objects()
4976 x = atomic_long_read(&n->total_objects) - in show_slab_objects()
4979 x = atomic_long_read(&n->nr_slabs); in show_slab_objects()
4995 x = n->nr_partial; in show_slab_objects()
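A reduced model of the aggregation show_slab_objects() performs for the sysfs files: per-cpu (and per-node) contributions are summed into a per-node array, and the output is the grand total followed by one "N<node>=<count>" entry per node with a non-zero count. The cpu/node figures here are made up for illustration.

#include <stdio.h>

#define NR_NODES 2
#define NR_CPUS  4

int main(void)
{
	/* objects on each cpu's active slab, and the node that slab sits on */
	unsigned long cpu_objects[NR_CPUS] = { 12, 7, 0, 19 };
	int cpu_node[NR_CPUS] = { 0, 1, 0, 1 };
	unsigned long nodes[NR_NODES] = { 0 }, total = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		total += cpu_objects[cpu];
		nodes[cpu_node[cpu]] += cpu_objects[cpu];
	}

	printf("%lu", total);
	for (int node = 0; node < NR_NODES; node++)
		if (nodes[node])
			printf(" N%d=%lu", node, nodes[node]);
	printf("\n");          /* e.g. "38 N0=12 N1=26" */
	return 0;
}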
5030 return sprintf(buf, "%u\n", s->size); in slab_size_show()
5036 return sprintf(buf, "%u\n", s->align); in align_show()
5042 return sprintf(buf, "%u\n", s->object_size); in object_size_show()
5048 return sprintf(buf, "%u\n", oo_objects(s->oo)); in objs_per_slab_show()
5054 return sprintf(buf, "%u\n", oo_order(s->oo)); in order_show()
5060 return sprintf(buf, "%lu\n", s->min_partial); in min_partial_show()
5093 return -EINVAL; in cpu_partial_store()
5103 if (!s->ctor) in ctor_show()
5105 return sprintf(buf, "%pS\n", s->ctor); in ctor_show()
5111 return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); in aliases_show()
5147 struct page *page; in slabs_cpu_partial_show() local
5149 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); in slabs_cpu_partial_show()
5151 if (page) { in slabs_cpu_partial_show()
5152 pages += page->pages; in slabs_cpu_partial_show()
5153 objects += page->pobjects; in slabs_cpu_partial_show()
5161 struct page *page; in slabs_cpu_partial_show() local
5163 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); in slabs_cpu_partial_show()
5165 if (page && len < PAGE_SIZE - 20) in slabs_cpu_partial_show()
5167 page->pobjects, page->pages); in slabs_cpu_partial_show()
5176 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); in reclaim_account_show()
5182 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); in hwcache_align_show()
5189 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); in cache_dma_show()
5196 return sprintf(buf, "%u\n", s->usersize); in usersize_show()
5202 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); in destroy_by_rcu_show()
5221 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); in sanity_checks_show()
5227 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); in trace_show()
5233 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); in red_zone_show()
5240 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON)); in poison_show()
5247 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); in store_user_show()
5260 int ret = -EINVAL; in validate_store()
5273 if (!(s->flags & SLAB_STORE_USER)) in alloc_calls_show()
5274 return -ENOSYS; in alloc_calls_show()
5281 if (!(s->flags & SLAB_STORE_USER)) in free_calls_show()
5282 return -ENOSYS; in free_calls_show()
5291 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); in failslab_show()
5307 return -EINVAL; in shrink_store()
5315 return sprintf(buf, "%u\n", s->remote_node_defrag_ratio / 10); in remote_node_defrag_ratio_show()
5328 return -ERANGE; in remote_node_defrag_ratio_store()
5330 s->remote_node_defrag_ratio = ratio * 10; in remote_node_defrag_ratio_store()
5346 return -ENOMEM; in show_stat()
5349 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; in show_stat()
5359 if (data[cpu] && len < PAGE_SIZE - 20) in show_stat()
5372 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; in clear_stat()
5384 return -EINVAL; \
5506 if (!attribute->show) in slab_attr_show()
5507 return -EIO; in slab_attr_show()
5509 err = attribute->show(s, buf); in slab_attr_show()
5525 if (!attribute->store) in slab_attr_store()
5526 return -EIO; in slab_attr_store()
5528 err = attribute->store(s, buf, len); in slab_attr_store()
5558 * Format :[flags-]size
5575 if (s->flags & SLAB_CACHE_DMA) in create_unique_id()
5577 if (s->flags & SLAB_CACHE_DMA32) in create_unique_id()
5579 if (s->flags & SLAB_RECLAIM_ACCOUNT) in create_unique_id()
5581 if (s->flags & SLAB_CONSISTENCY_CHECKS) in create_unique_id()
5583 if (s->flags & SLAB_ACCOUNT) in create_unique_id()
5586 *p++ = '-'; in create_unique_id()
5587 p += sprintf(p, "%07u", s->size); in create_unique_id()
5589 BUG_ON(p > name + ID_STR_LENGTH - 1); in create_unique_id()
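create_unique_id() builds the ":[flags-]size" string used to name merged caches in sysfs: a ':' prefix, one character per merge-relevant flag that is set, a '-' separator when any flag character was emitted, and the object size padded to seven digits. The sketch below reproduces that shape; the TOY_* flags and the letters chosen for them are illustrative stand-ins, not the kernel's exact mapping.

#include <stdio.h>

#define TOY_DMA      0x1u
#define TOY_RECLAIM  0x2u
#define TOY_ACCOUNT  0x4u

static void toy_unique_id(char *name, unsigned int flags, unsigned int size)
{
	char *p = name;

	*p++ = ':';
	if (flags & TOY_DMA)
		*p++ = 'd';
	if (flags & TOY_RECLAIM)
		*p++ = 'a';
	if (flags & TOY_ACCOUNT)
		*p++ = 'A';
	if (p != name + 1)       /* at least one flag character was written */
		*p++ = '-';
	sprintf(p, "%07u", size);
}

int main(void)
{
	char id[32];

	toy_unique_id(id, TOY_DMA | TOY_ACCOUNT, 192);
	printf("%s\n", id);      /* :dA-0000192 */

	toy_unique_id(id, 0, 4096);
	printf("%s\n", id);      /* :0004096 */
	return 0;
}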
5601 kobject_init(&s->kobj, &slab_ktype); in sysfs_slab_add()
5615 sysfs_remove_link(&slab_kset->kobj, s->name); in sysfs_slab_add()
5616 name = s->name; in sysfs_slab_add()
5625 s->kobj.kset = kset; in sysfs_slab_add()
5626 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); in sysfs_slab_add()
5630 err = sysfs_create_group(&s->kobj, &slab_attr_group); in sysfs_slab_add()
5636 sysfs_slab_alias(s, s->name); in sysfs_slab_add()
5643 kobject_del(&s->kobj); in sysfs_slab_add()
5650 kobject_del(&s->kobj); in sysfs_slab_unlink()
5656 kobject_put(&s->kobj); in sysfs_slab_release()
5679 sysfs_remove_link(&slab_kset->kobj, name); in sysfs_slab_alias()
5680 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); in sysfs_slab_alias()
5685 return -ENOMEM; in sysfs_slab_alias()
5687 al->s = s; in sysfs_slab_alias()
5688 al->name = name; in sysfs_slab_alias()
5689 al->next = alias_list; in sysfs_slab_alias()
5705 return -ENOSYS; in slab_sysfs_init()
5714 s->name); in slab_sysfs_init()
5720 alias_list = alias_list->next; in slab_sysfs_init()
5721 err = sysfs_slab_alias(al->s, al->name); in slab_sysfs_init()
5724 al->name); in slab_sysfs_init()
5754 sinfo->active_objs = nr_objs - nr_free; in get_slabinfo()
5755 sinfo->num_objs = nr_objs; in get_slabinfo()
5756 sinfo->active_slabs = nr_slabs; in get_slabinfo()
5757 sinfo->num_slabs = nr_slabs; in get_slabinfo()
5758 sinfo->objects_per_slab = oo_objects(s->oo); in get_slabinfo()
5759 sinfo->cache_order = oo_order(s->oo); in get_slabinfo()
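A tiny worked example of the numbers get_slabinfo() reports for /proc/slabinfo: active objects are total objects minus free ones, SLUB reports the same value for active and total slabs, and the per-slab geometry comes from the cache's order/objects encoding. The figures below are invented and only need to be mutually consistent (40 slabs x 64 objects = 2560).

#include <stdio.h>

int main(void)
{
	unsigned long nr_slabs = 40, nr_objs = 2560, nr_free = 137;
	unsigned int objects_per_slab = 64, cache_order = 2;

	printf("active_objs=%lu num_objs=%lu active_slabs=%lu num_slabs=%lu "
	       "objperslab=%u pagesperslab=%u\n",
	       nr_objs - nr_free, nr_objs, nr_slabs, nr_slabs,
	       objects_per_slab, 1u << cache_order);
	return 0;
}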
5769 return -EIO; in slabinfo_write()