1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * SLUB: A slab allocator that limits cache line use instead of queuing
4 * objects in per cpu and per node lists.
5 *
 * The allocator synchronizes using per slab locks or atomic operations
7 * and only uses a centralized lock to manage a pool of partial slabs.
8 *
9 * (C) 2007 SGI, Christoph Lameter
10 * (C) 2011 Linux Foundation, Christoph Lameter
11 */
12
13 #include <linux/mm.h>
14 #include <linux/swap.h> /* struct reclaim_state */
15 #include <linux/module.h>
16 #include <linux/bit_spinlock.h>
17 #include <linux/interrupt.h>
18 #include <linux/swab.h>
19 #include <linux/bitops.h>
20 #include <linux/slab.h>
21 #include "slab.h"
22 #include <linux/proc_fs.h>
23 #include <linux/seq_file.h>
24 #include <linux/kasan.h>
25 #include <linux/cpu.h>
26 #include <linux/cpuset.h>
27 #include <linux/mempolicy.h>
28 #include <linux/ctype.h>
29 #include <linux/debugobjects.h>
30 #include <linux/kallsyms.h>
31 #include <linux/memory.h>
32 #include <linux/math64.h>
33 #include <linux/fault-inject.h>
34 #include <linux/stacktrace.h>
35 #include <linux/prefetch.h>
36 #include <linux/memcontrol.h>
37 #include <linux/random.h>
38
39 #include <trace/events/kmem.h>
40
41 #include "internal.h"
42
43 /*
44 * Lock order:
45 * 1. slab_mutex (Global Mutex)
46 * 2. node->list_lock
47 * 3. slab_lock(page) (Only on some arches and for debugging)
48 *
49 * slab_mutex
50 *
51 * The role of the slab_mutex is to protect the list of all the slabs
52 * and to synchronize major metadata changes to slab cache structures.
53 *
54 * The slab_lock is only used for debugging and on arches that do not
55 * have the ability to do a cmpxchg_double. It only protects:
 *	A. page->freelist	-> List of free objects in a page
57 * B. page->inuse -> Number of objects in use
58 * C. page->objects -> Number of objects in page
59 * D. page->frozen -> frozen state
60 *
61 * If a slab is frozen then it is exempt from list management. It is not
62 * on any list except per cpu partial list. The processor that froze the
63 * slab is the one who can perform list operations on the page. Other
64 * processors may put objects onto the freelist but the processor that
65 * froze the slab is the only one that can retrieve the objects from the
66 * page's freelist.
67 *
68 * The list_lock protects the partial and full list on each node and
69 * the partial slab counter. If taken then no new slabs may be added or
 * removed from the lists nor can the number of partial slabs be modified.
71 * (Note that the total number of slabs is an atomic value that may be
72 * modified without taking the list lock).
73 *
74 * The list_lock is a centralized lock and thus we avoid taking it as
75 * much as possible. As long as SLUB does not have to handle partial
76 * slabs, operations can continue without any centralized lock. F.e.
77 * allocating a long series of objects that fill up slabs does not require
78 * the list lock.
79 * Interrupts are disabled during allocation and deallocation in order to
80 * make the slab allocator safe to use in the context of an irq. In addition
81 * interrupts are disabled to ensure that the processor does not change
82 * while handling per_cpu slabs, due to kernel preemption.
83 *
84 * SLUB assigns one slab for allocation to each processor.
85 * Allocations only occur from these slabs called cpu slabs.
86 *
87 * Slabs with free elements are kept on a partial list and during regular
88 * operations no list for full slabs is used. If an object in a full slab is
89 * freed then the slab will show up again on the partial lists.
90 * We track full slabs for debugging purposes though because otherwise we
91 * cannot scan all objects.
92 *
93 * Slabs are freed when they become empty. Teardown and setup is
94 * minimal so we rely on the page allocators per cpu caches for
95 * fast frees and allocs.
96 *
97 * Overloading of page flags that are otherwise used for LRU management.
98 *
99 * PageActive The slab is frozen and exempt from list processing.
100 * This means that the slab is dedicated to a purpose
101 * such as satisfying allocations for a specific
102 * processor. Objects may be freed in the slab while
103 * it is frozen but slab_free will then skip the usual
104 * list operations. It is up to the processor holding
105 * the slab to integrate the slab into the slab lists
106 * when the slab is no longer needed.
107 *
108 * One use of this flag is to mark slabs that are
109 * used for allocations. Then such a slab becomes a cpu
110 * slab. The cpu slab may be equipped with an additional
111 * freelist that allows lockless access to
112 * free objects in addition to the regular freelist
113 * that requires the slab lock.
114 *
115 * PageError Slab requires special handling due to debug
116 * options set. This moves slab handling out of
117 * the fast path and disables lockless freelists.
118 */
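
/*
 * An illustrative sketch only (the real code lives in slab_alloc_node() and
 * friends further down): allocation from a frozen cpu slab is a single
 * cmpxchg_double on the per cpu freelist and transaction id, roughly
 *
 *	object = c->freelist;
 *	tid = c->tid;
 *	if (object && this_cpu_cmpxchg_double(s->cpu_slab->freelist,
 *					      s->cpu_slab->tid,
 *					      object, tid,
 *					      get_freepointer_safe(s, object),
 *					      next_tid(tid)))
 *		return object;		(fast path hit, no locks taken)
 *
 * Only when the cpu slab is exhausted does the allocator fall back to the
 * node partial lists, and only then is n->list_lock taken.
 */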
119
static inline int kmem_cache_debug(struct kmem_cache *s)
121 {
122 #ifdef CONFIG_SLUB_DEBUG
123 return unlikely(s->flags & SLAB_DEBUG_FLAGS);
124 #else
125 return 0;
126 #endif
127 }
128
void *fixup_red_left(struct kmem_cache *s, void *p)
130 {
131 if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
132 p += s->red_left_pad;
133
134 return p;
135 }
136
static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
138 {
139 #ifdef CONFIG_SLUB_CPU_PARTIAL
140 return !kmem_cache_debug(s);
141 #else
142 return false;
143 #endif
144 }
145
146 /*
147 * Issues still to be resolved:
148 *
149 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
150 *
151 * - Variable sizing of the per node arrays
152 */
153
154 /* Enable to test recovery from slab corruption on boot */
155 #undef SLUB_RESILIENCY_TEST
156
157 /* Enable to log cmpxchg failures */
158 #undef SLUB_DEBUG_CMPXCHG
159
160 /*
 * Minimum number of partial slabs. These will be left on the partial
162 * lists even if they are empty. kmem_cache_shrink may reclaim them.
163 */
164 #define MIN_PARTIAL 5
165
166 /*
167 * Maximum number of desirable partial slabs.
168 * The existence of more partial slabs makes kmem_cache_shrink
169 * sort the partial list by the number of objects in use.
170 */
171 #define MAX_PARTIAL 10
172
173 #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
174 SLAB_POISON | SLAB_STORE_USER)
175
176 /*
177 * These debug flags cannot use CMPXCHG because there might be consistency
178 * issues when checking or reading debug information
179 */
180 #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
181 SLAB_TRACE)
182
183
184 /*
185 * Debugging flags that require metadata to be stored in the slab. These get
186 * disabled when slub_debug=O is used and a cache's min order increases with
187 * metadata.
188 */
189 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
190
191 #define OO_SHIFT 16
192 #define OO_MASK ((1 << OO_SHIFT) - 1)
193 #define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
194
195 /* Internal SLUB flags */
196 /* Poison object */
197 #define __OBJECT_POISON ((slab_flags_t __force)0x80000000U)
198 /* Use cmpxchg_double */
199 #define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000U)
200
201 /*
202 * Tracking user of a slab.
203 */
204 #define TRACK_ADDRS_COUNT 16
205 struct track {
206 unsigned long addr; /* Called from address */
207 #ifdef CONFIG_STACKTRACE
208 unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
209 #endif
210 int cpu; /* Was running on cpu */
211 int pid; /* Pid context */
212 unsigned long when; /* When did the operation occur */
213 };
214
215 enum track_item { TRACK_ALLOC, TRACK_FREE };
216
217 #ifdef CONFIG_SYSFS
218 static int sysfs_slab_add(struct kmem_cache *);
219 static int sysfs_slab_alias(struct kmem_cache *, const char *);
220 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
221 static void sysfs_slab_remove(struct kmem_cache *s);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
static inline void sysfs_slab_remove(struct kmem_cache *s) { }
#endif
229
static inline void stat(const struct kmem_cache *s, enum stat_item si)
231 {
232 #ifdef CONFIG_SLUB_STATS
233 /*
234 * The rmw is racy on a preemptible kernel but this is acceptable, so
235 * avoid this_cpu_add()'s irq-disable overhead.
236 */
237 raw_cpu_inc(s->cpu_slab->stat[si]);
238 #endif
239 }
240
241 /********************************************************************
242 * Core slab cache functions
243 *******************************************************************/
244
245 /*
246 * Returns freelist pointer (ptr). With hardening, this is obfuscated
247 * with an XOR of the address where the pointer is held and a per-cache
248 * random number.
249 */
static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
251 unsigned long ptr_addr)
252 {
253 #ifdef CONFIG_SLAB_FREELIST_HARDENED
254 /*
255 * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
256 * Normally, this doesn't cause any issues, as both set_freepointer()
257 * and get_freepointer() are called with a pointer with the same tag.
258 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
 * example, when __free_slab() iterates over objects in a cache, it
 * passes untagged pointers to check_object(). check_object() in turn
261 * calls get_freepointer() with an untagged pointer, which causes the
262 * freepointer to be restored incorrectly.
263 */
264 return (void *)((unsigned long)ptr ^ s->random ^
265 swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
266 #else
267 return ptr;
268 #endif
269 }
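
/*
 * Note that the obfuscation is an involution: feeding a stored value and the
 * same ptr_addr back through freelist_ptr() XORs s->random and the swabbed
 * address out again, recovering the original pointer.
 * freelist_dereference() below relies on exactly that.
 */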
270
271 /* Returns the freelist pointer recorded at location ptr_addr. */
static inline void *freelist_dereference(const struct kmem_cache *s,
273 void *ptr_addr)
274 {
275 return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
276 (unsigned long)ptr_addr);
277 }
278
static inline void *get_freepointer(struct kmem_cache *s, void *object)
280 {
281 return freelist_dereference(s, object + s->offset);
282 }
283
static void prefetch_freepointer(const struct kmem_cache *s, void *object)
285 {
286 prefetch(object + s->offset);
287 }
288
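/*
 * Like get_freepointer(), but usable on the lockless fast path: with
 * debug_pagealloc enabled the page backing @object may already have been
 * unmapped, so the pointer is read via probe_kernel_read(), which tolerates
 * faults, instead of being dereferenced directly.
 */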
static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
290 {
291 unsigned long freepointer_addr;
292 void *p;
293
294 if (!debug_pagealloc_enabled_static())
295 return get_freepointer(s, object);
296
297 freepointer_addr = (unsigned long)object + s->offset;
298 probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
299 return freelist_ptr(s, p, freepointer_addr);
300 }
301
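/*
 * Store the freelist pointer for @object at object + s->offset, obfuscated
 * by freelist_ptr() when CONFIG_SLAB_FREELIST_HARDENED is enabled.
 */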
static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
303 {
304 unsigned long freeptr_addr = (unsigned long)object + s->offset;
305
306 #ifdef CONFIG_SLAB_FREELIST_HARDENED
307 BUG_ON(object == fp); /* naive detection of double free or corruption */
308 #endif
309
310 *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
311 }
312
313 /* Loop over all objects in a slab */
314 #define for_each_object(__p, __s, __addr, __objects) \
315 for (__p = fixup_red_left(__s, __addr); \
316 __p < (__addr) + (__objects) * (__s)->size; \
317 __p += (__s)->size)
318
319 /* Determine object index from a given position */
static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
321 {
322 return (kasan_reset_tag(p) - addr) / s->size;
323 }
324
static inline unsigned int order_objects(unsigned int order, unsigned int size)
326 {
327 return ((unsigned int)PAGE_SIZE << order) / size;
328 }
329
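/*
 * kmem_cache_order_objects packs the slab page order and the object count
 * per slab into one word: the order is kept above OO_SHIFT, the object
 * count in the low OO_MASK bits. E.g. with 4KiB pages, an order-3 slab of
 * 256-byte objects holds 32768 / 256 = 128 objects and is encoded as
 * (3 << OO_SHIFT) + 128.
 */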
static inline struct kmem_cache_order_objects oo_make(unsigned int order,
331 unsigned int size)
332 {
333 struct kmem_cache_order_objects x = {
334 (order << OO_SHIFT) + order_objects(order, size)
335 };
336
337 return x;
338 }
339
static inline unsigned int oo_order(struct kmem_cache_order_objects x)
341 {
342 return x.x >> OO_SHIFT;
343 }
344
static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
346 {
347 return x.x & OO_MASK;
348 }
349
350 /*
351 * Per slab locking using the pagelock
352 */
static __always_inline void slab_lock(struct page *page)
354 {
355 VM_BUG_ON_PAGE(PageTail(page), page);
356 bit_spin_lock(PG_locked, &page->flags);
357 }
358
static __always_inline void slab_unlock(struct page *page)
360 {
361 VM_BUG_ON_PAGE(PageTail(page), page);
362 __bit_spin_unlock(PG_locked, &page->flags);
363 }
364
365 /* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
367 void *freelist_old, unsigned long counters_old,
368 void *freelist_new, unsigned long counters_new,
369 const char *n)
370 {
371 VM_BUG_ON(!irqs_disabled());
372 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
373 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
374 if (s->flags & __CMPXCHG_DOUBLE) {
375 if (cmpxchg_double(&page->freelist, &page->counters,
376 freelist_old, counters_old,
377 freelist_new, counters_new))
378 return true;
379 } else
380 #endif
381 {
382 slab_lock(page);
383 if (page->freelist == freelist_old &&
384 page->counters == counters_old) {
385 page->freelist = freelist_new;
386 page->counters = counters_new;
387 slab_unlock(page);
388 return true;
389 }
390 slab_unlock(page);
391 }
392
393 cpu_relax();
394 stat(s, CMPXCHG_DOUBLE_FAIL);
395
396 #ifdef SLUB_DEBUG_CMPXCHG
397 pr_info("%s %s: cmpxchg double redo ", n, s->name);
398 #endif
399
400 return false;
401 }
402
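/*
 * Variant of __cmpxchg_double_slab() that may be called with interrupts
 * enabled: it disables interrupts itself around the slab_lock() fallback.
 */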
static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
404 void *freelist_old, unsigned long counters_old,
405 void *freelist_new, unsigned long counters_new,
406 const char *n)
407 {
408 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
409 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
410 if (s->flags & __CMPXCHG_DOUBLE) {
411 if (cmpxchg_double(&page->freelist, &page->counters,
412 freelist_old, counters_old,
413 freelist_new, counters_new))
414 return true;
415 } else
416 #endif
417 {
418 unsigned long flags;
419
420 local_irq_save(flags);
421 slab_lock(page);
422 if (page->freelist == freelist_old &&
423 page->counters == counters_old) {
424 page->freelist = freelist_new;
425 page->counters = counters_new;
426 slab_unlock(page);
427 local_irq_restore(flags);
428 return true;
429 }
430 slab_unlock(page);
431 local_irq_restore(flags);
432 }
433
434 cpu_relax();
435 stat(s, CMPXCHG_DOUBLE_FAIL);
436
437 #ifdef SLUB_DEBUG_CMPXCHG
438 pr_info("%s %s: cmpxchg double redo ", n, s->name);
439 #endif
440
441 return false;
442 }
443
444 #ifdef CONFIG_SLUB_DEBUG
445 /*
 * Determine a map of objects in use on a page.
447 *
448 * Node listlock must be held to guarantee that the page does
449 * not vanish from under us.
450 */
static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
452 {
453 void *p;
454 void *addr = page_address(page);
455
456 for (p = page->freelist; p; p = get_freepointer(s, p))
457 set_bit(slab_index(p, s, addr), map);
458 }
459
static inline unsigned int size_from_object(struct kmem_cache *s)
461 {
462 if (s->flags & SLAB_RED_ZONE)
463 return s->size - s->red_left_pad;
464
465 return s->size;
466 }
467
static inline void *restore_red_left(struct kmem_cache *s, void *p)
469 {
470 if (s->flags & SLAB_RED_ZONE)
471 p -= s->red_left_pad;
472
473 return p;
474 }
475
476 /*
477 * Debug settings:
478 */
479 #if defined(CONFIG_SLUB_DEBUG_ON)
480 static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
481 #else
482 static slab_flags_t slub_debug;
483 #endif
484
485 static char *slub_debug_slabs;
486 static int disable_higher_order_debug;
487
488 /*
489 * slub is about to manipulate internal object metadata. This memory lies
490 * outside the range of the allocated object, so accessing it would normally
491 * be reported by kasan as a bounds error. metadata_access_enable() is used
492 * to tell kasan that these accesses are OK.
493 */
static inline void metadata_access_enable(void)
495 {
496 kasan_disable_current();
497 }
498
static inline void metadata_access_disable(void)
500 {
501 kasan_enable_current();
502 }
503
504 /*
505 * Object debugging
506 */
507
508 /* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
510 struct page *page, void *object)
511 {
512 void *base;
513
514 if (!object)
515 return 1;
516
517 base = page_address(page);
518 object = kasan_reset_tag(object);
519 object = restore_red_left(s, object);
520 if (object < base || object >= base + page->objects * s->size ||
521 (object - base) % s->size) {
522 return 0;
523 }
524
525 return 1;
526 }
527
static void print_section(char *level, char *text, u8 *addr,
529 unsigned int length)
530 {
531 metadata_access_enable();
532 print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
533 length, 1);
534 metadata_access_disable();
535 }
536
537 /*
538 * See comment in calculate_sizes().
539 */
static inline bool freeptr_outside_object(struct kmem_cache *s)
541 {
542 return s->offset >= s->inuse;
543 }
544
545 /*
546 * Return offset of the end of info block which is inuse + free pointer if
547 * not overlapping with object.
548 */
static inline unsigned int get_info_end(struct kmem_cache *s)
550 {
551 if (freeptr_outside_object(s))
552 return s->inuse + sizeof(void *);
553 else
554 return s->inuse;
555 }
556
static struct track *get_track(struct kmem_cache *s, void *object,
558 enum track_item alloc)
559 {
560 struct track *p;
561
562 p = object + get_info_end(s);
563
564 return p + alloc;
565 }
566
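/*
 * Record the caller (@addr), current cpu, pid and jiffies in @object's
 * alloc or free track slot; a zero @addr clears the slot instead.
 */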
static void set_track(struct kmem_cache *s, void *object,
568 enum track_item alloc, unsigned long addr)
569 {
570 struct track *p = get_track(s, object, alloc);
571
572 if (addr) {
573 #ifdef CONFIG_STACKTRACE
574 unsigned int nr_entries;
575
576 metadata_access_enable();
577 nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3);
578 metadata_access_disable();
579
580 if (nr_entries < TRACK_ADDRS_COUNT)
581 p->addrs[nr_entries] = 0;
582 #endif
583 p->addr = addr;
584 p->cpu = smp_processor_id();
585 p->pid = current->pid;
586 p->when = jiffies;
587 } else {
588 memset(p, 0, sizeof(struct track));
589 }
590 }
591
static void init_tracking(struct kmem_cache *s, void *object)
593 {
594 if (!(s->flags & SLAB_STORE_USER))
595 return;
596
597 set_track(s, object, TRACK_FREE, 0UL);
598 set_track(s, object, TRACK_ALLOC, 0UL);
599 }
600
static void print_track(const char *s, struct track *t, unsigned long pr_time)
602 {
603 if (!t->addr)
604 return;
605
606 pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
607 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
608 #ifdef CONFIG_STACKTRACE
609 {
610 int i;
611 for (i = 0; i < TRACK_ADDRS_COUNT; i++)
612 if (t->addrs[i])
613 pr_err("\t%pS\n", (void *)t->addrs[i]);
614 else
615 break;
616 }
617 #endif
618 }
619
static void print_tracking(struct kmem_cache *s, void *object)
621 {
622 unsigned long pr_time = jiffies;
623 if (!(s->flags & SLAB_STORE_USER))
624 return;
625
626 print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
627 print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
628 }
629
static void print_page_info(struct page *page)
631 {
632 pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
633 page, page->objects, page->inuse, page->freelist, page->flags);
634
635 }
636
static void slab_bug(struct kmem_cache *s, char *fmt, ...)
638 {
639 struct va_format vaf;
640 va_list args;
641
642 va_start(args, fmt);
643 vaf.fmt = fmt;
644 vaf.va = &args;
645 pr_err("=============================================================================\n");
646 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
647 pr_err("-----------------------------------------------------------------------------\n\n");
648
649 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
650 va_end(args);
651 }
652
static void slab_fix(struct kmem_cache *s, char *fmt, ...)
654 {
655 struct va_format vaf;
656 va_list args;
657
658 va_start(args, fmt);
659 vaf.fmt = fmt;
660 vaf.va = &args;
661 pr_err("FIX %s: %pV\n", s->name, &vaf);
662 va_end(args);
663 }
664
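/*
 * Check that @nextfree, just read from an object on @freelist, is a valid
 * pointer. If it is not, report the corruption and truncate *freelist so
 * that the corrupted chain is isolated (the remaining free objects of this
 * slab are lost). Returns true if corruption was detected and handled.
 */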
static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
666 void **freelist, void *nextfree)
667 {
668 if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
669 !check_valid_pointer(s, page, nextfree) && freelist) {
670 object_err(s, page, *freelist, "Freechain corrupt");
671 *freelist = NULL;
672 slab_fix(s, "Isolate corrupted freechain");
673 return true;
674 }
675
676 return false;
677 }
678
static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
680 {
681 unsigned int off; /* Offset of last byte */
682 u8 *addr = page_address(page);
683
684 print_tracking(s, p);
685
686 print_page_info(page);
687
688 pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
689 p, p - addr, get_freepointer(s, p));
690
691 if (s->flags & SLAB_RED_ZONE)
692 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
693 s->red_left_pad);
694 else if (p > addr + 16)
695 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
696
697 print_section(KERN_ERR, "Object ", p,
698 min_t(unsigned int, s->object_size, PAGE_SIZE));
699 if (s->flags & SLAB_RED_ZONE)
700 print_section(KERN_ERR, "Redzone ", p + s->object_size,
701 s->inuse - s->object_size);
702
703 off = get_info_end(s);
704
705 if (s->flags & SLAB_STORE_USER)
706 off += 2 * sizeof(struct track);
707
708 off += kasan_metadata_size(s);
709
710 if (off != size_from_object(s))
711 /* Beginning of the filler is the free pointer */
712 print_section(KERN_ERR, "Padding ", p + off,
713 size_from_object(s) - off);
714
715 dump_stack();
716 }
717
void object_err(struct kmem_cache *s, struct page *page,
719 u8 *object, char *reason)
720 {
721 slab_bug(s, "%s", reason);
722 print_trailer(s, page, object);
723 }
724
static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
726 const char *fmt, ...)
727 {
728 va_list args;
729 char buf[100];
730
731 va_start(args, fmt);
732 vsnprintf(buf, sizeof(buf), fmt, args);
733 va_end(args);
734 slab_bug(s, "%s", buf);
735 print_page_info(page);
736 dump_stack();
737 }
738
static void init_object(struct kmem_cache *s, void *object, u8 val)
740 {
741 u8 *p = object;
742
743 if (s->flags & SLAB_RED_ZONE)
744 memset(p - s->red_left_pad, val, s->red_left_pad);
745
746 if (s->flags & __OBJECT_POISON) {
747 memset(p, POISON_FREE, s->object_size - 1);
748 p[s->object_size - 1] = POISON_END;
749 }
750
751 if (s->flags & SLAB_RED_ZONE)
752 memset(p + s->object_size, val, s->inuse - s->object_size);
753 }
754
static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
756 void *from, void *to)
757 {
758 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
759 memset(from, data, to - from);
760 }
761
static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
763 u8 *object, char *what,
764 u8 *start, unsigned int value, unsigned int bytes)
765 {
766 u8 *fault;
767 u8 *end;
768
769 metadata_access_enable();
770 fault = memchr_inv(start, value, bytes);
771 metadata_access_disable();
772 if (!fault)
773 return 1;
774
775 end = start + bytes;
776 while (end > fault && end[-1] == value)
777 end--;
778
779 slab_bug(s, "%s overwritten", what);
780 pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
781 fault, end - 1, fault[0], value);
782 print_trailer(s, page, object);
783
784 restore_bytes(s, what, value, fault, end);
785 return 0;
786 }
787
788 /*
789 * Object layout:
790 *
791 * object address
792 * Bytes of the object to be managed.
793 * If the freepointer may overlay the object then the free
794 * pointer is at the middle of the object.
795 *
796 * Poisoning uses 0x6b (POISON_FREE) and the last byte is
797 * 0xa5 (POISON_END)
798 *
799 * object + s->object_size
800 * Padding to reach word boundary. This is also used for Redzoning.
801 * Padding is extended by another word if Redzoning is enabled and
802 * object_size == inuse.
803 *
804 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
805 * 0xcc (RED_ACTIVE) for objects in use.
806 *
807 * object + s->inuse
808 * Meta data starts here.
809 *
810 * A. Free pointer (if we cannot overwrite object on free)
811 * B. Tracking data for SLAB_STORE_USER
 *	C. Padding to reach required alignment boundary or at minimum
813 * one word if debugging is on to be able to detect writes
814 * before the word boundary.
815 *
816 * Padding is done using 0x5a (POISON_INUSE)
817 *
818 * object + s->size
819 * Nothing is used beyond s->size.
820 *
821 * If slabcaches are merged then the object_size and inuse boundaries are mostly
822 * ignored. And therefore no slab options that rely on these boundaries
823 * may be used with merged slabcaches.
824 */
825
static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
827 {
828 unsigned long off = get_info_end(s); /* The end of info */
829
830 if (s->flags & SLAB_STORE_USER)
831 /* We also have user information there */
832 off += 2 * sizeof(struct track);
833
834 off += kasan_metadata_size(s);
835
836 if (size_from_object(s) == off)
837 return 1;
838
839 return check_bytes_and_report(s, page, p, "Object padding",
840 p + off, POISON_INUSE, size_from_object(s) - off);
841 }
842
843 /* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
845 {
846 u8 *start;
847 u8 *fault;
848 u8 *end;
849 u8 *pad;
850 int length;
851 int remainder;
852
853 if (!(s->flags & SLAB_POISON))
854 return 1;
855
856 start = page_address(page);
857 length = page_size(page);
858 end = start + length;
859 remainder = length % s->size;
860 if (!remainder)
861 return 1;
862
863 pad = end - remainder;
864 metadata_access_enable();
865 fault = memchr_inv(pad, POISON_INUSE, remainder);
866 metadata_access_disable();
867 if (!fault)
868 return 1;
869 while (end > fault && end[-1] == POISON_INUSE)
870 end--;
871
872 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
873 print_section(KERN_ERR, "Padding ", pad, remainder);
874
875 restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
876 return 0;
877 }
878
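/*
 * Verify one object's red zones, poisoning and free pointer. Reports and
 * repairs corrupted bytes where possible; returns 0 if the object failed
 * the checks, 1 otherwise.
 */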
static int check_object(struct kmem_cache *s, struct page *page,
880 void *object, u8 val)
881 {
882 u8 *p = object;
883 u8 *endobject = object + s->object_size;
884
885 if (s->flags & SLAB_RED_ZONE) {
886 if (!check_bytes_and_report(s, page, object, "Left Redzone",
887 object - s->red_left_pad, val, s->red_left_pad))
888 return 0;
889
890 if (!check_bytes_and_report(s, page, object, "Right Redzone",
891 endobject, val, s->inuse - s->object_size))
892 return 0;
893 } else {
894 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
895 check_bytes_and_report(s, page, p, "Alignment padding",
896 endobject, POISON_INUSE,
897 s->inuse - s->object_size);
898 }
899 }
900
901 if (s->flags & SLAB_POISON) {
902 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
903 (!check_bytes_and_report(s, page, p, "Poison", p,
904 POISON_FREE, s->object_size - 1) ||
905 !check_bytes_and_report(s, page, p, "End Poison",
906 p + s->object_size - 1, POISON_END, 1)))
907 return 0;
908 /*
909 * check_pad_bytes cleans up on its own.
910 */
911 check_pad_bytes(s, page, p);
912 }
913
914 if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
915 /*
916 * Object and freepointer overlap. Cannot check
917 * freepointer while object is allocated.
918 */
919 return 1;
920
921 /* Check free pointer validity */
922 if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
923 object_err(s, page, p, "Freepointer corrupt");
924 /*
925 * No choice but to zap it and thus lose the remainder
926 * of the free objects in this slab. May cause
927 * another error because the object count is now wrong.
928 */
929 set_freepointer(s, p, NULL);
930 return 0;
931 }
932 return 1;
933 }
934
static int check_slab(struct kmem_cache *s, struct page *page)
936 {
937 int maxobj;
938
939 VM_BUG_ON(!irqs_disabled());
940
941 if (!PageSlab(page)) {
942 slab_err(s, page, "Not a valid slab page");
943 return 0;
944 }
945
946 maxobj = order_objects(compound_order(page), s->size);
947 if (page->objects > maxobj) {
948 slab_err(s, page, "objects %u > max %u",
949 page->objects, maxobj);
950 return 0;
951 }
952 if (page->inuse > page->objects) {
953 slab_err(s, page, "inuse %u > max %u",
954 page->inuse, page->objects);
955 return 0;
956 }
957 /* Slab_pad_check fixes things up after itself */
958 slab_pad_check(s, page);
959 return 1;
960 }
961
962 /*
963 * Determine if a certain object on a page is on the freelist. Must hold the
964 * slab lock to guarantee that the chains are in a consistent state.
965 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
967 {
968 int nr = 0;
969 void *fp;
970 void *object = NULL;
971 int max_objects;
972
973 fp = page->freelist;
974 while (fp && nr <= page->objects) {
975 if (fp == search)
976 return 1;
977 if (!check_valid_pointer(s, page, fp)) {
978 if (object) {
979 object_err(s, page, object,
980 "Freechain corrupt");
981 set_freepointer(s, object, NULL);
982 } else {
983 slab_err(s, page, "Freepointer corrupt");
984 page->freelist = NULL;
985 page->inuse = page->objects;
986 slab_fix(s, "Freelist cleared");
987 return 0;
988 }
989 break;
990 }
991 object = fp;
992 fp = get_freepointer(s, object);
993 nr++;
994 }
995
996 max_objects = order_objects(compound_order(page), s->size);
997 if (max_objects > MAX_OBJS_PER_PAGE)
998 max_objects = MAX_OBJS_PER_PAGE;
999
1000 if (page->objects != max_objects) {
1001 slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
1002 page->objects, max_objects);
1003 page->objects = max_objects;
1004 slab_fix(s, "Number of objects adjusted.");
1005 }
1006 if (page->inuse != page->objects - nr) {
1007 slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
1008 page->inuse, page->objects - nr);
1009 page->inuse = page->objects - nr;
1010 slab_fix(s, "Object count adjusted.");
1011 }
1012 return search == NULL;
1013 }
1014
static void trace(struct kmem_cache *s, struct page *page, void *object,
1016 int alloc)
1017 {
1018 if (s->flags & SLAB_TRACE) {
1019 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
1020 s->name,
1021 alloc ? "alloc" : "free",
1022 object, page->inuse,
1023 page->freelist);
1024
1025 if (!alloc)
1026 print_section(KERN_INFO, "Object ", (void *)object,
1027 s->object_size);
1028
1029 dump_stack();
1030 }
1031 }
1032
1033 /*
1034 * Tracking of fully allocated slabs for debugging purposes.
1035 */
static void add_full(struct kmem_cache *s,
1037 struct kmem_cache_node *n, struct page *page)
1038 {
1039 if (!(s->flags & SLAB_STORE_USER))
1040 return;
1041
1042 lockdep_assert_held(&n->list_lock);
1043 list_add(&page->slab_list, &n->full);
1044 }
1045
static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
1047 {
1048 if (!(s->flags & SLAB_STORE_USER))
1049 return;
1050
1051 lockdep_assert_held(&n->list_lock);
1052 list_del(&page->slab_list);
1053 }
1054
1055 /* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1057 {
1058 struct kmem_cache_node *n = get_node(s, node);
1059
1060 return atomic_long_read(&n->nr_slabs);
1061 }
1062
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1064 {
1065 return atomic_long_read(&n->nr_slabs);
1066 }
1067
static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1069 {
1070 struct kmem_cache_node *n = get_node(s, node);
1071
1072 /*
1073 * May be called early in order to allocate a slab for the
1074 * kmem_cache_node structure. Solve the chicken-egg
1075 * dilemma by deferring the increment of the count during
1076 * bootstrap (see early_kmem_cache_node_alloc).
1077 */
1078 if (likely(n)) {
1079 atomic_long_inc(&n->nr_slabs);
1080 atomic_long_add(objects, &n->total_objects);
1081 }
1082 }

static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1084 {
1085 struct kmem_cache_node *n = get_node(s, node);
1086
1087 atomic_long_dec(&n->nr_slabs);
1088 atomic_long_sub(objects, &n->total_objects);
1089 }
1090
1091 /* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, struct page *page,
1093 void *object)
1094 {
1095 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
1096 return;
1097
1098 init_object(s, object, SLUB_RED_INACTIVE);
1099 init_tracking(s, object);
1100 }
1101
1102 static
void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
1104 {
1105 if (!(s->flags & SLAB_POISON))
1106 return;
1107
1108 metadata_access_enable();
1109 memset(addr, POISON_INUSE, page_size(page));
1110 metadata_access_disable();
1111 }
1112
static inline int alloc_consistency_checks(struct kmem_cache *s,
1114 struct page *page, void *object)
1115 {
1116 if (!check_slab(s, page))
1117 return 0;
1118
1119 if (!check_valid_pointer(s, page, object)) {
1120 object_err(s, page, object, "Freelist Pointer check fails");
1121 return 0;
1122 }
1123
1124 if (!check_object(s, page, object, SLUB_RED_INACTIVE))
1125 return 0;
1126
1127 return 1;
1128 }
1129
static noinline int alloc_debug_processing(struct kmem_cache *s,
1131 struct page *page,
1132 void *object, unsigned long addr)
1133 {
1134 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1135 if (!alloc_consistency_checks(s, page, object))
1136 goto bad;
1137 }
1138
	/* Success. Perform special debug activities for allocs. */
1140 if (s->flags & SLAB_STORE_USER)
1141 set_track(s, object, TRACK_ALLOC, addr);
1142 trace(s, page, object, 1);
1143 init_object(s, object, SLUB_RED_ACTIVE);
1144 return 1;
1145
1146 bad:
1147 if (PageSlab(page)) {
1148 /*
		 * If this is a slab page then let's do the best we can
1150 * to avoid issues in the future. Marking all objects
1151 * as used avoids touching the remaining objects.
1152 */
1153 slab_fix(s, "Marking all objects used");
1154 page->inuse = page->objects;
1155 page->freelist = NULL;
1156 }
1157 return 0;
1158 }
1159
static inline int free_consistency_checks(struct kmem_cache *s,
1161 struct page *page, void *object, unsigned long addr)
1162 {
1163 if (!check_valid_pointer(s, page, object)) {
1164 slab_err(s, page, "Invalid object pointer 0x%p", object);
1165 return 0;
1166 }
1167
1168 if (on_freelist(s, page, object)) {
1169 object_err(s, page, object, "Object already free");
1170 return 0;
1171 }
1172
1173 if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1174 return 0;
1175
1176 if (unlikely(s != page->slab_cache)) {
1177 if (!PageSlab(page)) {
1178 slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
1179 object);
1180 } else if (!page->slab_cache) {
1181 pr_err("SLUB <none>: no slab for object 0x%p.\n",
1182 object);
1183 dump_stack();
1184 } else
1185 object_err(s, page, object,
1186 "page slab pointer corrupt.");
1187 return 0;
1188 }
1189 return 1;
1190 }
1191
1192 /* Supports checking bulk free of a constructed freelist */
static noinline int free_debug_processing(
1194 struct kmem_cache *s, struct page *page,
1195 void *head, void *tail, int bulk_cnt,
1196 unsigned long addr)
1197 {
1198 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1199 void *object = head;
1200 int cnt = 0;
1201 unsigned long flags;
1202 int ret = 0;
1203
1204 spin_lock_irqsave(&n->list_lock, flags);
1205 slab_lock(page);
1206
1207 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1208 if (!check_slab(s, page))
1209 goto out;
1210 }
1211
1212 next_object:
1213 cnt++;
1214
1215 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1216 if (!free_consistency_checks(s, page, object, addr))
1217 goto out;
1218 }
1219
1220 if (s->flags & SLAB_STORE_USER)
1221 set_track(s, object, TRACK_FREE, addr);
1222 trace(s, page, object, 0);
1223 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
1224 init_object(s, object, SLUB_RED_INACTIVE);
1225
1226 /* Reached end of constructed freelist yet? */
1227 if (object != tail) {
1228 object = get_freepointer(s, object);
1229 goto next_object;
1230 }
1231 ret = 1;
1232
1233 out:
1234 if (cnt != bulk_cnt)
1235 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
1236 bulk_cnt, cnt);
1237
1238 slab_unlock(page);
1239 spin_unlock_irqrestore(&n->list_lock, flags);
1240 if (!ret)
1241 slab_fix(s, "Object at 0x%p not freed", object);
1242 return ret;
1243 }
1244
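/*
 * Parse the slub_debug= boot parameter. The option letters map to debug
 * flags as handled below: f = consistency checks, z = red zoning,
 * p = poisoning, u = user tracking, t = tracing, a = failslab,
 * o = don't enable debugging where it would raise a cache's minimum order.
 * For example, "slub_debug=FZP,kmalloc-64" would enable consistency checks,
 * red zoning and poisoning for the kmalloc-64 cache only.
 */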
static int __init setup_slub_debug(char *str)
1246 {
1247 slub_debug = DEBUG_DEFAULT_FLAGS;
1248 if (*str++ != '=' || !*str)
1249 /*
1250 * No options specified. Switch on full debugging.
1251 */
1252 goto out;
1253
1254 if (*str == ',')
1255 /*
1256 * No options but restriction on slabs. This means full
1257 * debugging for slabs matching a pattern.
1258 */
1259 goto check_slabs;
1260
1261 slub_debug = 0;
1262 if (*str == '-')
1263 /*
1264 * Switch off all debugging measures.
1265 */
1266 goto out;
1267
1268 /*
1269 * Determine which debug features should be switched on
1270 */
1271 for (; *str && *str != ','; str++) {
1272 switch (tolower(*str)) {
1273 case 'f':
1274 slub_debug |= SLAB_CONSISTENCY_CHECKS;
1275 break;
1276 case 'z':
1277 slub_debug |= SLAB_RED_ZONE;
1278 break;
1279 case 'p':
1280 slub_debug |= SLAB_POISON;
1281 break;
1282 case 'u':
1283 slub_debug |= SLAB_STORE_USER;
1284 break;
1285 case 't':
1286 slub_debug |= SLAB_TRACE;
1287 break;
1288 case 'a':
1289 slub_debug |= SLAB_FAILSLAB;
1290 break;
1291 case 'o':
1292 /*
1293 * Avoid enabling debugging on caches if its minimum
1294 * order would increase as a result.
1295 */
1296 disable_higher_order_debug = 1;
1297 break;
1298 default:
1299 pr_err("slub_debug option '%c' unknown. skipped\n",
1300 *str);
1301 }
1302 }
1303
1304 check_slabs:
1305 if (*str == ',')
1306 slub_debug_slabs = str + 1;
1307 out:
1308 if ((static_branch_unlikely(&init_on_alloc) ||
1309 static_branch_unlikely(&init_on_free)) &&
1310 (slub_debug & SLAB_POISON))
1311 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
1312 return 1;
1313 }
1314
1315 __setup("slub_debug", setup_slub_debug);
1316
1317 /*
1318 * kmem_cache_flags - apply debugging options to the cache
1319 * @object_size: the size of an object without meta data
1320 * @flags: flags to set
1321 * @name: name of the cache
1322 * @ctor: constructor function
1323 *
1324 * Debug option(s) are applied to @flags. In addition to the debug
1325 * option(s), if a slab name (or multiple) is specified i.e.
1326 * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1327 * then only the select slabs will receive the debug option(s).
1328 */
slab_flags_t kmem_cache_flags(unsigned int object_size,
1330 slab_flags_t flags, const char *name,
1331 void (*ctor)(void *))
1332 {
1333 char *iter;
1334 size_t len;
1335
1336 /* If slub_debug = 0, it folds into the if conditional. */
1337 if (!slub_debug_slabs)
1338 return flags | slub_debug;
1339
1340 len = strlen(name);
1341 iter = slub_debug_slabs;
1342 while (*iter) {
1343 char *end, *glob;
1344 size_t cmplen;
1345
1346 end = strchrnul(iter, ',');
1347
1348 glob = strnchr(iter, end - iter, '*');
1349 if (glob)
1350 cmplen = glob - iter;
1351 else
1352 cmplen = max_t(size_t, len, (end - iter));
1353
1354 if (!strncmp(name, iter, cmplen)) {
1355 flags |= slub_debug;
1356 break;
1357 }
1358
1359 if (!*end)
1360 break;
1361 iter = end + 1;
1362 }
1363
1364 return flags;
1365 }
#else /* !CONFIG_SLUB_DEBUG */
static inline void setup_object_debug(struct kmem_cache *s,
			struct page *page, void *object) {}
static inline
void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, unsigned long addr) { return 0; }

static inline int free_debug_processing(
	struct kmem_cache *s, struct page *page,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr) { return 0; }

static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
			{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
			void *object, u8 val) { return 1; }
static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#define slub_debug 0

#define disable_higher_order_debug 0

static inline unsigned long slabs_node(struct kmem_cache *s, int node)
							{ return 0; }
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
							{ return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
							int objects) {}

static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
			       void **freelist, void *nextfree)
{
	return false;
}
#endif /* CONFIG_SLUB_DEBUG */
1413
1414 /*
1415 * Hooks for other subsystems that check memory allocations. In a typical
1416 * production configuration these hooks all should produce no code at all.
1417 */
static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
1419 {
1420 ptr = kasan_kmalloc_large(ptr, size, flags);
1421 /* As ptr might get tagged, call kmemleak hook after KASAN. */
1422 kmemleak_alloc(ptr, size, 1, flags);
1423 return ptr;
1424 }
1425
static __always_inline void kfree_hook(void *x)
1427 {
1428 kmemleak_free(x);
1429 kasan_kfree_large(x, _RET_IP_);
1430 }
1431
static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
1433 {
1434 kmemleak_free_recursive(x, s->flags);
1435
1436 /*
1437 * Trouble is that we may no longer disable interrupts in the fast path
1438 * So in order to make the debug calls that expect irqs to be
1439 * disabled we need to disable interrupts temporarily.
1440 */
1441 #ifdef CONFIG_LOCKDEP
1442 {
1443 unsigned long flags;
1444
1445 local_irq_save(flags);
1446 debug_check_no_locks_freed(x, s->object_size);
1447 local_irq_restore(flags);
1448 }
1449 #endif
1450 if (!(s->flags & SLAB_DEBUG_OBJECTS))
1451 debug_check_no_obj_freed(x, s->object_size);
1452
1453 /* KASAN might put x into memory quarantine, delaying its reuse */
1454 return kasan_slab_free(s, x, _RET_IP_);
1455 }
1456
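/*
 * Run the free hooks on every object of a detached freelist (head..tail).
 * Objects whose reuse has to be delayed (e.g. because KASAN put them into
 * its quarantine) are dropped from the reconstructed list and *cnt is
 * decremented for each of them. Returns true if at least one object is
 * left to actually free.
 */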
static inline bool slab_free_freelist_hook(struct kmem_cache *s,
1458 void **head, void **tail,
1459 int *cnt)
1460 {
1461
1462 void *object;
1463 void *next = *head;
1464 void *old_tail = *tail ? *tail : *head;
1465 int rsize;
1466
1467 /* Head and tail of the reconstructed freelist */
1468 *head = NULL;
1469 *tail = NULL;
1470
1471 do {
1472 object = next;
1473 next = get_freepointer(s, object);
1474
1475 if (slab_want_init_on_free(s)) {
1476 /*
1477 * Clear the object and the metadata, but don't touch
1478 * the redzone.
1479 */
1480 memset(object, 0, s->object_size);
1481 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad
1482 : 0;
1483 memset((char *)object + s->inuse, 0,
1484 s->size - s->inuse - rsize);
1485
1486 }
1487 /* If object's reuse doesn't have to be delayed */
1488 if (!slab_free_hook(s, object)) {
1489 /* Move object to the new freelist */
1490 set_freepointer(s, object, *head);
1491 *head = object;
1492 if (!*tail)
1493 *tail = object;
1494 } else {
1495 /*
1496 * Adjust the reconstructed freelist depth
1497 * accordingly if object's reuse is delayed.
1498 */
1499 --(*cnt);
1500 }
1501 } while (object != old_tail);
1502
1503 if (*head == *tail)
1504 *tail = NULL;
1505
1506 return *head != NULL;
1507 }
1508
static void *setup_object(struct kmem_cache *s, struct page *page,
1510 void *object)
1511 {
1512 setup_object_debug(s, page, object);
1513 object = kasan_init_slab_obj(s, object);
1514 if (unlikely(s->ctor)) {
1515 kasan_unpoison_object_data(s, object);
1516 s->ctor(object);
1517 kasan_poison_object_data(s, object);
1518 }
1519 return object;
1520 }
1521
1522 /*
1523 * Slab allocation and freeing
1524 */
static inline struct page *alloc_slab_page(struct kmem_cache *s,
1526 gfp_t flags, int node, struct kmem_cache_order_objects oo)
1527 {
1528 struct page *page;
1529 unsigned int order = oo_order(oo);
1530
1531 if (node == NUMA_NO_NODE)
1532 page = alloc_pages(flags, order);
1533 else
1534 page = __alloc_pages_node(node, flags, order);
1535
1536 if (page && charge_slab_page(page, flags, order, s)) {
1537 __free_pages(page, order);
1538 page = NULL;
1539 }
1540
1541 return page;
1542 }
1543
1544 #ifdef CONFIG_SLAB_FREELIST_RANDOM
1545 /* Pre-initialize the random sequence cache */
static int init_cache_random_seq(struct kmem_cache *s)
1547 {
1548 unsigned int count = oo_objects(s->oo);
1549 int err;
1550
1551 /* Bailout if already initialised */
1552 if (s->random_seq)
1553 return 0;
1554
1555 err = cache_random_seq_create(s, count, GFP_KERNEL);
1556 if (err) {
1557 pr_err("SLUB: Unable to initialize free list for %s\n",
1558 s->name);
1559 return err;
1560 }
1561
1562 /* Transform to an offset on the set of pages */
1563 if (s->random_seq) {
1564 unsigned int i;
1565
1566 for (i = 0; i < count; i++)
1567 s->random_seq[i] *= s->size;
1568 }
1569 return 0;
1570 }
1571
1572 /* Initialize each random sequence freelist per cache */
static void __init init_freelist_randomization(void)
1574 {
1575 struct kmem_cache *s;
1576
1577 mutex_lock(&slab_mutex);
1578
1579 list_for_each_entry(s, &slab_caches, list)
1580 init_cache_random_seq(s);
1581
1582 mutex_unlock(&slab_mutex);
1583 }
1584
1585 /* Get the next entry on the pre-computed freelist randomized */
static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
1587 unsigned long *pos, void *start,
1588 unsigned long page_limit,
1589 unsigned long freelist_count)
1590 {
1591 unsigned int idx;
1592
1593 /*
1594 * If the target page allocation failed, the number of objects on the
1595 * page might be smaller than the usual size defined by the cache.
1596 */
1597 do {
1598 idx = s->random_seq[*pos];
1599 *pos += 1;
1600 if (*pos >= freelist_count)
1601 *pos = 0;
1602 } while (unlikely(idx >= page_limit));
1603
1604 return (char *)start + idx;
1605 }
1606
1607 /* Shuffle the single linked freelist based on a random pre-computed sequence */
static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1609 {
1610 void *start;
1611 void *cur;
1612 void *next;
1613 unsigned long idx, pos, page_limit, freelist_count;
1614
1615 if (page->objects < 2 || !s->random_seq)
1616 return false;
1617
1618 freelist_count = oo_objects(s->oo);
1619 pos = get_random_int() % freelist_count;
1620
1621 page_limit = page->objects * s->size;
1622 start = fixup_red_left(s, page_address(page));
1623
1624 /* First entry is used as the base of the freelist */
1625 cur = next_freelist_entry(s, page, &pos, start, page_limit,
1626 freelist_count);
1627 cur = setup_object(s, page, cur);
1628 page->freelist = cur;
1629
1630 for (idx = 1; idx < page->objects; idx++) {
1631 next = next_freelist_entry(s, page, &pos, start, page_limit,
1632 freelist_count);
1633 next = setup_object(s, page, next);
1634 set_freepointer(s, cur, next);
1635 cur = next;
1636 }
1637 set_freepointer(s, cur, NULL);
1638
1639 return true;
1640 }
#else
static inline int init_cache_random_seq(struct kmem_cache *s)
{
	return 0;
}
static inline void init_freelist_randomization(void) { }
static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
{
	return false;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
1652
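/*
 * Allocate a slab page for @s and link up its freelist (in randomized
 * order when freelist randomization is enabled). The page is returned
 * frozen, with all objects accounted as in use by the caller.
 */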
static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1654 {
1655 struct page *page;
1656 struct kmem_cache_order_objects oo = s->oo;
1657 gfp_t alloc_gfp;
1658 void *start, *p, *next;
1659 int idx;
1660 bool shuffle;
1661
1662 flags &= gfp_allowed_mask;
1663
1664 if (gfpflags_allow_blocking(flags))
1665 local_irq_enable();
1666
1667 flags |= s->allocflags;
1668
1669 /*
1670 * Let the initial higher-order allocation fail under memory pressure
1671 * so we fall-back to the minimum order allocation.
1672 */
1673 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1674 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
1675 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
1676
1677 page = alloc_slab_page(s, alloc_gfp, node, oo);
1678 if (unlikely(!page)) {
1679 oo = s->min;
1680 alloc_gfp = flags;
1681 /*
1682 * Allocation may have failed due to fragmentation.
1683 * Try a lower order alloc if possible
1684 */
1685 page = alloc_slab_page(s, alloc_gfp, node, oo);
1686 if (unlikely(!page))
1687 goto out;
1688 stat(s, ORDER_FALLBACK);
1689 }
1690
1691 page->objects = oo_objects(oo);
1692
1693 page->slab_cache = s;
1694 __SetPageSlab(page);
1695 if (page_is_pfmemalloc(page))
1696 SetPageSlabPfmemalloc(page);
1697
1698 kasan_poison_slab(page);
1699
1700 start = page_address(page);
1701
1702 setup_page_debug(s, page, start);
1703
1704 shuffle = shuffle_freelist(s, page);
1705
1706 if (!shuffle) {
1707 start = fixup_red_left(s, start);
1708 start = setup_object(s, page, start);
1709 page->freelist = start;
1710 for (idx = 0, p = start; idx < page->objects - 1; idx++) {
1711 next = p + s->size;
1712 next = setup_object(s, page, next);
1713 set_freepointer(s, p, next);
1714 p = next;
1715 }
1716 set_freepointer(s, p, NULL);
1717 }
1718
1719 page->inuse = page->objects;
1720 page->frozen = 1;
1721
1722 out:
1723 if (gfpflags_allow_blocking(flags))
1724 local_irq_disable();
1725 if (!page)
1726 return NULL;
1727
1728 inc_slabs_node(s, page_to_nid(page), page->objects);
1729
1730 return page;
1731 }
1732
static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1734 {
1735 if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
1736 gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
1737 flags &= ~GFP_SLAB_BUG_MASK;
1738 pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
1739 invalid_mask, &invalid_mask, flags, &flags);
1740 dump_stack();
1741 }
1742
1743 return allocate_slab(s,
1744 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1745 }
1746
1747 static void __free_slab(struct kmem_cache *s, struct page *page)
1748 {
1749 int order = compound_order(page);
1750 int pages = 1 << order;
1751
1752 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1753 void *p;
1754
1755 slab_pad_check(s, page);
1756 for_each_object(p, s, page_address(page),
1757 page->objects)
1758 check_object(s, page, p, SLUB_RED_INACTIVE);
1759 }
1760
1761 __ClearPageSlabPfmemalloc(page);
1762 __ClearPageSlab(page);
1763
1764 page->mapping = NULL;
1765 if (current->reclaim_state)
1766 current->reclaim_state->reclaimed_slab += pages;
1767 uncharge_slab_page(page, order, s);
1768 __free_pages(page, order);
1769 }
1770
1771 static void rcu_free_slab(struct rcu_head *h)
1772 {
1773 struct page *page = container_of(h, struct page, rcu_head);
1774
1775 __free_slab(page->slab_cache, page);
1776 }
1777
1778 static void free_slab(struct kmem_cache *s, struct page *page)
1779 {
1780 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
1781 call_rcu(&page->rcu_head, rcu_free_slab);
1782 } else
1783 __free_slab(s, page);
1784 }
1785
1786 static void discard_slab(struct kmem_cache *s, struct page *page)
1787 {
1788 dec_slabs_node(s, page_to_nid(page), page->objects);
1789 free_slab(s, page);
1790 }
1791
1792 /*
1793 * Management of partially allocated slabs.
1794 */
1795 static inline void
1796 __add_partial(struct kmem_cache_node *n, struct page *page, int tail)
1797 {
1798 n->nr_partial++;
1799 if (tail == DEACTIVATE_TO_TAIL)
1800 list_add_tail(&page->slab_list, &n->partial);
1801 else
1802 list_add(&page->slab_list, &n->partial);
1803 }
1804
1805 static inline void add_partial(struct kmem_cache_node *n,
1806 struct page *page, int tail)
1807 {
1808 lockdep_assert_held(&n->list_lock);
1809 __add_partial(n, page, tail);
1810 }
1811
1812 static inline void remove_partial(struct kmem_cache_node *n,
1813 struct page *page)
1814 {
1815 lockdep_assert_held(&n->list_lock);
1816 list_del(&page->slab_list);
1817 n->nr_partial--;
1818 }
1819
1820 /*
1821 * Remove slab from the partial list, freeze it and
1822 * return the pointer to the freelist.
1823 *
1824 * Returns a list of objects or NULL if it fails.
1825 */
1826 static inline void *acquire_slab(struct kmem_cache *s,
1827 struct kmem_cache_node *n, struct page *page,
1828 int mode, int *objects)
1829 {
1830 void *freelist;
1831 unsigned long counters;
1832 struct page new;
1833
1834 lockdep_assert_held(&n->list_lock);
1835
1836 /*
1837 * Zap the freelist and set the frozen bit.
1838 * The old freelist is the list of objects for the
1839 * per cpu allocation list.
1840 */
1841 freelist = page->freelist;
1842 counters = page->counters;
1843 new.counters = counters;
1844 *objects = new.objects - new.inuse;
1845 if (mode) {
1846 new.inuse = page->objects;
1847 new.freelist = NULL;
1848 } else {
1849 new.freelist = freelist;
1850 }
1851
1852 VM_BUG_ON(new.frozen);
1853 new.frozen = 1;
1854
1855 if (!__cmpxchg_double_slab(s, page,
1856 freelist, counters,
1857 new.freelist, new.counters,
1858 "acquire_slab"))
1859 return NULL;
1860
1861 remove_partial(n, page);
1862 WARN_ON(!freelist);
1863 return freelist;
1864 }
1865
1866 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1867 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
1868
1869 /*
1870 * Try to allocate a partial slab from a specific node.
1871 */
1872 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
1873 struct kmem_cache_cpu *c, gfp_t flags)
1874 {
1875 struct page *page, *page2;
1876 void *object = NULL;
1877 unsigned int available = 0;
1878 int objects;
1879
1880 /*
1881 * Racy check. If we mistakenly see no partial slabs then we
1882 * just allocate an empty slab. If we mistakenly try to get a
1883 * partial slab and there is none available then get_partial_node()
1884 * will return NULL.
1885 */
1886 if (!n || !n->nr_partial)
1887 return NULL;
1888
1889 spin_lock(&n->list_lock);
1890 list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
1891 void *t;
1892
1893 if (!pfmemalloc_match(page, flags))
1894 continue;
1895
1896 t = acquire_slab(s, n, page, object == NULL, &objects);
1897 if (!t)
1898 break;
1899
1900 available += objects;
1901 if (!object) {
1902 c->page = page;
1903 stat(s, ALLOC_FROM_PARTIAL);
1904 object = t;
1905 } else {
1906 put_cpu_partial(s, page, 0);
1907 stat(s, CPU_PARTIAL_NODE);
1908 }
1909 if (!kmem_cache_has_cpu_partial(s)
1910 || available > slub_cpu_partial(s) / 2)
1911 break;
1912
1913 }
1914 spin_unlock(&n->list_lock);
1915 return object;
1916 }
1917
1918 /*
1919 * Get a page from somewhere. Search in increasing NUMA distances.
1920 */
1921 static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
1922 struct kmem_cache_cpu *c)
1923 {
1924 #ifdef CONFIG_NUMA
1925 struct zonelist *zonelist;
1926 struct zoneref *z;
1927 struct zone *zone;
1928 enum zone_type high_zoneidx = gfp_zone(flags);
1929 void *object;
1930 unsigned int cpuset_mems_cookie;
1931
1932 /*
1933 * The defrag ratio allows a configuration of the tradeoffs between
1934 * inter node defragmentation and node local allocations. A lower
1935 * defrag_ratio increases the tendency to do local allocations
1936 * instead of attempting to obtain partial slabs from other nodes.
1937 *
1938 * If the defrag_ratio is set to 0 then kmalloc() always
1939 * returns node local objects. If the ratio is higher then kmalloc()
1940 * may return off node objects because partial slabs are obtained
1941 * from other nodes and filled up.
1942 *
1943 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
1944 * (which makes defrag_ratio = 1000) then every (well almost)
1945 * allocation will first attempt to defrag slab caches on other nodes.
1946 * This means scanning over all nodes to look for partial slabs which
1947 * may be expensive if we do it every time we are trying to find a slab
1948 * with available objects.
1949 */
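/*
 * Illustrative example: as noted above, a sysfs value of 100 is stored as
 * 1000, so setting remote_node_defrag_ratio to 20 via sysfs stores 200.
 * The check below then skips the remote search whenever
 * get_cycles() % 1024 exceeds 200, i.e. roughly 80% of the time, so only
 * about one in five allocations pays the cost of scanning other nodes
 * for partial slabs.
 */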
1950 if (!s->remote_node_defrag_ratio ||
1951 get_cycles() % 1024 > s->remote_node_defrag_ratio)
1952 return NULL;
1953
1954 do {
1955 cpuset_mems_cookie = read_mems_allowed_begin();
1956 zonelist = node_zonelist(mempolicy_slab_node(), flags);
1957 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1958 struct kmem_cache_node *n;
1959
1960 n = get_node(s, zone_to_nid(zone));
1961
1962 if (n && cpuset_zone_allowed(zone, flags) &&
1963 n->nr_partial > s->min_partial) {
1964 object = get_partial_node(s, n, c, flags);
1965 if (object) {
1966 /*
1967 * Don't check read_mems_allowed_retry()
1968 * here - if mems_allowed was updated in
1969 * parallel, that was a harmless race
1970 * between allocation and the cpuset
1971 * update
1972 */
1973 return object;
1974 }
1975 }
1976 }
1977 } while (read_mems_allowed_retry(cpuset_mems_cookie));
1978 #endif /* CONFIG_NUMA */
1979 return NULL;
1980 }
1981
1982 /*
1983 * Get a partial page, lock it and return it.
1984 */
1985 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
1986 struct kmem_cache_cpu *c)
1987 {
1988 void *object;
1989 int searchnode = node;
1990
1991 if (node == NUMA_NO_NODE)
1992 searchnode = numa_mem_id();
1993
1994 object = get_partial_node(s, get_node(s, searchnode), c, flags);
1995 if (object || node != NUMA_NO_NODE)
1996 return object;
1997
1998 return get_any_partial(s, flags, c);
1999 }
2000
2001 #ifdef CONFIG_PREEMPT
2002 /*
2003 * Calculate the next globally unique transaction id for disambiguation
2004 * during cmpxchg. The transactions start with the cpu number and are then
2005 * incremented by CONFIG_NR_CPUS.
2006 */
2007 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
2008 #else
2009 /*
2010 * No preemption supported therefore also no need to check for
2011 * different cpus.
2012 */
2013 #define TID_STEP 1
2014 #endif
2015
2016 static inline unsigned long next_tid(unsigned long tid)
2017 {
2018 return tid + TID_STEP;
2019 }
2020
2021 #ifdef SLUB_DEBUG_CMPXCHG
2022 static inline unsigned int tid_to_cpu(unsigned long tid)
2023 {
2024 return tid % TID_STEP;
2025 }
2026
2027 static inline unsigned long tid_to_event(unsigned long tid)
2028 {
2029 return tid / TID_STEP;
2030 }
2031 #endif
2032
2033 static inline unsigned int init_tid(int cpu)
2034 {
2035 return cpu;
2036 }
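/*
 * Illustrative example (assuming a hypothetical CONFIG_NR_CPUS of 64, so
 * TID_STEP is 64 under CONFIG_PREEMPT): cpu 3 starts with tid 3 from
 * init_tid(). After two successful operations next_tid() has advanced it
 * to 3 + 2 * 64 = 131, so tid_to_cpu(131) = 131 % 64 = 3 and
 * tid_to_event(131) = 131 / 64 = 2. A cmpxchg that observes a stale tid
 * therefore detects both a cpu change and an intervening operation.
 */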
2037
2038 static inline void note_cmpxchg_failure(const char *n,
2039 const struct kmem_cache *s, unsigned long tid)
2040 {
2041 #ifdef SLUB_DEBUG_CMPXCHG
2042 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
2043
2044 pr_info("%s %s: cmpxchg redo ", n, s->name);
2045
2046 #ifdef CONFIG_PREEMPT
2047 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
2048 pr_warn("due to cpu change %d -> %d\n",
2049 tid_to_cpu(tid), tid_to_cpu(actual_tid));
2050 else
2051 #endif
2052 if (tid_to_event(tid) != tid_to_event(actual_tid))
2053 pr_warn("due to cpu running other code. Event %ld->%ld\n",
2054 tid_to_event(tid), tid_to_event(actual_tid));
2055 else
2056 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
2057 actual_tid, tid, next_tid(tid));
2058 #endif
2059 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
2060 }
2061
2062 static void init_kmem_cache_cpus(struct kmem_cache *s)
2063 {
2064 int cpu;
2065
2066 for_each_possible_cpu(cpu)
2067 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
2068 }
2069
2070 /*
2071 * Remove the cpu slab
2072 */
2073 static void deactivate_slab(struct kmem_cache *s, struct page *page,
2074 void *freelist, struct kmem_cache_cpu *c)
2075 {
2076 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
2077 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
2078 int lock = 0;
2079 enum slab_modes l = M_NONE, m = M_NONE;
2080 void *nextfree;
2081 int tail = DEACTIVATE_TO_HEAD;
2082 struct page new;
2083 struct page old;
2084
2085 if (page->freelist) {
2086 stat(s, DEACTIVATE_REMOTE_FREES);
2087 tail = DEACTIVATE_TO_TAIL;
2088 }
2089
2090 /*
2091 * Stage one: Free all available per cpu objects back
2092 * to the page freelist while it is still frozen. Leave the
2093 * last one.
2094 *
2095 * There is no need to take the list->lock because the page
2096 * is still frozen.
2097 */
2098 while (freelist && (nextfree = get_freepointer(s, freelist))) {
2099 void *prior;
2100 unsigned long counters;
2101
2102 /*
2103 * If 'nextfree' is invalid, it is possible that the object at
2104 * 'freelist' is already corrupted. So isolate all objects
2105 * starting at 'freelist'.
2106 */
2107 if (freelist_corrupted(s, page, &freelist, nextfree))
2108 break;
2109
2110 do {
2111 prior = page->freelist;
2112 counters = page->counters;
2113 set_freepointer(s, freelist, prior);
2114 new.counters = counters;
2115 new.inuse--;
2116 VM_BUG_ON(!new.frozen);
2117
2118 } while (!__cmpxchg_double_slab(s, page,
2119 prior, counters,
2120 freelist, new.counters,
2121 "drain percpu freelist"));
2122
2123 freelist = nextfree;
2124 }
2125
2126 /*
2127 * Stage two: Ensure that the page is unfrozen while the
2128 * list presence reflects the actual number of objects
2129 * during unfreeze.
2130 *
2131 * We setup the list membership and then perform a cmpxchg
2132 * with the count. If there is a mismatch then the page
2133 * is not unfrozen but the page is on the wrong list.
2134 *
2135 * Then we restart the process which may have to remove
2136 * the page from the list that we just put it on again
2137 * because the number of objects in the slab may have
2138 * changed.
2139 */
2140 redo:
2141
2142 old.freelist = page->freelist;
2143 old.counters = page->counters;
2144 VM_BUG_ON(!old.frozen);
2145
2146 /* Determine target state of the slab */
2147 new.counters = old.counters;
2148 if (freelist) {
2149 new.inuse--;
2150 set_freepointer(s, freelist, old.freelist);
2151 new.freelist = freelist;
2152 } else
2153 new.freelist = old.freelist;
2154
2155 new.frozen = 0;
2156
2157 if (!new.inuse && n->nr_partial >= s->min_partial)
2158 m = M_FREE;
2159 else if (new.freelist) {
2160 m = M_PARTIAL;
2161 if (!lock) {
2162 lock = 1;
2163 /*
2164 * Taking the spinlock removes the possibility
2165 * that acquire_slab() will see a slab page that
2166 * is frozen
2167 */
2168 spin_lock(&n->list_lock);
2169 }
2170 } else {
2171 m = M_FULL;
2172 if (kmem_cache_debug(s) && !lock) {
2173 lock = 1;
2174 /*
2175 * This also ensures that the scanning of full
2176 * slabs from diagnostic functions will not see
2177 * any frozen slabs.
2178 */
2179 spin_lock(&n->list_lock);
2180 }
2181 }
2182
2183 if (l != m) {
2184 if (l == M_PARTIAL)
2185 remove_partial(n, page);
2186 else if (l == M_FULL)
2187 remove_full(s, n, page);
2188
2189 if (m == M_PARTIAL)
2190 add_partial(n, page, tail);
2191 else if (m == M_FULL)
2192 add_full(s, n, page);
2193 }
2194
2195 l = m;
2196 if (!__cmpxchg_double_slab(s, page,
2197 old.freelist, old.counters,
2198 new.freelist, new.counters,
2199 "unfreezing slab"))
2200 goto redo;
2201
2202 if (lock)
2203 spin_unlock(&n->list_lock);
2204
2205 if (m == M_PARTIAL)
2206 stat(s, tail);
2207 else if (m == M_FULL)
2208 stat(s, DEACTIVATE_FULL);
2209 else if (m == M_FREE) {
2210 stat(s, DEACTIVATE_EMPTY);
2211 discard_slab(s, page);
2212 stat(s, FREE_SLAB);
2213 }
2214
2215 c->page = NULL;
2216 c->freelist = NULL;
2217 c->tid = next_tid(c->tid);
2218 }
2219
2220 /*
2221 * Unfreeze all the cpu partial slabs.
2222 *
2223 * This function must be called with interrupts disabled
2224 * for the cpu using c (or some other guarantee must be in place
2225 * that excludes concurrent accesses).
2226 */
2227 static void unfreeze_partials(struct kmem_cache *s,
2228 struct kmem_cache_cpu *c)
2229 {
2230 #ifdef CONFIG_SLUB_CPU_PARTIAL
2231 struct kmem_cache_node *n = NULL, *n2 = NULL;
2232 struct page *page, *discard_page = NULL;
2233
2234 while ((page = c->partial)) {
2235 struct page new;
2236 struct page old;
2237
2238 c->partial = page->next;
2239
2240 n2 = get_node(s, page_to_nid(page));
2241 if (n != n2) {
2242 if (n)
2243 spin_unlock(&n->list_lock);
2244
2245 n = n2;
2246 spin_lock(&n->list_lock);
2247 }
2248
2249 do {
2250
2251 old.freelist = page->freelist;
2252 old.counters = page->counters;
2253 VM_BUG_ON(!old.frozen);
2254
2255 new.counters = old.counters;
2256 new.freelist = old.freelist;
2257
2258 new.frozen = 0;
2259
2260 } while (!__cmpxchg_double_slab(s, page,
2261 old.freelist, old.counters,
2262 new.freelist, new.counters,
2263 "unfreezing slab"));
2264
2265 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
2266 page->next = discard_page;
2267 discard_page = page;
2268 } else {
2269 add_partial(n, page, DEACTIVATE_TO_TAIL);
2270 stat(s, FREE_ADD_PARTIAL);
2271 }
2272 }
2273
2274 if (n)
2275 spin_unlock(&n->list_lock);
2276
2277 while (discard_page) {
2278 page = discard_page;
2279 discard_page = discard_page->next;
2280
2281 stat(s, DEACTIVATE_EMPTY);
2282 discard_slab(s, page);
2283 stat(s, FREE_SLAB);
2284 }
2285 #endif /* CONFIG_SLUB_CPU_PARTIAL */
2286 }
2287
2288 /*
2289 * Put a page that was just frozen (in __slab_free|get_partial_node) into a
2290 * partial page slot if available.
2291 *
2292 * If we did not find a slot then simply move all the partials to the
2293 * per node partial list.
2294 */
2295 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
2296 {
2297 #ifdef CONFIG_SLUB_CPU_PARTIAL
2298 struct page *oldpage;
2299 int pages;
2300 int pobjects;
2301
2302 preempt_disable();
2303 do {
2304 pages = 0;
2305 pobjects = 0;
2306 oldpage = this_cpu_read(s->cpu_slab->partial);
2307
2308 if (oldpage) {
2309 pobjects = oldpage->pobjects;
2310 pages = oldpage->pages;
2311 if (drain && pobjects > s->cpu_partial) {
2312 unsigned long flags;
2313 /*
2314 * partial array is full. Move the existing
2315 * set to the per node partial list.
2316 */
2317 local_irq_save(flags);
2318 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2319 local_irq_restore(flags);
2320 oldpage = NULL;
2321 pobjects = 0;
2322 pages = 0;
2323 stat(s, CPU_PARTIAL_DRAIN);
2324 }
2325 }
2326
2327 pages++;
2328 pobjects += page->objects - page->inuse;
2329
2330 page->pages = pages;
2331 page->pobjects = pobjects;
2332 page->next = oldpage;
2333
2334 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
2335 != oldpage);
2336 if (unlikely(!s->cpu_partial)) {
2337 unsigned long flags;
2338
2339 local_irq_save(flags);
2340 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2341 local_irq_restore(flags);
2342 }
2343 preempt_enable();
2344 #endif /* CONFIG_SLUB_CPU_PARTIAL */
2345 }
2346
2347 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
2348 {
2349 stat(s, CPUSLAB_FLUSH);
2350 deactivate_slab(s, c->page, c->freelist, c);
2351 }
2352
2353 /*
2354 * Flush cpu slab.
2355 *
2356 * Called from IPI handler with interrupts disabled.
2357 */
2358 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
2359 {
2360 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2361
2362 if (c->page)
2363 flush_slab(s, c);
2364
2365 unfreeze_partials(s, c);
2366 }
2367
2368 static void flush_cpu_slab(void *d)
2369 {
2370 struct kmem_cache *s = d;
2371
2372 __flush_cpu_slab(s, smp_processor_id());
2373 }
2374
2375 static bool has_cpu_slab(int cpu, void *info)
2376 {
2377 struct kmem_cache *s = info;
2378 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2379
2380 return c->page || slub_percpu_partial(c);
2381 }
2382
2383 static void flush_all(struct kmem_cache *s)
2384 {
2385 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
2386 }
2387
2388 /*
2389 * Use the cpu notifier to ensure that the cpu slabs are flushed when
2390 * necessary.
2391 */
2392 static int slub_cpu_dead(unsigned int cpu)
2393 {
2394 struct kmem_cache *s;
2395 unsigned long flags;
2396
2397 mutex_lock(&slab_mutex);
2398 list_for_each_entry(s, &slab_caches, list) {
2399 local_irq_save(flags);
2400 __flush_cpu_slab(s, cpu);
2401 local_irq_restore(flags);
2402 }
2403 mutex_unlock(&slab_mutex);
2404 return 0;
2405 }
2406
2407 /*
2408 * Check if the objects in a per cpu structure fit numa
2409 * locality expectations.
2410 */
2411 static inline int node_match(struct page *page, int node)
2412 {
2413 #ifdef CONFIG_NUMA
2414 if (node != NUMA_NO_NODE && page_to_nid(page) != node)
2415 return 0;
2416 #endif
2417 return 1;
2418 }
2419
2420 #ifdef CONFIG_SLUB_DEBUG
2421 static int count_free(struct page *page)
2422 {
2423 return page->objects - page->inuse;
2424 }
2425
2426 static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2427 {
2428 return atomic_long_read(&n->total_objects);
2429 }
2430 #endif /* CONFIG_SLUB_DEBUG */
2431
2432 #if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
2433 static unsigned long count_partial(struct kmem_cache_node *n,
2434 int (*get_count)(struct page *))
2435 {
2436 unsigned long flags;
2437 unsigned long x = 0;
2438 struct page *page;
2439
2440 spin_lock_irqsave(&n->list_lock, flags);
2441 list_for_each_entry(page, &n->partial, slab_list)
2442 x += get_count(page);
2443 spin_unlock_irqrestore(&n->list_lock, flags);
2444 return x;
2445 }
2446 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
2447
2448 static noinline void
2449 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2450 {
2451 #ifdef CONFIG_SLUB_DEBUG
2452 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
2453 DEFAULT_RATELIMIT_BURST);
2454 int node;
2455 struct kmem_cache_node *n;
2456
2457 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
2458 return;
2459
2460 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
2461 nid, gfpflags, &gfpflags);
2462 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
2463 s->name, s->object_size, s->size, oo_order(s->oo),
2464 oo_order(s->min));
2465
2466 if (oo_order(s->min) > get_order(s->object_size))
2467 pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n",
2468 s->name);
2469
2470 for_each_kmem_cache_node(s, node, n) {
2471 unsigned long nr_slabs;
2472 unsigned long nr_objs;
2473 unsigned long nr_free;
2474
2475 nr_free = count_partial(n, count_free);
2476 nr_slabs = node_nr_slabs(n);
2477 nr_objs = node_nr_objs(n);
2478
2479 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n",
2480 node, nr_slabs, nr_objs, nr_free);
2481 }
2482 #endif
2483 }
2484
2485 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2486 int node, struct kmem_cache_cpu **pc)
2487 {
2488 void *freelist;
2489 struct kmem_cache_cpu *c = *pc;
2490 struct page *page;
2491
2492 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
2493
2494 freelist = get_partial(s, flags, node, c);
2495
2496 if (freelist)
2497 return freelist;
2498
2499 page = new_slab(s, flags, node);
2500 if (page) {
2501 c = raw_cpu_ptr(s->cpu_slab);
2502 if (c->page)
2503 flush_slab(s, c);
2504
2505 /*
2506 * No other reference to the page yet so we can
2507 * muck around with it freely without cmpxchg
2508 */
2509 freelist = page->freelist;
2510 page->freelist = NULL;
2511
2512 stat(s, ALLOC_SLAB);
2513 c->page = page;
2514 *pc = c;
2515 }
2516
2517 return freelist;
2518 }
2519
2520 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2521 {
2522 if (unlikely(PageSlabPfmemalloc(page)))
2523 return gfp_pfmemalloc_allowed(gfpflags);
2524
2525 return true;
2526 }
2527
2528 /*
2529 * Check the page->freelist of a page and either transfer the freelist to the
2530 * per cpu freelist or deactivate the page.
2531 *
2532 * The page is still frozen if the return value is not NULL.
2533 *
2534 * If this function returns NULL then the page has been unfrozen.
2535 *
2536 * This function must be called with interrupt disabled.
2537 */
2538 static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2539 {
2540 struct page new;
2541 unsigned long counters;
2542 void *freelist;
2543
2544 do {
2545 freelist = page->freelist;
2546 counters = page->counters;
2547
2548 new.counters = counters;
2549 VM_BUG_ON(!new.frozen);
2550
2551 new.inuse = page->objects;
2552 new.frozen = freelist != NULL;
2553
2554 } while (!__cmpxchg_double_slab(s, page,
2555 freelist, counters,
2556 NULL, new.counters,
2557 "get_freelist"));
2558
2559 return freelist;
2560 }
2561
2562 /*
2563 * Slow path. The lockless freelist is empty or we need to perform
2564 * debugging duties.
2565 *
2566 * Processing is still very fast if new objects have been freed to the
2567 * regular freelist. In that case we simply take over the regular freelist
2568 * as the lockless freelist and zap the regular freelist.
2569 *
2570 * If that is not working then we fall back to the partial lists. We take the
2571 * first element of the freelist as the object to allocate now and move the
2572 * rest of the freelist to the lockless freelist.
2573 *
2574 * And if we were unable to get a new slab from the partial slab lists then
2575 * we need to allocate a new slab. This is the slowest path since it involves
2576 * a call to the page allocator and the setup of a new slab.
2577 *
2578 * Version of __slab_alloc to use when we know that interrupts are
2579 * already disabled (which is the case for bulk allocation).
2580 */
2581 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2582 unsigned long addr, struct kmem_cache_cpu *c)
2583 {
2584 void *freelist;
2585 struct page *page;
2586
2587 page = c->page;
2588 if (!page) {
2589 /*
2590 * if the node is not online or has no normal memory, just
2591 * ignore the node constraint
2592 */
2593 if (unlikely(node != NUMA_NO_NODE &&
2594 !node_state(node, N_NORMAL_MEMORY)))
2595 node = NUMA_NO_NODE;
2596 goto new_slab;
2597 }
2598 redo:
2599
2600 if (unlikely(!node_match(page, node))) {
2601 /*
2602 * same as above but node_match() being false already
2603 * implies node != NUMA_NO_NODE
2604 */
2605 if (!node_state(node, N_NORMAL_MEMORY)) {
2606 node = NUMA_NO_NODE;
2607 goto redo;
2608 } else {
2609 stat(s, ALLOC_NODE_MISMATCH);
2610 deactivate_slab(s, page, c->freelist, c);
2611 goto new_slab;
2612 }
2613 }
2614
2615 /*
2616 * By rights, we should be searching for a slab page that was
2617 * PFMEMALLOC but right now, we are losing the pfmemalloc
2618 * information when the page leaves the per-cpu allocator
2619 */
2620 if (unlikely(!pfmemalloc_match(page, gfpflags))) {
2621 deactivate_slab(s, page, c->freelist, c);
2622 goto new_slab;
2623 }
2624
2625 /* must check again c->freelist in case of cpu migration or IRQ */
2626 freelist = c->freelist;
2627 if (freelist)
2628 goto load_freelist;
2629
2630 freelist = get_freelist(s, page);
2631
2632 if (!freelist) {
2633 c->page = NULL;
2634 c->tid = next_tid(c->tid);
2635 stat(s, DEACTIVATE_BYPASS);
2636 goto new_slab;
2637 }
2638
2639 stat(s, ALLOC_REFILL);
2640
2641 load_freelist:
2642 /*
2643 * freelist is pointing to the list of objects to be used.
2644 * page is pointing to the page from which the objects are obtained.
2645 * That page must be frozen for per cpu allocations to work.
2646 */
2647 VM_BUG_ON(!c->page->frozen);
2648 c->freelist = get_freepointer(s, freelist);
2649 c->tid = next_tid(c->tid);
2650 return freelist;
2651
2652 new_slab:
2653
2654 if (slub_percpu_partial(c)) {
2655 page = c->page = slub_percpu_partial(c);
2656 slub_set_percpu_partial(c, page);
2657 stat(s, CPU_PARTIAL_ALLOC);
2658 goto redo;
2659 }
2660
2661 freelist = new_slab_objects(s, gfpflags, node, &c);
2662
2663 if (unlikely(!freelist)) {
2664 slab_out_of_memory(s, gfpflags, node);
2665 return NULL;
2666 }
2667
2668 page = c->page;
2669 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
2670 goto load_freelist;
2671
2672 /* Only entered in the debug case */
2673 if (kmem_cache_debug(s) &&
2674 !alloc_debug_processing(s, page, freelist, addr))
2675 goto new_slab; /* Slab failed checks. Next slab needed */
2676
2677 deactivate_slab(s, page, get_freepointer(s, freelist), c);
2678 return freelist;
2679 }
2680
2681 /*
2682 * Another variant that disables interrupts and compensates for possible
2683 * cpu changes by refetching the per cpu area pointer.
2684 */
2685 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2686 unsigned long addr, struct kmem_cache_cpu *c)
2687 {
2688 void *p;
2689 unsigned long flags;
2690
2691 local_irq_save(flags);
2692 #ifdef CONFIG_PREEMPT
2693 /*
2694 * We may have been preempted and rescheduled on a different
2695 * cpu before disabling interrupts. Need to reload cpu area
2696 * pointer.
2697 */
2698 c = this_cpu_ptr(s->cpu_slab);
2699 #endif
2700
2701 p = ___slab_alloc(s, gfpflags, node, addr, c);
2702 local_irq_restore(flags);
2703 return p;
2704 }
2705
2706 /*
2707 * If the object has been wiped upon free, make sure it's fully initialized by
2708 * zeroing out freelist pointer.
2709 */
2710 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
2711 void *obj)
2712 {
2713 if (unlikely(slab_want_init_on_free(s)) && obj)
2714 memset((void *)((char *)obj + s->offset), 0, sizeof(void *));
2715 }
2716
2717 /*
2718 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2719 * have the fastpath folded into their functions. So no function call
2720 * overhead for requests that can be satisfied on the fastpath.
2721 *
2722 * The fastpath works by first checking if the lockless freelist can be used.
2723 * If not then __slab_alloc is called for slow processing.
2724 *
2725 * Otherwise we can simply pick the next object from the lockless free list.
2726 */
2727 static __always_inline void *slab_alloc_node(struct kmem_cache *s,
2728 gfp_t gfpflags, int node, unsigned long addr)
2729 {
2730 void *object;
2731 struct kmem_cache_cpu *c;
2732 struct page *page;
2733 unsigned long tid;
2734
2735 s = slab_pre_alloc_hook(s, gfpflags);
2736 if (!s)
2737 return NULL;
2738 redo:
2739 /*
2740 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2741 * enabled. We may switch back and forth between cpus while
2742 * reading from one cpu area. That does not matter as long
2743 * as we end up on the original cpu again when doing the cmpxchg.
2744 *
2745 * We should guarantee that tid and kmem_cache are retrieved on
2746 * the same cpu. It could differ if CONFIG_PREEMPT is set, so we need
2747 * to check if it is matched or not.
2748 */
2749 do {
2750 tid = this_cpu_read(s->cpu_slab->tid);
2751 c = raw_cpu_ptr(s->cpu_slab);
2752 } while (IS_ENABLED(CONFIG_PREEMPT) &&
2753 unlikely(tid != READ_ONCE(c->tid)));
2754
2755 /*
2756 * Irqless object alloc/free algorithm used here depends on sequence
2757 * of fetching cpu_slab's data. tid should be fetched before anything
2758 * on c to guarantee that object and page associated with previous tid
2759 * won't be used with current tid. If we fetch tid first, object and
2760 * page could be one associated with next tid and our alloc/free
2761 * request will fail. In this case, we will retry. So, no problem.
2762 */
2763 barrier();
2764
2765 /*
2766 * The transaction ids are globally unique per cpu and per operation on
2767 * a per cpu queue. Thus they can guarantee that the cmpxchg_double
2768 * occurs on the right processor and that there was no operation on the
2769 * linked list in between.
2770 */
2771
2772 object = c->freelist;
2773 page = c->page;
2774 if (unlikely(!object || !page || !node_match(page, node))) {
2775 object = __slab_alloc(s, gfpflags, node, addr, c);
2776 stat(s, ALLOC_SLOWPATH);
2777 } else {
2778 void *next_object = get_freepointer_safe(s, object);
2779
2780 /*
2781 * The cmpxchg will only match if there was no additional
2782 * operation and if we are on the right processor.
2783 *
2784 * The cmpxchg does the following atomically (without lock
2785 * semantics!)
2786 * 1. Relocate first pointer to the current per cpu area.
2787 * 2. Verify that tid and freelist have not been changed
2788 * 3. If they were not changed replace tid and freelist
2789 *
2790 * Since this is without lock semantics the protection is only
2791 * against code executing on this cpu *not* from access by
2792 * other cpus.
2793 */
2794 if (unlikely(!this_cpu_cmpxchg_double(
2795 s->cpu_slab->freelist, s->cpu_slab->tid,
2796 object, tid,
2797 next_object, next_tid(tid)))) {
2798
2799 note_cmpxchg_failure("slab_alloc", s, tid);
2800 goto redo;
2801 }
2802 prefetch_freepointer(s, next_object);
2803 stat(s, ALLOC_FASTPATH);
2804 }
2805
2806 maybe_wipe_obj_freeptr(s, object);
2807
2808 if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
2809 memset(object, 0, s->object_size);
2810
2811 slab_post_alloc_hook(s, gfpflags, 1, &object);
2812
2813 return object;
2814 }
2815
2816 static __always_inline void *slab_alloc(struct kmem_cache *s,
2817 gfp_t gfpflags, unsigned long addr)
2818 {
2819 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
2820 }
2821
2822 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2823 {
2824 void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2825
2826 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
2827 s->size, gfpflags);
2828
2829 return ret;
2830 }
2831 EXPORT_SYMBOL(kmem_cache_alloc);
2832
2833 #ifdef CONFIG_TRACING
2834 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2835 {
2836 void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2837 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2838 ret = kasan_kmalloc(s, ret, size, gfpflags);
2839 return ret;
2840 }
2841 EXPORT_SYMBOL(kmem_cache_alloc_trace);
2842 #endif
2843
2844 #ifdef CONFIG_NUMA
2845 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2846 {
2847 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2848
2849 trace_kmem_cache_alloc_node(_RET_IP_, ret,
2850 s->object_size, s->size, gfpflags, node);
2851
2852 return ret;
2853 }
2854 EXPORT_SYMBOL(kmem_cache_alloc_node);
2855
2856 #ifdef CONFIG_TRACING
2857 void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
2858 gfp_t gfpflags,
2859 int node, size_t size)
2860 {
2861 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2862
2863 trace_kmalloc_node(_RET_IP_, ret,
2864 size, s->size, gfpflags, node);
2865
2866 ret = kasan_kmalloc(s, ret, size, gfpflags);
2867 return ret;
2868 }
2869 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
2870 #endif
2871 #endif /* CONFIG_NUMA */
2872
2873 /*
2874 * Slow path handling. This may still be called frequently since objects
2875 * have a longer lifetime than the cpu slabs in most processing loads.
2876 *
2877 * So we still attempt to reduce cache line usage. Just take the slab
2878 * lock and free the item. If there is no additional partial page
2879 * handling required then we can return immediately.
2880 */
2881 static void __slab_free(struct kmem_cache *s, struct page *page,
2882 void *head, void *tail, int cnt,
2883 unsigned long addr)
2884
2885 {
2886 void *prior;
2887 int was_frozen;
2888 struct page new;
2889 unsigned long counters;
2890 struct kmem_cache_node *n = NULL;
2891 unsigned long flags;
2892
2893 stat(s, FREE_SLOWPATH);
2894
2895 if (kmem_cache_debug(s) &&
2896 !free_debug_processing(s, page, head, tail, cnt, addr))
2897 return;
2898
2899 do {
2900 if (unlikely(n)) {
2901 spin_unlock_irqrestore(&n->list_lock, flags);
2902 n = NULL;
2903 }
2904 prior = page->freelist;
2905 counters = page->counters;
2906 set_freepointer(s, tail, prior);
2907 new.counters = counters;
2908 was_frozen = new.frozen;
2909 new.inuse -= cnt;
2910 if ((!new.inuse || !prior) && !was_frozen) {
2911
2912 if (kmem_cache_has_cpu_partial(s) && !prior) {
2913
2914 /*
2915 * Slab was on no list before and will be
2916 * partially empty
2917 * We can defer the list move and instead
2918 * freeze it.
2919 */
2920 new.frozen = 1;
2921
2922 } else { /* Needs to be taken off a list */
2923
2924 n = get_node(s, page_to_nid(page));
2925 /*
2926 * Speculatively acquire the list_lock.
2927 * If the cmpxchg does not succeed then we may
2928 * drop the list_lock without any processing.
2929 *
2930 * Otherwise the list_lock will synchronize with
2931 * other processors updating the list of slabs.
2932 */
2933 spin_lock_irqsave(&n->list_lock, flags);
2934
2935 }
2936 }
2937
2938 } while (!cmpxchg_double_slab(s, page,
2939 prior, counters,
2940 head, new.counters,
2941 "__slab_free"));
2942
2943 if (likely(!n)) {
2944
2945 /*
2946 * If we just froze the page then put it onto the
2947 * per cpu partial list.
2948 */
2949 if (new.frozen && !was_frozen) {
2950 put_cpu_partial(s, page, 1);
2951 stat(s, CPU_PARTIAL_FREE);
2952 }
2953 /*
2954 * The list lock was not taken therefore no list
2955 * activity can be necessary.
2956 */
2957 if (was_frozen)
2958 stat(s, FREE_FROZEN);
2959 return;
2960 }
2961
2962 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
2963 goto slab_empty;
2964
2965 /*
2966 * Objects left in the slab. If it was not on the partial list before
2967 * then add it.
2968 */
2969 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
2970 remove_full(s, n, page);
2971 add_partial(n, page, DEACTIVATE_TO_TAIL);
2972 stat(s, FREE_ADD_PARTIAL);
2973 }
2974 spin_unlock_irqrestore(&n->list_lock, flags);
2975 return;
2976
2977 slab_empty:
2978 if (prior) {
2979 /*
2980 * Slab on the partial list.
2981 */
2982 remove_partial(n, page);
2983 stat(s, FREE_REMOVE_PARTIAL);
2984 } else {
2985 /* Slab must be on the full list */
2986 remove_full(s, n, page);
2987 }
2988
2989 spin_unlock_irqrestore(&n->list_lock, flags);
2990 stat(s, FREE_SLAB);
2991 discard_slab(s, page);
2992 }
2993
2994 /*
2995 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2996 * can perform fastpath freeing without additional function calls.
2997 *
2998 * The fastpath is only possible if we are freeing to the current cpu slab
2999 * of this processor. This is typically the case if we have just allocated
3000 * the item before.
3001 *
3002 * If fastpath is not possible then fall back to __slab_free where we deal
3003 * with all sorts of special processing.
3004 *
3005 * Bulk free of a freelist with several objects (all pointing to the
3006 * same page) is possible by specifying head and tail ptr, plus objects
3007 * count (cnt). Bulk free indicated by tail pointer being set.
3008 */
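/*
 * Illustrative example: the detached freelist obj1 -> obj2 -> obj3 built
 * by build_detached_freelist() below is freed with head = obj1,
 * tail = obj3 and cnt = 3, while a plain kmem_cache_free() passes
 * head = x, tail = NULL and cnt = 1, making do_slab_free() treat head as
 * both ends of the list.
 */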
3009 static __always_inline void do_slab_free(struct kmem_cache *s,
3010 struct page *page, void *head, void *tail,
3011 int cnt, unsigned long addr)
3012 {
3013 void *tail_obj = tail ? : head;
3014 struct kmem_cache_cpu *c;
3015 unsigned long tid;
3016 redo:
3017 /*
3018 * Determine the current cpu's per cpu slab.
3019 * The cpu may change afterward. However that does not matter since
3020 * data is retrieved via this pointer. If we are on the same cpu
3021 * during the cmpxchg then the free will succeed.
3022 */
3023 do {
3024 tid = this_cpu_read(s->cpu_slab->tid);
3025 c = raw_cpu_ptr(s->cpu_slab);
3026 } while (IS_ENABLED(CONFIG_PREEMPT) &&
3027 unlikely(tid != READ_ONCE(c->tid)));
3028
3029 /* Same with comment on barrier() in slab_alloc_node() */
3030 barrier();
3031
3032 if (likely(page == c->page)) {
3033 void **freelist = READ_ONCE(c->freelist);
3034
3035 set_freepointer(s, tail_obj, freelist);
3036
3037 if (unlikely(!this_cpu_cmpxchg_double(
3038 s->cpu_slab->freelist, s->cpu_slab->tid,
3039 freelist, tid,
3040 head, next_tid(tid)))) {
3041
3042 note_cmpxchg_failure("slab_free", s, tid);
3043 goto redo;
3044 }
3045 stat(s, FREE_FASTPATH);
3046 } else
3047 __slab_free(s, page, head, tail_obj, cnt, addr);
3048
3049 }
3050
3051 static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
3052 void *head, void *tail, int cnt,
3053 unsigned long addr)
3054 {
3055 /*
3056 * With KASAN enabled slab_free_freelist_hook modifies the freelist
3057 * to remove objects, whose reuse must be delayed.
3058 */
3059 if (slab_free_freelist_hook(s, &head, &tail, &cnt))
3060 do_slab_free(s, page, head, tail, cnt, addr);
3061 }
3062
3063 #ifdef CONFIG_KASAN_GENERIC
3064 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
3065 {
3066 do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
3067 }
3068 #endif
3069
3070 void kmem_cache_free(struct kmem_cache *s, void *x)
3071 {
3072 s = cache_from_obj(s, x);
3073 if (!s)
3074 return;
3075 slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
3076 trace_kmem_cache_free(_RET_IP_, x);
3077 }
3078 EXPORT_SYMBOL(kmem_cache_free);
3079
3080 struct detached_freelist {
3081 struct page *page;
3082 void *tail;
3083 void *freelist;
3084 int cnt;
3085 struct kmem_cache *s;
3086 };
3087
3088 /*
3089 * This function progressively scans the array with free objects (with
3090 * a limited look ahead) and extracts objects belonging to the same
3091 * page. It builds a detached freelist directly within the given
3092 * page/objects. This can happen without any need for
3093 * synchronization, because the objects are owned by the running process.
3094 * The freelist is built up as a singly linked list in the objects.
3095 * The idea is, that this detached freelist can then be bulk
3096 * transferred to the real freelist(s), but only requiring a single
3097 * synchronization primitive. Look ahead in the array is limited due
3098 * to performance reasons.
3099 */
3100 static inline
3101 int build_detached_freelist(struct kmem_cache *s, size_t size,
3102 void **p, struct detached_freelist *df)
3103 {
3104 size_t first_skipped_index = 0;
3105 int lookahead = 3;
3106 void *object;
3107 struct page *page;
3108
3109 /* Always re-init detached_freelist */
3110 df->page = NULL;
3111
3112 do {
3113 object = p[--size];
3114 /* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
3115 } while (!object && size);
3116
3117 if (!object)
3118 return 0;
3119
3120 page = virt_to_head_page(object);
3121 if (!s) {
3122 /* Handle kmalloc'ed objects */
3123 if (unlikely(!PageSlab(page))) {
3124 BUG_ON(!PageCompound(page));
3125 kfree_hook(object);
3126 __free_pages(page, compound_order(page));
3127 p[size] = NULL; /* mark object processed */
3128 return size;
3129 }
3130 /* Derive kmem_cache from object */
3131 df->s = page->slab_cache;
3132 } else {
3133 df->s = cache_from_obj(s, object); /* Support for memcg */
3134 }
3135
3136 /* Start new detached freelist */
3137 df->page = page;
3138 set_freepointer(df->s, object, NULL);
3139 df->tail = object;
3140 df->freelist = object;
3141 p[size] = NULL; /* mark object processed */
3142 df->cnt = 1;
3143
3144 while (size) {
3145 object = p[--size];
3146 if (!object)
3147 continue; /* Skip processed objects */
3148
3149 /* df->page is always set at this point */
3150 if (df->page == virt_to_head_page(object)) {
3151 /* Opportunistically build the freelist */
3152 set_freepointer(df->s, object, df->freelist);
3153 df->freelist = object;
3154 df->cnt++;
3155 p[size] = NULL; /* mark object processed */
3156
3157 continue;
3158 }
3159
3160 /* Limit look ahead search */
3161 if (!--lookahead)
3162 break;
3163
3164 if (!first_skipped_index)
3165 first_skipped_index = size + 1;
3166 }
3167
3168 return first_skipped_index;
3169 }
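/*
 * Illustrative walk-through (hypothetical array): with p = {A, B, A, A},
 * where the A objects share one slab page and B lies on another, the scan
 * anchors on p[3], adds p[2] and p[0] to the detached freelist (cnt = 3),
 * skips p[1] and records first_skipped_index = 2. The caller below then
 * frees the three A objects with a single slab_free() and repeats with
 * size = 2 to pick up B on the next pass.
 */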
3170
3171 /* Note that interrupts must be enabled when calling this function. */
3172 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
3173 {
3174 if (WARN_ON(!size))
3175 return;
3176
3177 do {
3178 struct detached_freelist df;
3179
3180 size = build_detached_freelist(s, size, p, &df);
3181 if (!df.page)
3182 continue;
3183
3184 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
3185 } while (likely(size));
3186 }
3187 EXPORT_SYMBOL(kmem_cache_free_bulk);
3188
3189 /* Note that interrupts must be enabled when calling this function. */
3190 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3191 void **p)
3192 {
3193 struct kmem_cache_cpu *c;
3194 int i;
3195
3196 /* memcg and kmem_cache debug support */
3197 s = slab_pre_alloc_hook(s, flags);
3198 if (unlikely(!s))
3199 return false;
3200 /*
3201 * Drain objects in the per cpu slab, while disabling local
3202 * IRQs, which protects against PREEMPT and interrupts
3203 * handlers invoking normal fastpath.
3204 */
3205 local_irq_disable();
3206 c = this_cpu_ptr(s->cpu_slab);
3207
3208 for (i = 0; i < size; i++) {
3209 void *object = c->freelist;
3210
3211 if (unlikely(!object)) {
3212 /*
3213 * We may have removed an object from c->freelist using
3214 * the fastpath in the previous iteration; in that case,
3215 * c->tid has not been bumped yet.
3216 * Since ___slab_alloc() may reenable interrupts while
3217 * allocating memory, we should bump c->tid now.
3218 */
3219 c->tid = next_tid(c->tid);
3220
3221 /*
3222 * Invoking the slow path likely has the side-effect
3223 * of re-populating the per CPU c->freelist
3224 */
3225 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
3226 _RET_IP_, c);
3227 if (unlikely(!p[i]))
3228 goto error;
3229
3230 c = this_cpu_ptr(s->cpu_slab);
3231 maybe_wipe_obj_freeptr(s, p[i]);
3232
3233 continue; /* goto for-loop */
3234 }
3235 c->freelist = get_freepointer(s, object);
3236 p[i] = object;
3237 maybe_wipe_obj_freeptr(s, p[i]);
3238 }
3239 c->tid = next_tid(c->tid);
3240 local_irq_enable();
3241
3242 /* Clear memory outside IRQ disabled fastpath loop */
3243 if (unlikely(slab_want_init_on_alloc(flags, s))) {
3244 int j;
3245
3246 for (j = 0; j < i; j++)
3247 memset(p[j], 0, s->object_size);
3248 }
3249
3250 /* memcg and kmem_cache debug support */
3251 slab_post_alloc_hook(s, flags, size, p);
3252 return i;
3253 error:
3254 local_irq_enable();
3255 slab_post_alloc_hook(s, flags, i, p);
3256 __kmem_cache_free_bulk(s, i, p);
3257 return 0;
3258 }
3259 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
3260
3261
3262 /*
3263 * Object placement in a slab is made very easy because we always start at
3264 * offset 0. If we tune the size of the object to the alignment then we can
3265 * get the required alignment by putting one properly sized object after
3266 * another.
3267 *
3268 * Notice that the allocation order determines the sizes of the per cpu
3269 * caches. Each processor has always one slab available for allocations.
3270 * Increasing the allocation order reduces the number of times that slabs
3271 * must be moved on and off the partial lists and is therefore a factor in
3272 * locking overhead.
3273 */
3274
3275 /*
3276 * Minimum / Maximum order of slab pages. This influences locking overhead
3277 * and slab fragmentation. A higher order reduces the number of partial slabs
3278 * and increases the number of allocations possible without having to
3279 * take the list_lock.
3280 */
3281 static unsigned int slub_min_order;
3282 static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
3283 static unsigned int slub_min_objects;
3284
3285 /*
3286 * Calculate the order of allocation given a slab object size.
3287 *
3288 * The order of allocation has significant impact on performance and other
3289 * system components. Generally order 0 allocations should be preferred since
3290 * order 0 does not cause fragmentation in the page allocator. Larger objects
3291 * can be problematic to put into order 0 slabs because there may be too much
3292 * unused space left. We go to a higher order if more than 1/16th of the slab
3293 * would be wasted.
3294 *
3295 * In order to reach satisfactory performance we must ensure that a minimum
3296 * number of objects is in one slab. Otherwise we may generate too much
3297 * activity on the partial lists which requires taking the list_lock. This is
3298 * less a concern for large slabs though which are rarely used.
3299 *
3300 * slub_max_order specifies the order where we begin to stop considering the
3301 * number of objects in a slab as critical. If we reach slub_max_order then
3302 * we try to keep the page order as low as possible. So we accept more waste
3303 * of space in favor of a small page order.
3304 *
3305 * Higher order allocations also allow the placement of more objects in a
3306 * slab and thereby reduce object handling overhead. If the user has
3307 * requested a higher minimum order then we start with that one instead of
3308 * the smallest order which will fit the object.
3309 */
3310 static inline unsigned int slab_order(unsigned int size,
3311 unsigned int min_objects, unsigned int max_order,
3312 unsigned int fract_leftover)
3313 {
3314 unsigned int min_order = slub_min_order;
3315 unsigned int order;
3316
3317 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
3318 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
3319
3320 for (order = max(min_order, (unsigned int)get_order(min_objects * size));
3321 order <= max_order; order++) {
3322
3323 unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
3324 unsigned int rem;
3325
3326 rem = slab_size % size;
3327
3328 if (rem <= slab_size / fract_leftover)
3329 break;
3330 }
3331
3332 return order;
3333 }
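/*
 * Illustrative example (assuming 4 KiB pages and slub_min_order == 0):
 * for size = 700 and min_objects = 8 the loop above starts at
 * get_order(8 * 700) = get_order(5600) = 1. At order 1 the slab is
 * 8192 bytes, rem = 8192 % 700 = 492, and with fract_leftover = 16 the
 * acceptable waste is 8192 / 16 = 512, so order 1 is returned.
 */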
3334
3335 static inline int calculate_order(unsigned int size)
3336 {
3337 unsigned int order;
3338 unsigned int min_objects;
3339 unsigned int max_objects;
3340
3341 /*
3342 * Attempt to find best configuration for a slab. This
3343 * works by first attempting to generate a layout with
3344 * the best configuration and backing off gradually.
3345 *
3346 * First we increase the acceptable waste in a slab. Then
3347 * we reduce the minimum objects required in a slab.
3348 */
3349 min_objects = slub_min_objects;
3350 if (!min_objects)
3351 min_objects = 4 * (fls(nr_cpu_ids) + 1);
3352 max_objects = order_objects(slub_max_order, size);
3353 min_objects = min(min_objects, max_objects);
3354
3355 while (min_objects > 1) {
3356 unsigned int fraction;
3357
3358 fraction = 16;
3359 while (fraction >= 4) {
3360 order = slab_order(size, min_objects,
3361 slub_max_order, fraction);
3362 if (order <= slub_max_order)
3363 return order;
3364 fraction /= 2;
3365 }
3366 min_objects--;
3367 }
3368
3369 /*
3370 * We were unable to place multiple objects in a slab. Now
3371 * let's see if we can place a single object there.
3372 */
3373 order = slab_order(size, 1, slub_max_order, 1);
3374 if (order <= slub_max_order)
3375 return order;
3376
3377 /*
3378 * Doh this slab cannot be placed using slub_max_order.
3379 */
3380 order = slab_order(size, 1, MAX_ORDER, 1);
3381 if (order < MAX_ORDER)
3382 return order;
3383 return -ENOSYS;
3384 }
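/*
 * Illustrative example for the search above (assuming a hypothetical
 * machine with 16 possible cpus and no slub_min_objects boot parameter):
 * fls(16) = 5, so min_objects starts at 4 * (5 + 1) = 24. The outer loop
 * relaxes the acceptable waste from 1/16 to 1/4 of the slab before
 * lowering min_objects, and only falls back to single-object slabs if no
 * acceptable order at or below slub_max_order is found.
 */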
3385
3386 static void
3387 init_kmem_cache_node(struct kmem_cache_node *n)
3388 {
3389 n->nr_partial = 0;
3390 spin_lock_init(&n->list_lock);
3391 INIT_LIST_HEAD(&n->partial);
3392 #ifdef CONFIG_SLUB_DEBUG
3393 atomic_long_set(&n->nr_slabs, 0);
3394 atomic_long_set(&n->total_objects, 0);
3395 INIT_LIST_HEAD(&n->full);
3396 #endif
3397 }
3398
3399 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
3400 {
3401 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
3402 KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
3403
3404 /*
3405 * Must align to double word boundary for the double cmpxchg
3406 * instructions to work; see __pcpu_double_call_return_bool().
3407 */
3408 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
3409 2 * sizeof(void *));
3410
3411 if (!s->cpu_slab)
3412 return 0;
3413
3414 init_kmem_cache_cpus(s);
3415
3416 return 1;
3417 }
3418
3419 static struct kmem_cache *kmem_cache_node;
3420
3421 /*
3422 * No kmalloc_node yet so do it by hand. We know that this is the first
3423 * slab on the node for this slabcache. There are no concurrent accesses
3424 * possible.
3425 *
3426 * Note that this function only works on the kmem_cache_node
3427 * when allocating for the kmem_cache_node. This is used for bootstrapping
3428 * memory on a fresh node that has no slab structures yet.
3429 */
3430 static void early_kmem_cache_node_alloc(int node)
3431 {
3432 struct page *page;
3433 struct kmem_cache_node *n;
3434
3435 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
3436
3437 page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
3438
3439 BUG_ON(!page);
3440 if (page_to_nid(page) != node) {
3441 pr_err("SLUB: Unable to allocate memory from node %d\n", node);
3442 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
3443 }
3444
3445 n = page->freelist;
3446 BUG_ON(!n);
3447 #ifdef CONFIG_SLUB_DEBUG
3448 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
3449 init_tracking(kmem_cache_node, n);
3450 #endif
3451 n = kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
3452 GFP_KERNEL);
3453 page->freelist = get_freepointer(kmem_cache_node, n);
3454 page->inuse = 1;
3455 page->frozen = 0;
3456 kmem_cache_node->node[node] = n;
3457 init_kmem_cache_node(n);
3458 inc_slabs_node(kmem_cache_node, node, page->objects);
3459
3460 /*
3461 * No locks need to be taken here as it has just been
3462 * initialized and there is no concurrent access.
3463 */
3464 __add_partial(n, page, DEACTIVATE_TO_HEAD);
3465 }
3466
3467 static void free_kmem_cache_nodes(struct kmem_cache *s)
3468 {
3469 int node;
3470 struct kmem_cache_node *n;
3471
3472 for_each_kmem_cache_node(s, node, n) {
3473 s->node[node] = NULL;
3474 kmem_cache_free(kmem_cache_node, n);
3475 }
3476 }
3477
3478 void __kmem_cache_release(struct kmem_cache *s)
3479 {
3480 cache_random_seq_destroy(s);
3481 free_percpu(s->cpu_slab);
3482 free_kmem_cache_nodes(s);
3483 }
3484
3485 static int init_kmem_cache_nodes(struct kmem_cache *s)
3486 {
3487 int node;
3488
3489 for_each_node_state(node, N_NORMAL_MEMORY) {
3490 struct kmem_cache_node *n;
3491
3492 if (slab_state == DOWN) {
3493 early_kmem_cache_node_alloc(node);
3494 continue;
3495 }
3496 n = kmem_cache_alloc_node(kmem_cache_node,
3497 GFP_KERNEL, node);
3498
3499 if (!n) {
3500 free_kmem_cache_nodes(s);
3501 return 0;
3502 }
3503
3504 init_kmem_cache_node(n);
3505 s->node[node] = n;
3506 }
3507 return 1;
3508 }
3509
3510 static void set_min_partial(struct kmem_cache *s, unsigned long min)
3511 {
3512 if (min < MIN_PARTIAL)
3513 min = MIN_PARTIAL;
3514 else if (min > MAX_PARTIAL)
3515 min = MAX_PARTIAL;
3516 s->min_partial = min;
3517 }
3518
3519 static void set_cpu_partial(struct kmem_cache *s)
3520 {
3521 #ifdef CONFIG_SLUB_CPU_PARTIAL
3522 /*
3523 * cpu_partial determines the maximum number of objects kept in the
3524 * per cpu partial lists of a processor.
3525 *
3526 * Per cpu partial lists mainly contain slabs that just have one
3527 * object freed. If they are used for allocation then they can be
3528 * filled up again with minimal effort. The slab will never hit the
3529 * per node partial lists and therefore no locking will be required.
3530 *
3531 * This setting also determines
3532 *
3533 * A) The number of objects from per cpu partial slabs dumped to the
3534 * per node list when we reach the limit.
3535 * B) The number of objects in cpu partial slabs to extract from the
3536 * per node list when we run out of per cpu objects. We only fetch
3537 * 50% to keep some capacity around for frees.
3538 */
3539 if (!kmem_cache_has_cpu_partial(s))
3540 s->cpu_partial = 0;
3541 else if (s->size >= PAGE_SIZE)
3542 s->cpu_partial = 2;
3543 else if (s->size >= 1024)
3544 s->cpu_partial = 6;
3545 else if (s->size >= 256)
3546 s->cpu_partial = 13;
3547 else
3548 s->cpu_partial = 30;
3549 #endif
3550 }
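/*
 * Illustrative example: a cache with s->size = 192 falls into the final
 * else branch above and gets s->cpu_partial = 30. Combined with the
 * refill rule in get_partial_node() (stop once available exceeds
 * slub_cpu_partial(s) / 2), a refill from the node partial list stops
 * after more than 15 free objects have been gathered, keeping the other
 * half of the budget for objects freed back on this cpu.
 */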
3551
3552 /*
3553 * calculate_sizes() determines the order and the distribution of data within
3554 * a slab object.
3555 */
3556 static int calculate_sizes(struct kmem_cache *s, int forced_order)
3557 {
3558 slab_flags_t flags = s->flags;
3559 unsigned int size = s->object_size;
3560 unsigned int order;
3561
3562 /*
3563 * Round up object size to the next word boundary. We can only
3564 * place the free pointer at word boundaries and this determines
3565 * the possible location of the free pointer.
3566 */
3567 size = ALIGN(size, sizeof(void *));
3568
3569 #ifdef CONFIG_SLUB_DEBUG
3570 /*
3571 * Determine if we can poison the object itself. If the user of
3572 * the slab may touch the object after free or before allocation
3573 * then we should never poison the object itself.
3574 */
3575 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
3576 !s->ctor)
3577 s->flags |= __OBJECT_POISON;
3578 else
3579 s->flags &= ~__OBJECT_POISON;
3580
3581
3582 /*
3583 * If we are Redzoning then check if there is some space between the
3584 * end of the object and the free pointer. If not then add an
3585 * additional word to have some bytes to store Redzone information.
3586 */
3587 if ((flags & SLAB_RED_ZONE) && size == s->object_size)
3588 size += sizeof(void *);
3589 #endif
3590
3591 /*
3592 * With that we have determined the number of bytes in actual use
3593 * by the object. This is the potential offset to the free pointer.
3594 */
3595 s->inuse = size;
3596
3597 if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
3598 ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
3599 s->ctor) {
3600 /*
3601 * Relocate free pointer after the object if it is not
3602 * permitted to overwrite the first word of the object on
3603 * kmem_cache_free.
3604 *
3605 * This is the case if we do RCU, have a constructor or
3606 * destructor, are poisoning the objects, or are
3607 * redzoning an object smaller than sizeof(void *).
3608 *
3609 * The assumption that s->offset >= s->inuse means free
3610 * pointer is outside of the object is used in the
3611 * freeptr_outside_object() function. If that is no
3612 * longer true, the function needs to be modified.
3613 */
3614 s->offset = size;
3615 size += sizeof(void *);
3616 }
3617
3618 #ifdef CONFIG_SLUB_DEBUG
3619 if (flags & SLAB_STORE_USER)
3620 /*
3621 * Need to store information about allocs and frees after
3622 * the object.
3623 */
3624 size += 2 * sizeof(struct track);
3625 #endif
3626
3627 kasan_cache_create(s, &size, &s->flags);
3628 #ifdef CONFIG_SLUB_DEBUG
3629 if (flags & SLAB_RED_ZONE) {
3630 /*
3631 * Add some empty padding so that we can catch
3632 * overwrites from earlier objects rather than let
3633 * tracking information or the free pointer be
3634 * corrupted if a user writes before the start
3635 * of the object.
3636 */
3637 size += sizeof(void *);
3638
3639 s->red_left_pad = sizeof(void *);
3640 s->red_left_pad = ALIGN(s->red_left_pad, s->align);
3641 size += s->red_left_pad;
3642 }
3643 #endif
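	/*
	 * At this point the per object layout is roughly (depending on
	 * which debug options are enabled):
	 *
	 *   red_left_pad | object (object_size) | right redzone |
	 *   free pointer (if relocated) | alloc/free tracking | padding
	 *
	 * s->inuse covers the object plus the right redzone, and s->offset
	 * locates the free pointer (0 if it overlays the start of the
	 * object).
	 */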
3644
3645 /*
3646 * SLUB stores one object immediately after another beginning from
3647 	 * offset 0. In order to align the objects we simply round the size of
3648 	 * each object up to the alignment.
3649 */
3650 size = ALIGN(size, s->align);
3651 s->size = size;
3652 if (forced_order >= 0)
3653 order = forced_order;
3654 else
3655 order = calculate_order(size);
3656
3657 if ((int)order < 0)
3658 return 0;
3659
3660 s->allocflags = 0;
3661 if (order)
3662 s->allocflags |= __GFP_COMP;
3663
3664 if (s->flags & SLAB_CACHE_DMA)
3665 s->allocflags |= GFP_DMA;
3666
3667 if (s->flags & SLAB_CACHE_DMA32)
3668 s->allocflags |= GFP_DMA32;
3669
3670 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3671 s->allocflags |= __GFP_RECLAIMABLE;
3672
3673 /*
3674 * Determine the number of objects per slab
3675 */
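	/*
	 * s->oo is the preferred order/objects pair, s->min the fallback
	 * based on the smallest order that still fits one object (used when
	 * higher order allocations fail), and s->max remembers the largest
	 * oo seen so that debug bitmaps can be sized from it.
	 */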
3676 s->oo = oo_make(order, size);
3677 s->min = oo_make(get_order(size), size);
3678 if (oo_objects(s->oo) > oo_objects(s->max))
3679 s->max = s->oo;
3680
3681 return !!oo_objects(s->oo);
3682 }
3683
3684 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
3685 {
3686 s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
3687 #ifdef CONFIG_SLAB_FREELIST_HARDENED
3688 s->random = get_random_long();
3689 #endif
3690
3691 if (!calculate_sizes(s, -1))
3692 goto error;
3693 if (disable_higher_order_debug) {
3694 /*
3695 * Disable debugging flags that store metadata if the min slab
3696 * order increased.
3697 */
3698 if (get_order(s->size) > get_order(s->object_size)) {
3699 s->flags &= ~DEBUG_METADATA_FLAGS;
3700 s->offset = 0;
3701 if (!calculate_sizes(s, -1))
3702 goto error;
3703 }
3704 }
3705
3706 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3707 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
3708 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0)
3709 /* Enable fast mode */
3710 s->flags |= __CMPXCHG_DOUBLE;
3711 #endif
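	/*
	 * With __CMPXCHG_DOUBLE set, page->freelist and page->counters are
	 * updated together by a double word cmpxchg, avoiding slab_lock()
	 * on the fast paths.
	 */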
3712
3713 /*
3714 * The larger the object size is, the more pages we want on the partial
3715 * list to avoid pounding the page allocator excessively.
3716 */
3717 set_min_partial(s, ilog2(s->size) / 2);
3718
3719 set_cpu_partial(s);
3720
3721 #ifdef CONFIG_NUMA
3722 s->remote_node_defrag_ratio = 1000;
3723 #endif
3724
3725 /* Initialize the pre-computed randomized freelist if slab is up */
3726 if (slab_state >= UP) {
3727 if (init_cache_random_seq(s))
3728 goto error;
3729 }
3730
3731 if (!init_kmem_cache_nodes(s))
3732 goto error;
3733
3734 if (alloc_kmem_cache_cpus(s))
3735 return 0;
3736
3737 error:
3738 __kmem_cache_release(s);
3739 return -EINVAL;
3740 }
3741
3742 static void list_slab_objects(struct kmem_cache *s, struct page *page,
3743 const char *text)
3744 {
3745 #ifdef CONFIG_SLUB_DEBUG
3746 void *addr = page_address(page);
3747 void *p;
3748 unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC);
3749 if (!map)
3750 return;
3751 slab_err(s, page, text, s->name);
3752 slab_lock(page);
3753
3754 get_map(s, page, map);
3755 for_each_object(p, s, addr, page->objects) {
3756
3757 if (!test_bit(slab_index(p, s, addr), map)) {
3758 pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
3759 print_tracking(s, p);
3760 }
3761 }
3762 slab_unlock(page);
3763 bitmap_free(map);
3764 #endif
3765 }
3766
3767 /*
3768 * Attempt to free all partial slabs on a node.
3769 * This is called from __kmem_cache_shutdown(). We must take list_lock
3770  * because sysfs files might still access the partial list after the shutdown has started.
3771 */
3772 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3773 {
3774 LIST_HEAD(discard);
3775 struct page *page, *h;
3776
3777 BUG_ON(irqs_disabled());
3778 spin_lock_irq(&n->list_lock);
3779 list_for_each_entry_safe(page, h, &n->partial, slab_list) {
3780 if (!page->inuse) {
3781 remove_partial(n, page);
3782 list_add(&page->slab_list, &discard);
3783 } else {
3784 list_slab_objects(s, page,
3785 "Objects remaining in %s on __kmem_cache_shutdown()");
3786 }
3787 }
3788 spin_unlock_irq(&n->list_lock);
3789
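	/* Now free the collected empty slabs without holding list_lock. */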
3790 list_for_each_entry_safe(page, h, &discard, slab_list)
3791 discard_slab(s, page);
3792 }
3793
3794 bool __kmem_cache_empty(struct kmem_cache *s)
3795 {
3796 int node;
3797 struct kmem_cache_node *n;
3798
3799 for_each_kmem_cache_node(s, node, n)
3800 if (n->nr_partial || slabs_node(s, node))
3801 return false;
3802 return true;
3803 }
3804
3805 /*
3806 * Release all resources used by a slab cache.
3807 */
3808 int __kmem_cache_shutdown(struct kmem_cache *s)
3809 {
3810 int node;
3811 struct kmem_cache_node *n;
3812
3813 flush_all(s);
3814 /* Attempt to free all objects */
3815 for_each_kmem_cache_node(s, node, n) {
3816 free_partial(s, n);
3817 if (n->nr_partial || slabs_node(s, node))
3818 return 1;
3819 }
3820 sysfs_slab_remove(s);
3821 return 0;
3822 }
3823
3824 /********************************************************************
3825 * Kmalloc subsystem
3826 *******************************************************************/
3827
3828 static int __init setup_slub_min_order(char *str)
3829 {
3830 get_option(&str, (int *)&slub_min_order);
3831
3832 return 1;
3833 }
3834
3835 __setup("slub_min_order=", setup_slub_min_order);
3836
3837 static int __init setup_slub_max_order(char *str)
3838 {
3839 get_option(&str, (int *)&slub_max_order);
3840 slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1);
3841
3842 return 1;
3843 }
3844
3845 __setup("slub_max_order=", setup_slub_max_order);
3846
3847 static int __init setup_slub_min_objects(char *str)
3848 {
3849 get_option(&str, (int *)&slub_min_objects);
3850
3851 return 1;
3852 }
3853
3854 __setup("slub_min_objects=", setup_slub_min_objects);
3855
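/*
 * Generic kmalloc entry point: sizes above KMALLOC_MAX_CACHE_SIZE bypass
 * the slab layer and are handed to the page allocator via kmalloc_large(),
 * everything else is served from the kmalloc cache returned by
 * kmalloc_slab().
 */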
3856 void *__kmalloc(size_t size, gfp_t flags)
3857 {
3858 struct kmem_cache *s;
3859 void *ret;
3860
3861 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3862 return kmalloc_large(size, flags);
3863
3864 s = kmalloc_slab(size, flags);
3865
3866 if (unlikely(ZERO_OR_NULL_PTR(s)))
3867 return s;
3868
3869 ret = slab_alloc(s, flags, _RET_IP_);
3870
3871 trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
3872
3873 ret = kasan_kmalloc(s, ret, size, flags);
3874
3875 return ret;
3876 }
3877 EXPORT_SYMBOL(__kmalloc);
3878
3879 #ifdef CONFIG_NUMA
3880 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3881 {
3882 struct page *page;
3883 void *ptr = NULL;
3884 unsigned int order = get_order(size);
3885
3886 flags |= __GFP_COMP;
3887 page = alloc_pages_node(node, flags, order);
3888 if (page) {
3889 ptr = page_address(page);
3890 mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
3891 1 << order);
3892 }
3893
3894 return kmalloc_large_node_hook(ptr, size, flags);
3895 }
3896
3897 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3898 {
3899 struct kmem_cache *s;
3900 void *ret;
3901
3902 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
3903 ret = kmalloc_large_node(size, flags, node);
3904
3905 trace_kmalloc_node(_RET_IP_, ret,
3906 size, PAGE_SIZE << get_order(size),
3907 flags, node);
3908
3909 return ret;
3910 }
3911
3912 s = kmalloc_slab(size, flags);
3913
3914 if (unlikely(ZERO_OR_NULL_PTR(s)))
3915 return s;
3916
3917 ret = slab_alloc_node(s, flags, node, _RET_IP_);
3918
3919 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
3920
3921 ret = kasan_kmalloc(s, ret, size, flags);
3922
3923 return ret;
3924 }
3925 EXPORT_SYMBOL(__kmalloc_node);
3926 #endif /* CONFIG_NUMA */
3927
3928 #ifdef CONFIG_HARDENED_USERCOPY
3929 /*
3930 * Rejects incorrectly sized objects and objects that are to be copied
3931 * to/from userspace but do not fall entirely within the containing slab
3932 * cache's usercopy region.
3933 *
3934  * Either returns when the check passes, or reports the problem via
3935  * usercopy_warn()/usercopy_abort().
3936 */
3937 void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
3938 bool to_user)
3939 {
3940 struct kmem_cache *s;
3941 unsigned int offset;
3942 size_t object_size;
3943
3944 ptr = kasan_reset_tag(ptr);
3945
3946 /* Find object and usable object size. */
3947 s = page->slab_cache;
3948
3949 /* Reject impossible pointers. */
3950 if (ptr < page_address(page))
3951 usercopy_abort("SLUB object not in SLUB page?!", NULL,
3952 to_user, 0, n);
3953
3954 /* Find offset within object. */
3955 offset = (ptr - page_address(page)) % s->size;
3956
3957 /* Adjust for redzone and reject if within the redzone. */
3958 if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
3959 if (offset < s->red_left_pad)
3960 usercopy_abort("SLUB object in left red zone",
3961 s->name, to_user, offset, n);
3962 offset -= s->red_left_pad;
3963 }
3964
3965 /* Allow address range falling entirely within usercopy region. */
3966 if (offset >= s->useroffset &&
3967 offset - s->useroffset <= s->usersize &&
3968 n <= s->useroffset - offset + s->usersize)
3969 return;
3970
3971 /*
3972 * If the copy is still within the allocated object, produce
3973 * a warning instead of rejecting the copy. This is intended
3974 * to be a temporary method to find any missing usercopy
3975 * whitelists.
3976 */
3977 object_size = slab_ksize(s);
3978 if (usercopy_fallback &&
3979 offset <= object_size && n <= object_size - offset) {
3980 usercopy_warn("SLUB object", s->name, to_user, offset, n);
3981 return;
3982 }
3983
3984 usercopy_abort("SLUB object", s->name, to_user, offset, n);
3985 }
3986 #endif /* CONFIG_HARDENED_USERCOPY */
3987
3988 size_t __ksize(const void *object)
3989 {
3990 struct page *page;
3991
3992 if (unlikely(object == ZERO_SIZE_PTR))
3993 return 0;
3994
3995 page = virt_to_head_page(object);
3996
3997 if (unlikely(!PageSlab(page))) {
3998 WARN_ON(!PageCompound(page));
3999 return page_size(page);
4000 }
4001
4002 return slab_ksize(page->slab_cache);
4003 }
4004 EXPORT_SYMBOL(__ksize);
4005
4006 void kfree(const void *x)
4007 {
4008 struct page *page;
4009 void *object = (void *)x;
4010
4011 trace_kfree(_RET_IP_, x);
4012
4013 if (unlikely(ZERO_OR_NULL_PTR(x)))
4014 return;
4015
4016 page = virt_to_head_page(x);
4017 if (unlikely(!PageSlab(page))) {
4018 unsigned int order = compound_order(page);
4019
4020 BUG_ON(!PageCompound(page));
4021 kfree_hook(object);
4022 mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
4023 -(1 << order));
4024 __free_pages(page, order);
4025 return;
4026 }
4027 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
4028 }
4029 EXPORT_SYMBOL(kfree);
4030
4031 #define SHRINK_PROMOTE_MAX 32
4032
4033 /*
4034 * kmem_cache_shrink discards empty slabs and promotes the slabs filled
4035 * up most to the head of the partial lists. New allocations will then
4036 * fill those up and thus they can be removed from the partial lists.
4037 *
4038  * The slabs with the fewest objects in use are placed last. This means they
4039  * are allocated from last, which increases the chance that their remaining
4040  * objects are freed and the slabs can eventually be discarded.
4041 */
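/*
 * promote[i] below collects partial slabs that have exactly i + 1 free
 * objects; splicing the buckets back from the emptiest to the fullest
 * leaves the fullest slabs at the head of the partial list.
 */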
4042 int __kmem_cache_shrink(struct kmem_cache *s)
4043 {
4044 int node;
4045 int i;
4046 struct kmem_cache_node *n;
4047 struct page *page;
4048 struct page *t;
4049 struct list_head discard;
4050 struct list_head promote[SHRINK_PROMOTE_MAX];
4051 unsigned long flags;
4052 int ret = 0;
4053
4054 flush_all(s);
4055 for_each_kmem_cache_node(s, node, n) {
4056 INIT_LIST_HEAD(&discard);
4057 for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
4058 INIT_LIST_HEAD(promote + i);
4059
4060 spin_lock_irqsave(&n->list_lock, flags);
4061
4062 /*
4063 * Build lists of slabs to discard or promote.
4064 *
4065 * Note that concurrent frees may occur while we hold the
4066 * list_lock. page->inuse here is the upper limit.
4067 */
4068 list_for_each_entry_safe(page, t, &n->partial, slab_list) {
4069 int free = page->objects - page->inuse;
4070
4071 /* Do not reread page->inuse */
4072 barrier();
4073
4074 /* We do not keep full slabs on the list */
4075 BUG_ON(free <= 0);
4076
4077 if (free == page->objects) {
4078 list_move(&page->slab_list, &discard);
4079 n->nr_partial--;
4080 } else if (free <= SHRINK_PROMOTE_MAX)
4081 list_move(&page->slab_list, promote + free - 1);
4082 }
4083
4084 /*
4085 * Promote the slabs filled up most to the head of the
4086 * partial list.
4087 */
4088 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
4089 list_splice(promote + i, &n->partial);
4090
4091 spin_unlock_irqrestore(&n->list_lock, flags);
4092
4093 /* Release empty slabs */
4094 list_for_each_entry_safe(page, t, &discard, slab_list)
4095 discard_slab(s, page);
4096
4097 if (slabs_node(s, node))
4098 ret = 1;
4099 }
4100
4101 return ret;
4102 }
4103
4104 #ifdef CONFIG_MEMCG
4105 void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
4106 {
4107 /*
4108 * Called with all the locks held after a sched RCU grace period.
4109 * Even if @s becomes empty after shrinking, we can't know that @s
4110 * doesn't have allocations already in-flight and thus can't
4111 * destroy @s until the associated memcg is released.
4112 *
4113 * However, let's remove the sysfs files for empty caches here.
4114 * Each cache has a lot of interface files which aren't
4115 	 * particularly useful for empty, draining caches; without this, we can
4116 * easily end up with millions of unnecessary sysfs files on
4117 * systems which have a lot of memory and transient cgroups.
4118 */
4119 if (!__kmem_cache_shrink(s))
4120 sysfs_slab_remove(s);
4121 }
4122
4123 void __kmemcg_cache_deactivate(struct kmem_cache *s)
4124 {
4125 /*
4126 * Disable empty slabs caching. Used to avoid pinning offline
4127 * memory cgroups by kmem pages that can be freed.
4128 */
4129 slub_set_cpu_partial(s, 0);
4130 s->min_partial = 0;
4131 }
4132 #endif /* CONFIG_MEMCG */
4133
4134 static int slab_mem_going_offline_callback(void *arg)
4135 {
4136 struct kmem_cache *s;
4137
4138 mutex_lock(&slab_mutex);
4139 list_for_each_entry(s, &slab_caches, list)
4140 __kmem_cache_shrink(s);
4141 mutex_unlock(&slab_mutex);
4142
4143 return 0;
4144 }
4145
4146 static void slab_mem_offline_callback(void *arg)
4147 {
4148 struct kmem_cache_node *n;
4149 struct kmem_cache *s;
4150 struct memory_notify *marg = arg;
4151 int offline_node;
4152
4153 offline_node = marg->status_change_nid_normal;
4154
4155 /*
4156 	 * If the node still has available memory, we still need its
4157 	 * kmem_cache_node structure, so there is nothing to do here.
4158 */
4159 if (offline_node < 0)
4160 return;
4161
4162 mutex_lock(&slab_mutex);
4163 list_for_each_entry(s, &slab_caches, list) {
4164 n = get_node(s, offline_node);
4165 if (n) {
4166 /*
4167 * if n->nr_slabs > 0, slabs still exist on the node
4168 			 * that is going down. We were unable to free them, and the
4169 			 * offline_pages() function should not have invoked this
4170 			 * callback. So, we must fail.
4171 */
4172 BUG_ON(slabs_node(s, offline_node));
4173
4174 s->node[offline_node] = NULL;
4175 kmem_cache_free(kmem_cache_node, n);
4176 }
4177 }
4178 mutex_unlock(&slab_mutex);
4179 }
4180
4181 static int slab_mem_going_online_callback(void *arg)
4182 {
4183 struct kmem_cache_node *n;
4184 struct kmem_cache *s;
4185 struct memory_notify *marg = arg;
4186 int nid = marg->status_change_nid_normal;
4187 int ret = 0;
4188
4189 /*
4190 * If the node's memory is already available, then kmem_cache_node is
4191 * already created. Nothing to do.
4192 */
4193 if (nid < 0)
4194 return 0;
4195
4196 /*
4197 * We are bringing a node online. No memory is available yet. We must
4198 * allocate a kmem_cache_node structure in order to bring the node
4199 * online.
4200 */
4201 mutex_lock(&slab_mutex);
4202 list_for_each_entry(s, &slab_caches, list) {
4203 /*
4204 * XXX: kmem_cache_alloc_node will fallback to other nodes
4205 * since memory is not yet available from the node that
4206 * is brought up.
4207 */
4208 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
4209 if (!n) {
4210 ret = -ENOMEM;
4211 goto out;
4212 }
4213 init_kmem_cache_node(n);
4214 s->node[nid] = n;
4215 }
4216 out:
4217 mutex_unlock(&slab_mutex);
4218 return ret;
4219 }
4220
4221 static int slab_memory_callback(struct notifier_block *self,
4222 unsigned long action, void *arg)
4223 {
4224 int ret = 0;
4225
4226 switch (action) {
4227 case MEM_GOING_ONLINE:
4228 ret = slab_mem_going_online_callback(arg);
4229 break;
4230 case MEM_GOING_OFFLINE:
4231 ret = slab_mem_going_offline_callback(arg);
4232 break;
4233 case MEM_OFFLINE:
4234 case MEM_CANCEL_ONLINE:
4235 slab_mem_offline_callback(arg);
4236 break;
4237 case MEM_ONLINE:
4238 case MEM_CANCEL_OFFLINE:
4239 break;
4240 }
4241 if (ret)
4242 ret = notifier_from_errno(ret);
4243 else
4244 ret = NOTIFY_OK;
4245 return ret;
4246 }
4247
4248 static struct notifier_block slab_memory_callback_nb = {
4249 .notifier_call = slab_memory_callback,
4250 .priority = SLAB_CALLBACK_PRI,
4251 };
4252
4253 /********************************************************************
4254 * Basic setup of slabs
4255 *******************************************************************/
4256
4257 /*
4258 * Used for early kmem_cache structures that were allocated using
4259 * the page allocator. Allocate them properly then fix up the pointers
4260 * that may be pointing to the wrong kmem_cache structure.
4261 */
4262
4263 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
4264 {
4265 int node;
4266 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
4267 struct kmem_cache_node *n;
4268
4269 memcpy(s, static_cache, kmem_cache->object_size);
4270
4271 /*
4272 * This runs very early, and only the boot processor is supposed to be
4273 * up. Even if it weren't true, IRQs are not up so we couldn't fire
4274 * IPIs around.
4275 */
4276 __flush_cpu_slab(s, smp_processor_id());
4277 for_each_kmem_cache_node(s, node, n) {
4278 struct page *p;
4279
4280 list_for_each_entry(p, &n->partial, slab_list)
4281 p->slab_cache = s;
4282
4283 #ifdef CONFIG_SLUB_DEBUG
4284 list_for_each_entry(p, &n->full, slab_list)
4285 p->slab_cache = s;
4286 #endif
4287 }
4288 slab_init_memcg_params(s);
4289 list_add(&s->list, &slab_caches);
4290 memcg_link_cache(s, NULL);
4291 return s;
4292 }
4293
4294 void __init kmem_cache_init(void)
4295 {
4296 static __initdata struct kmem_cache boot_kmem_cache,
4297 boot_kmem_cache_node;
4298
4299 if (debug_guardpage_minorder())
4300 slub_max_order = 0;
4301
4302 kmem_cache_node = &boot_kmem_cache_node;
4303 kmem_cache = &boot_kmem_cache;
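	/*
	 * Both caches start out as the statically allocated structures
	 * above; once they can allocate from themselves, bootstrap()
	 * migrates them into properly allocated kmem_cache objects and
	 * fixes up the slab_cache back pointers.
	 */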
4304
4305 create_boot_cache(kmem_cache_node, "kmem_cache_node",
4306 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
4307
4308 register_hotmemory_notifier(&slab_memory_callback_nb);
4309
4310 /* Able to allocate the per node structures */
4311 slab_state = PARTIAL;
4312
4313 create_boot_cache(kmem_cache, "kmem_cache",
4314 offsetof(struct kmem_cache, node) +
4315 nr_node_ids * sizeof(struct kmem_cache_node *),
4316 SLAB_HWCACHE_ALIGN, 0, 0);
4317
4318 kmem_cache = bootstrap(&boot_kmem_cache);
4319 kmem_cache_node = bootstrap(&boot_kmem_cache_node);
4320
4321 /* Now we can use the kmem_cache to allocate kmalloc slabs */
4322 setup_kmalloc_cache_index_table();
4323 create_kmalloc_caches(0);
4324
4325 /* Setup random freelists for each cache */
4326 init_freelist_randomization();
4327
4328 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
4329 slub_cpu_dead);
4330
4331 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
4332 cache_line_size(),
4333 slub_min_order, slub_max_order, slub_min_objects,
4334 nr_cpu_ids, nr_node_ids);
4335 }
4336
4337 void __init kmem_cache_init_late(void)
4338 {
4339 }
4340
4341 struct kmem_cache *
4342 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
4343 slab_flags_t flags, void (*ctor)(void *))
4344 {
4345 struct kmem_cache *s, *c;
4346
4347 s = find_mergeable(size, align, flags, name, ctor);
4348 if (s) {
4349 s->refcount++;
4350
4351 /*
4352 * Adjust the object sizes so that we clear
4353 * the complete object on kzalloc.
4354 */
4355 s->object_size = max(s->object_size, size);
4356 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
4357
4358 for_each_memcg_cache(c, s) {
4359 c->object_size = s->object_size;
4360 c->inuse = max(c->inuse, ALIGN(size, sizeof(void *)));
4361 }
4362
4363 if (sysfs_slab_alias(s, name)) {
4364 s->refcount--;
4365 s = NULL;
4366 }
4367 }
4368
4369 return s;
4370 }
4371
4372 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
4373 {
4374 int err;
4375
4376 err = kmem_cache_open(s, flags);
4377 if (err)
4378 return err;
4379
4380 /* Mutex is not taken during early boot */
4381 if (slab_state <= UP)
4382 return 0;
4383
4384 memcg_propagate_slab_attrs(s);
4385 err = sysfs_slab_add(s);
4386 if (err)
4387 __kmem_cache_release(s);
4388
4389 return err;
4390 }
4391
4392 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
4393 {
4394 struct kmem_cache *s;
4395 void *ret;
4396
4397 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
4398 return kmalloc_large(size, gfpflags);
4399
4400 s = kmalloc_slab(size, gfpflags);
4401
4402 if (unlikely(ZERO_OR_NULL_PTR(s)))
4403 return s;
4404
4405 ret = slab_alloc(s, gfpflags, caller);
4406
4407 /* Honor the call site pointer we received. */
4408 trace_kmalloc(caller, ret, size, s->size, gfpflags);
4409
4410 return ret;
4411 }
4412 EXPORT_SYMBOL(__kmalloc_track_caller);
4413
4414 #ifdef CONFIG_NUMA
4415 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
4416 int node, unsigned long caller)
4417 {
4418 struct kmem_cache *s;
4419 void *ret;
4420
4421 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4422 ret = kmalloc_large_node(size, gfpflags, node);
4423
4424 trace_kmalloc_node(caller, ret,
4425 size, PAGE_SIZE << get_order(size),
4426 gfpflags, node);
4427
4428 return ret;
4429 }
4430
4431 s = kmalloc_slab(size, gfpflags);
4432
4433 if (unlikely(ZERO_OR_NULL_PTR(s)))
4434 return s;
4435
4436 ret = slab_alloc_node(s, gfpflags, node, caller);
4437
4438 /* Honor the call site pointer we received. */
4439 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
4440
4441 return ret;
4442 }
4443 EXPORT_SYMBOL(__kmalloc_node_track_caller);
4444 #endif
4445
4446 #ifdef CONFIG_SYSFS
4447 static int count_inuse(struct page *page)
4448 {
4449 return page->inuse;
4450 }
4451
4452 static int count_total(struct page *page)
4453 {
4454 return page->objects;
4455 }
4456 #endif
4457
4458 #ifdef CONFIG_SLUB_DEBUG
4459 static int validate_slab(struct kmem_cache *s, struct page *page,
4460 unsigned long *map)
4461 {
4462 void *p;
4463 void *addr = page_address(page);
4464
4465 if (!check_slab(s, page) ||
4466 !on_freelist(s, page, NULL))
4467 return 0;
4468
4469 /* Now we know that a valid freelist exists */
4470 bitmap_zero(map, page->objects);
4471
4472 get_map(s, page, map);
4473 for_each_object(p, s, addr, page->objects) {
4474 if (test_bit(slab_index(p, s, addr), map))
4475 if (!check_object(s, page, p, SLUB_RED_INACTIVE))
4476 return 0;
4477 }
4478
4479 for_each_object(p, s, addr, page->objects)
4480 if (!test_bit(slab_index(p, s, addr), map))
4481 if (!check_object(s, page, p, SLUB_RED_ACTIVE))
4482 return 0;
4483 return 1;
4484 }
4485
4486 static void validate_slab_slab(struct kmem_cache *s, struct page *page,
4487 unsigned long *map)
4488 {
4489 slab_lock(page);
4490 validate_slab(s, page, map);
4491 slab_unlock(page);
4492 }
4493
4494 static int validate_slab_node(struct kmem_cache *s,
4495 struct kmem_cache_node *n, unsigned long *map)
4496 {
4497 unsigned long count = 0;
4498 struct page *page;
4499 unsigned long flags;
4500
4501 spin_lock_irqsave(&n->list_lock, flags);
4502
4503 list_for_each_entry(page, &n->partial, slab_list) {
4504 validate_slab_slab(s, page, map);
4505 count++;
4506 }
4507 if (count != n->nr_partial)
4508 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
4509 s->name, count, n->nr_partial);
4510
4511 if (!(s->flags & SLAB_STORE_USER))
4512 goto out;
4513
4514 list_for_each_entry(page, &n->full, slab_list) {
4515 validate_slab_slab(s, page, map);
4516 count++;
4517 }
4518 if (count != atomic_long_read(&n->nr_slabs))
4519 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
4520 s->name, count, atomic_long_read(&n->nr_slabs));
4521
4522 out:
4523 spin_unlock_irqrestore(&n->list_lock, flags);
4524 return count;
4525 }
4526
4527 static long validate_slab_cache(struct kmem_cache *s)
4528 {
4529 int node;
4530 unsigned long count = 0;
4531 struct kmem_cache_node *n;
4532 unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
4533
4534 if (!map)
4535 return -ENOMEM;
4536
4537 flush_all(s);
4538 for_each_kmem_cache_node(s, node, n)
4539 count += validate_slab_node(s, n, map);
4540 bitmap_free(map);
4541 return count;
4542 }
4543 /*
4544 * Generate lists of code addresses where slabcache objects are allocated
4545 * and freed.
4546 */
4547
4548 struct location {
4549 unsigned long count;
4550 unsigned long addr;
4551 long long sum_time;
4552 long min_time;
4553 long max_time;
4554 long min_pid;
4555 long max_pid;
4556 DECLARE_BITMAP(cpus, NR_CPUS);
4557 nodemask_t nodes;
4558 };
4559
4560 struct loc_track {
4561 unsigned long max;
4562 unsigned long count;
4563 struct location *loc;
4564 };
4565
4566 static void free_loc_track(struct loc_track *t)
4567 {
4568 if (t->max)
4569 free_pages((unsigned long)t->loc,
4570 get_order(sizeof(struct location) * t->max));
4571 }
4572
4573 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
4574 {
4575 struct location *l;
4576 int order;
4577
4578 order = get_order(sizeof(struct location) * max);
4579
4580 l = (void *)__get_free_pages(flags, order);
4581 if (!l)
4582 return 0;
4583
4584 if (t->count) {
4585 memcpy(l, t->loc, sizeof(struct location) * t->count);
4586 free_loc_track(t);
4587 }
4588 t->max = max;
4589 t->loc = l;
4590 return 1;
4591 }
4592
4593 static int add_location(struct loc_track *t, struct kmem_cache *s,
4594 const struct track *track)
4595 {
4596 long start, end, pos;
4597 struct location *l;
4598 unsigned long caddr;
4599 unsigned long age = jiffies - track->when;
4600
4601 start = -1;
4602 end = t->count;
4603
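	/*
	 * Binary search for track->addr in t->loc[], which is kept sorted
	 * by address.
	 */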
4604 for ( ; ; ) {
4605 pos = start + (end - start + 1) / 2;
4606
4607 /*
4608 * There is nothing at "end". If we end up there
4609 * we need to add something to before end.
4610 		 * we need to insert the new element before end.
4611 if (pos == end)
4612 break;
4613
4614 caddr = t->loc[pos].addr;
4615 if (track->addr == caddr) {
4616
4617 l = &t->loc[pos];
4618 l->count++;
4619 if (track->when) {
4620 l->sum_time += age;
4621 if (age < l->min_time)
4622 l->min_time = age;
4623 if (age > l->max_time)
4624 l->max_time = age;
4625
4626 if (track->pid < l->min_pid)
4627 l->min_pid = track->pid;
4628 if (track->pid > l->max_pid)
4629 l->max_pid = track->pid;
4630
4631 cpumask_set_cpu(track->cpu,
4632 to_cpumask(l->cpus));
4633 }
4634 node_set(page_to_nid(virt_to_page(track)), l->nodes);
4635 return 1;
4636 }
4637
4638 if (track->addr < caddr)
4639 end = pos;
4640 else
4641 start = pos;
4642 }
4643
4644 /*
4645 * Not found. Insert new tracking element.
4646 */
4647 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
4648 return 0;
4649
4650 l = t->loc + pos;
4651 if (pos < t->count)
4652 memmove(l + 1, l,
4653 (t->count - pos) * sizeof(struct location));
4654 t->count++;
4655 l->count = 1;
4656 l->addr = track->addr;
4657 l->sum_time = age;
4658 l->min_time = age;
4659 l->max_time = age;
4660 l->min_pid = track->pid;
4661 l->max_pid = track->pid;
4662 cpumask_clear(to_cpumask(l->cpus));
4663 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
4664 nodes_clear(l->nodes);
4665 node_set(page_to_nid(virt_to_page(track)), l->nodes);
4666 return 1;
4667 }
4668
4669 static void process_slab(struct loc_track *t, struct kmem_cache *s,
4670 struct page *page, enum track_item alloc,
4671 unsigned long *map)
4672 {
4673 void *addr = page_address(page);
4674 void *p;
4675
4676 bitmap_zero(map, page->objects);
4677 get_map(s, page, map);
4678
4679 for_each_object(p, s, addr, page->objects)
4680 if (!test_bit(slab_index(p, s, addr), map))
4681 add_location(t, s, get_track(s, p, alloc));
4682 }
4683
4684 static int list_locations(struct kmem_cache *s, char *buf,
4685 enum track_item alloc)
4686 {
4687 int len = 0;
4688 unsigned long i;
4689 struct loc_track t = { 0, 0, NULL };
4690 int node;
4691 struct kmem_cache_node *n;
4692 unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
4693
4694 if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
4695 GFP_KERNEL)) {
4696 bitmap_free(map);
4697 return sprintf(buf, "Out of memory\n");
4698 }
4699 /* Push back cpu slabs */
4700 flush_all(s);
4701
4702 for_each_kmem_cache_node(s, node, n) {
4703 unsigned long flags;
4704 struct page *page;
4705
4706 if (!atomic_long_read(&n->nr_slabs))
4707 continue;
4708
4709 spin_lock_irqsave(&n->list_lock, flags);
4710 list_for_each_entry(page, &n->partial, slab_list)
4711 process_slab(&t, s, page, alloc, map);
4712 list_for_each_entry(page, &n->full, slab_list)
4713 process_slab(&t, s, page, alloc, map);
4714 spin_unlock_irqrestore(&n->list_lock, flags);
4715 }
4716
4717 for (i = 0; i < t.count; i++) {
4718 struct location *l = &t.loc[i];
4719
4720 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
4721 break;
4722 len += sprintf(buf + len, "%7ld ", l->count);
4723
4724 if (l->addr)
4725 len += sprintf(buf + len, "%pS", (void *)l->addr);
4726 else
4727 len += sprintf(buf + len, "<not-available>");
4728
4729 if (l->sum_time != l->min_time) {
4730 len += sprintf(buf + len, " age=%ld/%ld/%ld",
4731 l->min_time,
4732 (long)div_u64(l->sum_time, l->count),
4733 l->max_time);
4734 } else
4735 len += sprintf(buf + len, " age=%ld",
4736 l->min_time);
4737
4738 if (l->min_pid != l->max_pid)
4739 len += sprintf(buf + len, " pid=%ld-%ld",
4740 l->min_pid, l->max_pid);
4741 else
4742 len += sprintf(buf + len, " pid=%ld",
4743 l->min_pid);
4744
4745 if (num_online_cpus() > 1 &&
4746 !cpumask_empty(to_cpumask(l->cpus)) &&
4747 len < PAGE_SIZE - 60)
4748 len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4749 " cpus=%*pbl",
4750 cpumask_pr_args(to_cpumask(l->cpus)));
4751
4752 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
4753 len < PAGE_SIZE - 60)
4754 len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4755 " nodes=%*pbl",
4756 nodemask_pr_args(&l->nodes));
4757
4758 len += sprintf(buf + len, "\n");
4759 }
4760
4761 free_loc_track(&t);
4762 bitmap_free(map);
4763 if (!t.count)
4764 len += sprintf(buf, "No data\n");
4765 return len;
4766 }
4767 #endif /* CONFIG_SLUB_DEBUG */
4768
4769 #ifdef SLUB_RESILIENCY_TEST
4770 static void __init resiliency_test(void)
4771 {
4772 u8 *p;
4773 int type = KMALLOC_NORMAL;
4774
4775 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
4776
4777 pr_err("SLUB resiliency testing\n");
4778 pr_err("-----------------------\n");
4779 pr_err("A. Corruption after allocation\n");
4780
4781 p = kzalloc(16, GFP_KERNEL);
4782 p[16] = 0x12;
4783 pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
4784 p + 16);
4785
4786 validate_slab_cache(kmalloc_caches[type][4]);
4787
4788 /* Hmmm... The next two are dangerous */
4789 p = kzalloc(32, GFP_KERNEL);
4790 p[32 + sizeof(void *)] = 0x34;
4791 pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> -0x%p\n",
4792 p);
4793 pr_err("If allocated object is overwritten then not detectable\n\n");
4794
4795 validate_slab_cache(kmalloc_caches[type][5]);
4796 p = kzalloc(64, GFP_KERNEL);
4797 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4798 *p = 0x56;
4799 pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4800 p);
4801 pr_err("If allocated object is overwritten then not detectable\n\n");
4802 validate_slab_cache(kmalloc_caches[type][6]);
4803
4804 pr_err("\nB. Corruption after free\n");
4805 p = kzalloc(128, GFP_KERNEL);
4806 kfree(p);
4807 *p = 0x78;
4808 pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4809 validate_slab_cache(kmalloc_caches[type][7]);
4810
4811 p = kzalloc(256, GFP_KERNEL);
4812 kfree(p);
4813 p[50] = 0x9a;
4814 pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
4815 validate_slab_cache(kmalloc_caches[type][8]);
4816
4817 p = kzalloc(512, GFP_KERNEL);
4818 kfree(p);
4819 p[512] = 0xab;
4820 pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4821 validate_slab_cache(kmalloc_caches[type][9]);
4822 }
4823 #else
4824 #ifdef CONFIG_SYSFS
4825 static void resiliency_test(void) {};
4826 #endif
4827 #endif /* SLUB_RESILIENCY_TEST */
4828
4829 #ifdef CONFIG_SYSFS
4830 enum slab_stat_type {
4831 SL_ALL, /* All slabs */
4832 SL_PARTIAL, /* Only partially allocated slabs */
4833 SL_CPU, /* Only slabs used for cpu caches */
4834 SL_OBJECTS, /* Determine allocated objects not slabs */
4835 SL_TOTAL /* Determine object capacity not slabs */
4836 };
4837
4838 #define SO_ALL (1 << SL_ALL)
4839 #define SO_PARTIAL (1 << SL_PARTIAL)
4840 #define SO_CPU (1 << SL_CPU)
4841 #define SO_OBJECTS (1 << SL_OBJECTS)
4842 #define SO_TOTAL (1 << SL_TOTAL)
4843
4844 #ifdef CONFIG_MEMCG
4845 static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
4846
4847 static int __init setup_slub_memcg_sysfs(char *str)
4848 {
4849 int v;
4850
4851 if (get_option(&str, &v) > 0)
4852 memcg_sysfs_enabled = v;
4853
4854 return 1;
4855 }
4856
4857 __setup("slub_memcg_sysfs=", setup_slub_memcg_sysfs);
4858 #endif
4859
4860 static ssize_t show_slab_objects(struct kmem_cache *s,
4861 char *buf, unsigned long flags)
4862 {
4863 unsigned long total = 0;
4864 int node;
4865 int x;
4866 unsigned long *nodes;
4867
4868 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
4869 if (!nodes)
4870 return -ENOMEM;
4871
4872 if (flags & SO_CPU) {
4873 int cpu;
4874
4875 for_each_possible_cpu(cpu) {
4876 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
4877 cpu);
4878 int node;
4879 struct page *page;
4880
4881 page = READ_ONCE(c->page);
4882 if (!page)
4883 continue;
4884
4885 node = page_to_nid(page);
4886 if (flags & SO_TOTAL)
4887 x = page->objects;
4888 else if (flags & SO_OBJECTS)
4889 x = page->inuse;
4890 else
4891 x = 1;
4892
4893 total += x;
4894 nodes[node] += x;
4895
4896 page = slub_percpu_partial_read_once(c);
4897 if (page) {
4898 node = page_to_nid(page);
4899 if (flags & SO_TOTAL)
4900 WARN_ON_ONCE(1);
4901 else if (flags & SO_OBJECTS)
4902 WARN_ON_ONCE(1);
4903 else
4904 x = page->pages;
4905 total += x;
4906 nodes[node] += x;
4907 }
4908 }
4909 }
4910
4911 /*
4912 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
4913 * already held which will conflict with an existing lock order:
4914 *
4915 * mem_hotplug_lock->slab_mutex->kernfs_mutex
4916 *
4917 * We don't really need mem_hotplug_lock (to hold off
4918 * slab_mem_going_offline_callback) here because slab's memory hot
4919 * unplug code doesn't destroy the kmem_cache->node[] data.
4920 */
4921
4922 #ifdef CONFIG_SLUB_DEBUG
4923 if (flags & SO_ALL) {
4924 struct kmem_cache_node *n;
4925
4926 for_each_kmem_cache_node(s, node, n) {
4927
4928 if (flags & SO_TOTAL)
4929 x = atomic_long_read(&n->total_objects);
4930 else if (flags & SO_OBJECTS)
4931 x = atomic_long_read(&n->total_objects) -
4932 count_partial(n, count_free);
4933 else
4934 x = atomic_long_read(&n->nr_slabs);
4935 total += x;
4936 nodes[node] += x;
4937 }
4938
4939 } else
4940 #endif
4941 if (flags & SO_PARTIAL) {
4942 struct kmem_cache_node *n;
4943
4944 for_each_kmem_cache_node(s, node, n) {
4945 if (flags & SO_TOTAL)
4946 x = count_partial(n, count_total);
4947 else if (flags & SO_OBJECTS)
4948 x = count_partial(n, count_inuse);
4949 else
4950 x = n->nr_partial;
4951 total += x;
4952 nodes[node] += x;
4953 }
4954 }
4955 x = sprintf(buf, "%lu", total);
4956 #ifdef CONFIG_NUMA
4957 for (node = 0; node < nr_node_ids; node++)
4958 if (nodes[node])
4959 x += sprintf(buf + x, " N%d=%lu",
4960 node, nodes[node]);
4961 #endif
4962 kfree(nodes);
4963 return x + sprintf(buf + x, "\n");
4964 }
4965
4966 #ifdef CONFIG_SLUB_DEBUG
4967 static int any_slab_objects(struct kmem_cache *s)
4968 {
4969 int node;
4970 struct kmem_cache_node *n;
4971
4972 for_each_kmem_cache_node(s, node, n)
4973 if (atomic_long_read(&n->total_objects))
4974 return 1;
4975
4976 return 0;
4977 }
4978 #endif
4979
4980 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
4981 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
4982
4983 struct slab_attribute {
4984 struct attribute attr;
4985 ssize_t (*show)(struct kmem_cache *s, char *buf);
4986 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4987 };
4988
4989 #define SLAB_ATTR_RO(_name) \
4990 static struct slab_attribute _name##_attr = \
4991 __ATTR(_name, 0400, _name##_show, NULL)
4992
4993 #define SLAB_ATTR(_name) \
4994 static struct slab_attribute _name##_attr = \
4995 __ATTR(_name, 0600, _name##_show, _name##_store)
4996
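/*
 * For example, SLAB_ATTR_RO(slab_size) below defines slab_size_attr as a
 * read-only sysfs attribute backed by slab_size_show(), while SLAB_ATTR()
 * additionally wires up a matching _store() handler.
 */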
4997 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4998 {
4999 return sprintf(buf, "%u\n", s->size);
5000 }
5001 SLAB_ATTR_RO(slab_size);
5002
5003 static ssize_t align_show(struct kmem_cache *s, char *buf)
5004 {
5005 return sprintf(buf, "%u\n", s->align);
5006 }
5007 SLAB_ATTR_RO(align);
5008
5009 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
5010 {
5011 return sprintf(buf, "%u\n", s->object_size);
5012 }
5013 SLAB_ATTR_RO(object_size);
5014
5015 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
5016 {
5017 return sprintf(buf, "%u\n", oo_objects(s->oo));
5018 }
5019 SLAB_ATTR_RO(objs_per_slab);
5020
5021 static ssize_t order_store(struct kmem_cache *s,
5022 const char *buf, size_t length)
5023 {
5024 unsigned int order;
5025 int err;
5026
5027 err = kstrtouint(buf, 10, &order);
5028 if (err)
5029 return err;
5030
5031 if (order > slub_max_order || order < slub_min_order)
5032 return -EINVAL;
5033
5034 calculate_sizes(s, order);
5035 return length;
5036 }
5037
5038 static ssize_t order_show(struct kmem_cache *s, char *buf)
5039 {
5040 return sprintf(buf, "%u\n", oo_order(s->oo));
5041 }
5042 SLAB_ATTR(order);
5043
5044 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
5045 {
5046 return sprintf(buf, "%lu\n", s->min_partial);
5047 }
5048
5049 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
5050 size_t length)
5051 {
5052 unsigned long min;
5053 int err;
5054
5055 err = kstrtoul(buf, 10, &min);
5056 if (err)
5057 return err;
5058
5059 set_min_partial(s, min);
5060 return length;
5061 }
5062 SLAB_ATTR(min_partial);
5063
5064 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
5065 {
5066 return sprintf(buf, "%u\n", slub_cpu_partial(s));
5067 }
5068
5069 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
5070 size_t length)
5071 {
5072 unsigned int objects;
5073 int err;
5074
5075 err = kstrtouint(buf, 10, &objects);
5076 if (err)
5077 return err;
5078 if (objects && !kmem_cache_has_cpu_partial(s))
5079 return -EINVAL;
5080
5081 slub_set_cpu_partial(s, objects);
5082 flush_all(s);
5083 return length;
5084 }
5085 SLAB_ATTR(cpu_partial);
5086
5087 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
5088 {
5089 if (!s->ctor)
5090 return 0;
5091 return sprintf(buf, "%pS\n", s->ctor);
5092 }
5093 SLAB_ATTR_RO(ctor);
5094
5095 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
5096 {
5097 return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
5098 }
5099 SLAB_ATTR_RO(aliases);
5100
5101 static ssize_t partial_show(struct kmem_cache *s, char *buf)
5102 {
5103 return show_slab_objects(s, buf, SO_PARTIAL);
5104 }
5105 SLAB_ATTR_RO(partial);
5106
5107 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
5108 {
5109 return show_slab_objects(s, buf, SO_CPU);
5110 }
5111 SLAB_ATTR_RO(cpu_slabs);
5112
5113 static ssize_t objects_show(struct kmem_cache *s, char *buf)
5114 {
5115 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
5116 }
5117 SLAB_ATTR_RO(objects);
5118
5119 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
5120 {
5121 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
5122 }
5123 SLAB_ATTR_RO(objects_partial);
5124
5125 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
5126 {
5127 int objects = 0;
5128 int pages = 0;
5129 int cpu;
5130 int len;
5131
5132 for_each_online_cpu(cpu) {
5133 struct page *page;
5134
5135 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5136
5137 if (page) {
5138 pages += page->pages;
5139 objects += page->pobjects;
5140 }
5141 }
5142
5143 len = sprintf(buf, "%d(%d)", objects, pages);
5144
5145 #ifdef CONFIG_SMP
5146 for_each_online_cpu(cpu) {
5147 struct page *page;
5148
5149 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5150
5151 if (page && len < PAGE_SIZE - 20)
5152 len += sprintf(buf + len, " C%d=%d(%d)", cpu,
5153 page->pobjects, page->pages);
5154 }
5155 #endif
5156 return len + sprintf(buf + len, "\n");
5157 }
5158 SLAB_ATTR_RO(slabs_cpu_partial);
5159
5160 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
5161 {
5162 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
5163 }
5164
5165 static ssize_t reclaim_account_store(struct kmem_cache *s,
5166 const char *buf, size_t length)
5167 {
5168 s->flags &= ~SLAB_RECLAIM_ACCOUNT;
5169 if (buf[0] == '1')
5170 s->flags |= SLAB_RECLAIM_ACCOUNT;
5171 return length;
5172 }
5173 SLAB_ATTR(reclaim_account);
5174
5175 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
5176 {
5177 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
5178 }
5179 SLAB_ATTR_RO(hwcache_align);
5180
5181 #ifdef CONFIG_ZONE_DMA
5182 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
5183 {
5184 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
5185 }
5186 SLAB_ATTR_RO(cache_dma);
5187 #endif
5188
5189 static ssize_t usersize_show(struct kmem_cache *s, char *buf)
5190 {
5191 return sprintf(buf, "%u\n", s->usersize);
5192 }
5193 SLAB_ATTR_RO(usersize);
5194
5195 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
5196 {
5197 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
5198 }
5199 SLAB_ATTR_RO(destroy_by_rcu);
5200
5201 #ifdef CONFIG_SLUB_DEBUG
5202 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
5203 {
5204 return show_slab_objects(s, buf, SO_ALL);
5205 }
5206 SLAB_ATTR_RO(slabs);
5207
5208 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
5209 {
5210 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
5211 }
5212 SLAB_ATTR_RO(total_objects);
5213
5214 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
5215 {
5216 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
5217 }
5218
5219 static ssize_t sanity_checks_store(struct kmem_cache *s,
5220 const char *buf, size_t length)
5221 {
5222 s->flags &= ~SLAB_CONSISTENCY_CHECKS;
5223 if (buf[0] == '1') {
5224 s->flags &= ~__CMPXCHG_DOUBLE;
5225 s->flags |= SLAB_CONSISTENCY_CHECKS;
5226 }
5227 return length;
5228 }
5229 SLAB_ATTR(sanity_checks);
5230
5231 static ssize_t trace_show(struct kmem_cache *s, char *buf)
5232 {
5233 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
5234 }
5235
5236 static ssize_t trace_store(struct kmem_cache *s, const char *buf,
5237 size_t length)
5238 {
5239 /*
5240 * Tracing a merged cache is going to give confusing results
5241 * as well as cause other issues like converting a mergeable
5242 	 * cache into an unmergeable one.
5243 */
5244 if (s->refcount > 1)
5245 return -EINVAL;
5246
5247 s->flags &= ~SLAB_TRACE;
5248 if (buf[0] == '1') {
5249 s->flags &= ~__CMPXCHG_DOUBLE;
5250 s->flags |= SLAB_TRACE;
5251 }
5252 return length;
5253 }
5254 SLAB_ATTR(trace);
5255
5256 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
5257 {
5258 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
5259 }
5260
5261 static ssize_t red_zone_store(struct kmem_cache *s,
5262 const char *buf, size_t length)
5263 {
5264 if (any_slab_objects(s))
5265 return -EBUSY;
5266
5267 s->flags &= ~SLAB_RED_ZONE;
5268 if (buf[0] == '1') {
5269 s->flags |= SLAB_RED_ZONE;
5270 }
5271 calculate_sizes(s, -1);
5272 return length;
5273 }
5274 SLAB_ATTR(red_zone);
5275
5276 static ssize_t poison_show(struct kmem_cache *s, char *buf)
5277 {
5278 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
5279 }
5280
5281 static ssize_t poison_store(struct kmem_cache *s,
5282 const char *buf, size_t length)
5283 {
5284 if (any_slab_objects(s))
5285 return -EBUSY;
5286
5287 s->flags &= ~SLAB_POISON;
5288 if (buf[0] == '1') {
5289 s->flags |= SLAB_POISON;
5290 }
5291 calculate_sizes(s, -1);
5292 return length;
5293 }
5294 SLAB_ATTR(poison);
5295
5296 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
5297 {
5298 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
5299 }
5300
5301 static ssize_t store_user_store(struct kmem_cache *s,
5302 const char *buf, size_t length)
5303 {
5304 if (any_slab_objects(s))
5305 return -EBUSY;
5306
5307 s->flags &= ~SLAB_STORE_USER;
5308 if (buf[0] == '1') {
5309 s->flags &= ~__CMPXCHG_DOUBLE;
5310 s->flags |= SLAB_STORE_USER;
5311 }
5312 calculate_sizes(s, -1);
5313 return length;
5314 }
5315 SLAB_ATTR(store_user);
5316
5317 static ssize_t validate_show(struct kmem_cache *s, char *buf)
5318 {
5319 return 0;
5320 }
5321
5322 static ssize_t validate_store(struct kmem_cache *s,
5323 const char *buf, size_t length)
5324 {
5325 int ret = -EINVAL;
5326
5327 if (buf[0] == '1') {
5328 ret = validate_slab_cache(s);
5329 if (ret >= 0)
5330 ret = length;
5331 }
5332 return ret;
5333 }
5334 SLAB_ATTR(validate);
5335
5336 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
5337 {
5338 if (!(s->flags & SLAB_STORE_USER))
5339 return -ENOSYS;
5340 return list_locations(s, buf, TRACK_ALLOC);
5341 }
5342 SLAB_ATTR_RO(alloc_calls);
5343
5344 static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
5345 {
5346 if (!(s->flags & SLAB_STORE_USER))
5347 return -ENOSYS;
5348 return list_locations(s, buf, TRACK_FREE);
5349 }
5350 SLAB_ATTR_RO(free_calls);
5351 #endif /* CONFIG_SLUB_DEBUG */
5352
5353 #ifdef CONFIG_FAILSLAB
5354 static ssize_t failslab_show(struct kmem_cache *s, char *buf)
5355 {
5356 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
5357 }
5358
5359 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
5360 size_t length)
5361 {
5362 if (s->refcount > 1)
5363 return -EINVAL;
5364
5365 s->flags &= ~SLAB_FAILSLAB;
5366 if (buf[0] == '1')
5367 s->flags |= SLAB_FAILSLAB;
5368 return length;
5369 }
5370 SLAB_ATTR(failslab);
5371 #endif
5372
5373 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
5374 {
5375 return 0;
5376 }
5377
5378 static ssize_t shrink_store(struct kmem_cache *s,
5379 const char *buf, size_t length)
5380 {
5381 if (buf[0] == '1')
5382 kmem_cache_shrink_all(s);
5383 else
5384 return -EINVAL;
5385 return length;
5386 }
5387 SLAB_ATTR(shrink);
5388
5389 #ifdef CONFIG_NUMA
5390 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
5391 {
5392 return sprintf(buf, "%u\n", s->remote_node_defrag_ratio / 10);
5393 }
5394
5395 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
5396 const char *buf, size_t length)
5397 {
5398 unsigned int ratio;
5399 int err;
5400
5401 err = kstrtouint(buf, 10, &ratio);
5402 if (err)
5403 return err;
5404 if (ratio > 100)
5405 return -ERANGE;
5406
5407 s->remote_node_defrag_ratio = ratio * 10;
5408
5409 return length;
5410 }
5411 SLAB_ATTR(remote_node_defrag_ratio);
5412 #endif
5413
5414 #ifdef CONFIG_SLUB_STATS
5415 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
5416 {
5417 unsigned long sum = 0;
5418 int cpu;
5419 int len;
5420 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
5421
5422 if (!data)
5423 return -ENOMEM;
5424
5425 for_each_online_cpu(cpu) {
5426 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
5427
5428 data[cpu] = x;
5429 sum += x;
5430 }
5431
5432 len = sprintf(buf, "%lu", sum);
5433
5434 #ifdef CONFIG_SMP
5435 for_each_online_cpu(cpu) {
5436 if (data[cpu] && len < PAGE_SIZE - 20)
5437 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
5438 }
5439 #endif
5440 kfree(data);
5441 return len + sprintf(buf + len, "\n");
5442 }
5443
5444 static void clear_stat(struct kmem_cache *s, enum stat_item si)
5445 {
5446 int cpu;
5447
5448 for_each_online_cpu(cpu)
5449 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
5450 }
5451
5452 #define STAT_ATTR(si, text) \
5453 static ssize_t text##_show(struct kmem_cache *s, char *buf) \
5454 { \
5455 return show_stat(s, buf, si); \
5456 } \
5457 static ssize_t text##_store(struct kmem_cache *s, \
5458 const char *buf, size_t length) \
5459 { \
5460 if (buf[0] != '0') \
5461 return -EINVAL; \
5462 clear_stat(s, si); \
5463 return length; \
5464 } \
5465 SLAB_ATTR(text); \
5466
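/*
 * For example, STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath) expands to an
 * alloc_fastpath_show()/alloc_fastpath_store() pair plus the matching
 * sysfs attribute; writing '0' to the file clears the counter on all
 * online cpus.
 */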
5467 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
5468 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
5469 STAT_ATTR(FREE_FASTPATH, free_fastpath);
5470 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
5471 STAT_ATTR(FREE_FROZEN, free_frozen);
5472 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
5473 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
5474 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
5475 STAT_ATTR(ALLOC_SLAB, alloc_slab);
5476 STAT_ATTR(ALLOC_REFILL, alloc_refill);
5477 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
5478 STAT_ATTR(FREE_SLAB, free_slab);
5479 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
5480 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
5481 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
5482 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
5483 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
5484 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
5485 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
5486 STAT_ATTR(ORDER_FALLBACK, order_fallback);
5487 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
5488 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
5489 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
5490 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
5491 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
5492 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
5493 #endif /* CONFIG_SLUB_STATS */
5494
5495 static struct attribute *slab_attrs[] = {
5496 &slab_size_attr.attr,
5497 &object_size_attr.attr,
5498 &objs_per_slab_attr.attr,
5499 &order_attr.attr,
5500 &min_partial_attr.attr,
5501 &cpu_partial_attr.attr,
5502 &objects_attr.attr,
5503 &objects_partial_attr.attr,
5504 &partial_attr.attr,
5505 &cpu_slabs_attr.attr,
5506 &ctor_attr.attr,
5507 &aliases_attr.attr,
5508 &align_attr.attr,
5509 &hwcache_align_attr.attr,
5510 &reclaim_account_attr.attr,
5511 &destroy_by_rcu_attr.attr,
5512 &shrink_attr.attr,
5513 &slabs_cpu_partial_attr.attr,
5514 #ifdef CONFIG_SLUB_DEBUG
5515 &total_objects_attr.attr,
5516 &slabs_attr.attr,
5517 &sanity_checks_attr.attr,
5518 &trace_attr.attr,
5519 &red_zone_attr.attr,
5520 &poison_attr.attr,
5521 &store_user_attr.attr,
5522 &validate_attr.attr,
5523 &alloc_calls_attr.attr,
5524 &free_calls_attr.attr,
5525 #endif
5526 #ifdef CONFIG_ZONE_DMA
5527 &cache_dma_attr.attr,
5528 #endif
5529 #ifdef CONFIG_NUMA
5530 &remote_node_defrag_ratio_attr.attr,
5531 #endif
5532 #ifdef CONFIG_SLUB_STATS
5533 &alloc_fastpath_attr.attr,
5534 &alloc_slowpath_attr.attr,
5535 &free_fastpath_attr.attr,
5536 &free_slowpath_attr.attr,
5537 &free_frozen_attr.attr,
5538 &free_add_partial_attr.attr,
5539 &free_remove_partial_attr.attr,
5540 &alloc_from_partial_attr.attr,
5541 &alloc_slab_attr.attr,
5542 &alloc_refill_attr.attr,
5543 &alloc_node_mismatch_attr.attr,
5544 &free_slab_attr.attr,
5545 &cpuslab_flush_attr.attr,
5546 &deactivate_full_attr.attr,
5547 &deactivate_empty_attr.attr,
5548 &deactivate_to_head_attr.attr,
5549 &deactivate_to_tail_attr.attr,
5550 &deactivate_remote_frees_attr.attr,
5551 &deactivate_bypass_attr.attr,
5552 &order_fallback_attr.attr,
5553 &cmpxchg_double_fail_attr.attr,
5554 &cmpxchg_double_cpu_fail_attr.attr,
5555 &cpu_partial_alloc_attr.attr,
5556 &cpu_partial_free_attr.attr,
5557 &cpu_partial_node_attr.attr,
5558 &cpu_partial_drain_attr.attr,
5559 #endif
5560 #ifdef CONFIG_FAILSLAB
5561 &failslab_attr.attr,
5562 #endif
5563 &usersize_attr.attr,
5564
5565 NULL
5566 };
5567
5568 static const struct attribute_group slab_attr_group = {
5569 .attrs = slab_attrs,
5570 };
5571
5572 static ssize_t slab_attr_show(struct kobject *kobj,
5573 struct attribute *attr,
5574 char *buf)
5575 {
5576 struct slab_attribute *attribute;
5577 struct kmem_cache *s;
5578 int err;
5579
5580 attribute = to_slab_attr(attr);
5581 s = to_slab(kobj);
5582
5583 if (!attribute->show)
5584 return -EIO;
5585
5586 err = attribute->show(s, buf);
5587
5588 return err;
5589 }
5590
5591 static ssize_t slab_attr_store(struct kobject *kobj,
5592 struct attribute *attr,
5593 const char *buf, size_t len)
5594 {
5595 struct slab_attribute *attribute;
5596 struct kmem_cache *s;
5597 int err;
5598
5599 attribute = to_slab_attr(attr);
5600 s = to_slab(kobj);
5601
5602 if (!attribute->store)
5603 return -EIO;
5604
5605 err = attribute->store(s, buf, len);
5606 #ifdef CONFIG_MEMCG
5607 if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
5608 struct kmem_cache *c;
5609
5610 mutex_lock(&slab_mutex);
5611 if (s->max_attr_size < len)
5612 s->max_attr_size = len;
5613
5614 /*
5615 * This is a best effort propagation, so this function's return
5616 * value will be determined by the parent cache only. This is
5617 	 * basically because not all attributes have well-defined
5618 	 * semantics for rollback - most of the actions have
5619 	 * permanent effects.
5620 *
5621 	 * Returning the error value of whichever child happens to fail
5622 	 * is not well defined either: a user seeing that error code
5623 	 * cannot tell anything about the state of
5624 	 * the cache.
5625 *
5626 	 * Returning only the error code of the parent cache at least
5627 	 * has well-defined semantics: the cache being written to
5628 	 * directly either failed or succeeded, and on success we loop
5629 	 * through the descendants with best-effort propagation.
5630 */
5631 for_each_memcg_cache(c, s)
5632 attribute->store(c, buf, len);
5633 mutex_unlock(&slab_mutex);
5634 }
5635 #endif
5636 return err;
5637 }
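/*
 * Illustrative consequence of the propagation above: writing any
 * attribute of a root cache through its sysfs file is also replayed on
 * every memcg child cache of that root cache, while the return value
 * seen by the writer reflects only the root cache itself.
 */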
5638
5639 static void memcg_propagate_slab_attrs(struct kmem_cache *s)
5640 {
5641 #ifdef CONFIG_MEMCG
5642 int i;
5643 char *buffer = NULL;
5644 struct kmem_cache *root_cache;
5645
5646 if (is_root_cache(s))
5647 return;
5648
5649 root_cache = s->memcg_params.root_cache;
5650
5651 /*
5652 	 * This means no attribute of the root cache was ever written, so
5653 	 * there is no point in copying default values to this child.
5654 */
5655 if (!root_cache->max_attr_size)
5656 return;
5657
5658 for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
5659 char mbuf[64];
5660 char *buf;
5661 struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
5662 ssize_t len;
5663
5664 if (!attr || !attr->store || !attr->show)
5665 continue;
5666
5667 /*
5668 * It is really bad that we have to allocate here, so we will
5669 * do it only as a fallback. If we actually allocate, though,
5670 * we can just use the allocated buffer until the end.
5671 *
5672 	 * Most of the slub attribute values tend to be very small in
5673 	 * size, but sysfs allows buffers up to a page, so page-sized
5674 	 * values can theoretically happen.
5675 */
5676 if (buffer)
5677 buf = buffer;
5678 else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf) &&
5679 !IS_ENABLED(CONFIG_SLUB_STATS))
5680 buf = mbuf;
5681 else {
5682 buffer = (char *) get_zeroed_page(GFP_KERNEL);
5683 if (WARN_ON(!buffer))
5684 continue;
5685 buf = buffer;
5686 }
5687
5688 len = attr->show(root_cache, buf);
5689 if (len > 0)
5690 attr->store(s, buf, len);
5691 }
5692
5693 if (buffer)
5694 free_page((unsigned long)buffer);
5695 #endif /* CONFIG_MEMCG */
5696 }
5697
5698 static void kmem_cache_release(struct kobject *k)
5699 {
5700 slab_kmem_cache_release(to_slab(k));
5701 }
5702
5703 static const struct sysfs_ops slab_sysfs_ops = {
5704 .show = slab_attr_show,
5705 .store = slab_attr_store,
5706 };
5707
5708 static struct kobj_type slab_ktype = {
5709 .sysfs_ops = &slab_sysfs_ops,
5710 .release = kmem_cache_release,
5711 };
5712
5713 static int uevent_filter(struct kset *kset, struct kobject *kobj)
5714 {
5715 struct kobj_type *ktype = get_ktype(kobj);
5716
5717 if (ktype == &slab_ktype)
5718 return 1;
5719 return 0;
5720 }
5721
5722 static const struct kset_uevent_ops slab_uevent_ops = {
5723 .filter = uevent_filter,
5724 };
5725
5726 static struct kset *slab_kset;
5727
5728 static inline struct kset *cache_kset(struct kmem_cache *s)
5729 {
5730 #ifdef CONFIG_MEMCG
5731 if (!is_root_cache(s))
5732 return s->memcg_params.root_cache->memcg_kset;
5733 #endif
5734 return slab_kset;
5735 }
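/*
 * For illustration: with CONFIG_MEMCG, the caches created for memory
 * cgroups are registered in their root cache's "cgroup" kset, so they
 * appear under /sys/kernel/slab/<root-cache>/cgroup/ rather than
 * directly in /sys/kernel/slab/.
 */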
5736
5737 #define ID_STR_LENGTH 64
5738
5739 /* Create a unique string id for a slab cache:
5740 *
5741  * Format: :[flags-]size
5742 */
5743 static char *create_unique_id(struct kmem_cache *s)
5744 {
5745 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5746 char *p = name;
5747
5748 if (!name)
5749 return ERR_PTR(-ENOMEM);
5750
5751 *p++ = ':';
5752 /*
5753 	 * First come the flags affecting slabcache operations. We will only
5754 * get here for aliasable slabs so we do not need to support
5755 * too many flags. The flags here must cover all flags that
5756 * are matched during merging to guarantee that the id is
5757 * unique.
5758 */
5759 if (s->flags & SLAB_CACHE_DMA)
5760 *p++ = 'd';
5761 if (s->flags & SLAB_CACHE_DMA32)
5762 *p++ = 'D';
5763 if (s->flags & SLAB_RECLAIM_ACCOUNT)
5764 *p++ = 'a';
5765 if (s->flags & SLAB_CONSISTENCY_CHECKS)
5766 *p++ = 'F';
5767 if (s->flags & SLAB_ACCOUNT)
5768 *p++ = 'A';
5769 if (p != name + 1)
5770 *p++ = '-';
5771 p += sprintf(p, "%07u", s->size);
5772
5773 BUG_ON(p > name + ID_STR_LENGTH - 1);
5774 return name;
5775 }
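/*
 * Example ids (illustrative): a plain cache with object size 192 gets
 * ":0000192"; a SLAB_CACHE_DMA + SLAB_ACCOUNT cache of the same size
 * gets ":dA-0000192".
 */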
5776
5777 static void sysfs_slab_remove_workfn(struct work_struct *work)
5778 {
5779 struct kmem_cache *s =
5780 container_of(work, struct kmem_cache, kobj_remove_work);
5781
5782 if (!s->kobj.state_in_sysfs)
5783 /*
5784 * For a memcg cache, this may be called during
5785 * deactivation and again on shutdown. Remove only once.
5786 * A cache is never shut down before deactivation is
5787 * complete, so no need to worry about synchronization.
5788 */
5789 goto out;
5790
5791 #ifdef CONFIG_MEMCG
5792 kset_unregister(s->memcg_kset);
5793 #endif
5794 kobject_uevent(&s->kobj, KOBJ_REMOVE);
5795 out:
5796 kobject_put(&s->kobj);
5797 }
5798
5799 static int sysfs_slab_add(struct kmem_cache *s)
5800 {
5801 int err;
5802 const char *name;
5803 struct kset *kset = cache_kset(s);
5804 int unmergeable = slab_unmergeable(s);
5805
5806 INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);
5807
5808 if (!kset) {
5809 kobject_init(&s->kobj, &slab_ktype);
5810 return 0;
5811 }
5812
5813 if (!unmergeable && disable_higher_order_debug &&
5814 (slub_debug & DEBUG_METADATA_FLAGS))
5815 unmergeable = 1;
5816
5817 if (unmergeable) {
5818 /*
5819 		 * This slab cache can never be merged, so we can use its name
5820 		 * directly. This is typically the case in debug situations,
5821 		 * where it also lets us catch duplicate names easily.
5822 */
5823 sysfs_remove_link(&slab_kset->kobj, s->name);
5824 name = s->name;
5825 } else {
5826 /*
5827 * Create a unique name for the slab as a target
5828 * for the symlinks.
5829 */
5830 name = create_unique_id(s);
5831 if (IS_ERR(name))
5832 return PTR_ERR(name);
5833 }
5834
5835 s->kobj.kset = kset;
5836 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
5837 if (err)
5838 goto out;
5839
5840 err = sysfs_create_group(&s->kobj, &slab_attr_group);
5841 if (err)
5842 goto out_del_kobj;
5843
5844 #ifdef CONFIG_MEMCG
5845 if (is_root_cache(s) && memcg_sysfs_enabled) {
5846 s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
5847 if (!s->memcg_kset) {
5848 err = -ENOMEM;
5849 goto out_del_kobj;
5850 }
5851 }
5852 #endif
5853
5854 kobject_uevent(&s->kobj, KOBJ_ADD);
5855 if (!unmergeable) {
5856 /* Setup first alias */
5857 sysfs_slab_alias(s, s->name);
5858 }
5859 out:
5860 if (!unmergeable)
5861 kfree(name);
5862 return err;
5863 out_del_kobj:
5864 kobject_del(&s->kobj);
5865 goto out;
5866 }
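/*
 * Net effect, for illustration: an unmergeable cache appears directly as
 * /sys/kernel/slab/<name>, while a mergeable cache is registered under
 * the unique id from create_unique_id() and then gets a symlink carrying
 * its human-readable name (and one per alias) pointing at that entry.
 */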
5867
5868 static void sysfs_slab_remove(struct kmem_cache *s)
5869 {
5870 if (slab_state < FULL)
5871 /*
5872 	 * Sysfs has not been set up yet, so there is no need to remove
5873 	 * the cache from sysfs.
5874 */
5875 return;
5876
5877 kobject_get(&s->kobj);
5878 schedule_work(&s->kobj_remove_work);
5879 }
5880
5881 void sysfs_slab_unlink(struct kmem_cache *s)
5882 {
5883 if (slab_state >= FULL)
5884 kobject_del(&s->kobj);
5885 }
5886
5887 void sysfs_slab_release(struct kmem_cache *s)
5888 {
5889 if (slab_state >= FULL)
5890 kobject_put(&s->kobj);
5891 }
5892
5893 /*
5894 * Need to buffer aliases during bootup until sysfs becomes
5895 * available lest we lose that information.
5896 */
5897 struct saved_alias {
5898 struct kmem_cache *s;
5899 const char *name;
5900 struct saved_alias *next;
5901 };
5902
5903 static struct saved_alias *alias_list;
5904
5905 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
5906 {
5907 struct saved_alias *al;
5908
5909 if (slab_state == FULL) {
5910 /*
5911 * If we have a leftover link then remove it.
5912 */
5913 sysfs_remove_link(&slab_kset->kobj, name);
5914 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
5915 }
5916
5917 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
5918 if (!al)
5919 return -ENOMEM;
5920
5921 al->s = s;
5922 al->name = name;
5923 al->next = alias_list;
5924 alias_list = al;
5925 return 0;
5926 }
5927
5928 static int __init slab_sysfs_init(void)
5929 {
5930 struct kmem_cache *s;
5931 int err;
5932
5933 mutex_lock(&slab_mutex);
5934
5935 slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
5936 if (!slab_kset) {
5937 mutex_unlock(&slab_mutex);
5938 pr_err("Cannot register slab subsystem.\n");
5939 return -ENOSYS;
5940 }
5941
5942 slab_state = FULL;
5943
5944 list_for_each_entry(s, &slab_caches, list) {
5945 err = sysfs_slab_add(s);
5946 if (err)
5947 pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
5948 s->name);
5949 }
5950
5951 while (alias_list) {
5952 struct saved_alias *al = alias_list;
5953
5954 alias_list = alias_list->next;
5955 err = sysfs_slab_alias(al->s, al->name);
5956 if (err)
5957 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
5958 al->name);
5959 kfree(al);
5960 }
5961
5962 mutex_unlock(&slab_mutex);
5963 resiliency_test();
5964 return 0;
5965 }
5966
5967 __initcall(slab_sysfs_init);
5968 #endif /* CONFIG_SYSFS */
5969
5970 /*
5971 * The /proc/slabinfo ABI
5972 */
5973 #ifdef CONFIG_SLUB_DEBUG
5974 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
5975 {
5976 unsigned long nr_slabs = 0;
5977 unsigned long nr_objs = 0;
5978 unsigned long nr_free = 0;
5979 int node;
5980 struct kmem_cache_node *n;
5981
5982 for_each_kmem_cache_node(s, node, n) {
5983 nr_slabs += node_nr_slabs(n);
5984 nr_objs += node_nr_objs(n);
5985 nr_free += count_partial(n, count_free);
5986 }
5987
5988 sinfo->active_objs = nr_objs - nr_free;
5989 sinfo->num_objs = nr_objs;
5990 sinfo->active_slabs = nr_slabs;
5991 sinfo->num_slabs = nr_slabs;
5992 sinfo->objects_per_slab = oo_objects(s->oo);
5993 sinfo->cache_order = oo_order(s->oo);
5994 }
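/*
 * Worked example (illustrative): a cache with 10 slabs of 32 objects
 * each and 50 objects currently free reports num_objs = 320,
 * active_objs = 270, and num_slabs = active_slabs = 10 (no separate
 * active slab count is tracked here).
 */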
5995
5996 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
5997 {
5998 }
5999
6000 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
6001 size_t count, loff_t *ppos)
6002 {
6003 return -EIO;
6004 }
6005 #endif /* CONFIG_SLUB_DEBUG */
6006