1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * SLUB: A slab allocator that limits cache line use instead of queuing
4 * objects in per cpu and per node lists.
5 *
6 * The allocator synchronizes using per slab locks or atomic operations
7 * and only uses a centralized lock to manage a pool of partial slabs.
8 *
9 * (C) 2007 SGI, Christoph Lameter
10 * (C) 2011 Linux Foundation, Christoph Lameter
11 */
12
13 #include <linux/mm.h>
14 #include <linux/swap.h> /* struct reclaim_state */
15 #include <linux/module.h>
16 #include <linux/bit_spinlock.h>
17 #include <linux/interrupt.h>
18 #include <linux/swab.h>
19 #include <linux/bitops.h>
20 #include <linux/slab.h>
21 #include "slab.h"
22 #include <linux/proc_fs.h>
23 #include <linux/seq_file.h>
24 #include <linux/kasan.h>
25 #include <linux/cpu.h>
26 #include <linux/cpuset.h>
27 #include <linux/mempolicy.h>
28 #include <linux/ctype.h>
29 #include <linux/debugobjects.h>
30 #include <linux/kallsyms.h>
31 #include <linux/kfence.h>
32 #include <linux/memory.h>
33 #include <linux/math64.h>
34 #include <linux/fault-inject.h>
35 #include <linux/stacktrace.h>
36 #include <linux/prefetch.h>
37 #include <linux/memcontrol.h>
38 #include <linux/random.h>
39 #include <kunit/test.h>
40
41 #include <linux/debugfs.h>
42 #include <trace/events/kmem.h>
43
44 #include "internal.h"
45
46 /*
47 * Lock order:
48 * 1. slab_mutex (Global Mutex)
49 * 2. node->list_lock (Spinlock)
50 * 3. kmem_cache->cpu_slab->lock (Local lock)
51 * 4. slab_lock(page) (Only on some arches or for debugging)
52 * 5. object_map_lock (Only for debugging)
53 *
54 * slab_mutex
55 *
56 * The role of the slab_mutex is to protect the list of all the slabs
57 * and to synchronize major metadata changes to slab cache structures.
58 * Also synchronizes memory hotplug callbacks.
59 *
60 * slab_lock
61 *
62 * The slab_lock is a wrapper around the page lock, thus it is a bit
63 * spinlock.
64 *
65 * The slab_lock is only used for debugging and on arches that do not
66 * have the ability to do a cmpxchg_double. It only protects:
67 * A. page->freelist -> List of object free in a page
68 * B. page->inuse -> Number of objects in use
69 * C. page->objects -> Number of objects in page
70 * D. page->frozen -> frozen state
71 *
72 * Frozen slabs
73 *
74 * If a slab is frozen then it is exempt from list management. It is not
75 * on any list except per cpu partial list. The processor that froze the
76 * slab is the one who can perform list operations on the page. Other
77 * processors may put objects onto the freelist but the processor that
78 * froze the slab is the only one that can retrieve the objects from the
79 * page's freelist.
80 *
81 * list_lock
82 *
83 * The list_lock protects the partial and full list on each node and
84 * the partial slab counter. If taken then no new slabs may be added or
85 * removed from the lists nor may the number of partial slabs be modified.
86 * (Note that the total number of slabs is an atomic value that may be
87 * modified without taking the list lock).
88 *
89 * The list_lock is a centralized lock and thus we avoid taking it as
90 * much as possible. As long as SLUB does not have to handle partial
91 * slabs, operations can continue without any centralized lock. F.e.
92 * allocating a long series of objects that fill up slabs does not require
93 * the list lock.
94 *
95 * cpu_slab->lock local lock
96 *
97 * This lock protects slowpath manipulation of all kmem_cache_cpu fields
98 * except the stat counters. This is a percpu structure manipulated only by
99 * the local cpu, so the lock protects against being preempted or interrupted
100 * by an irq. Fast path operations rely on lockless operations instead.
101 * On PREEMPT_RT, the local lock does not actually disable irqs (and thus
102 * prevent the lockless operations), so fastpath operations also need to take
103 * the lock and are no longer lockless.
104 *
105 * lockless fastpaths
106 *
107 * The fast path allocation (slab_alloc_node()) and freeing (do_slab_free())
108 * are fully lockless when satisfied from the percpu slab (and when
109 * cmpxchg_double is possible to use, otherwise slab_lock is taken).
110 * They also don't disable preemption or migration or irqs. They rely on
111 * the transaction id (tid) field to detect being preempted or moved to
112 * another cpu.
113 *
114 * irq, preemption, migration considerations
115 *
116 * Interrupts are disabled as part of list_lock or local_lock operations, or
117 * around the slab_lock operation, in order to make the slab allocator safe
118 * to use in the context of an irq.
119 *
120 * In addition, preemption (or migration on PREEMPT_RT) is disabled in the
121 * allocation slowpath, bulk allocation, and put_cpu_partial(), so that the
122 * local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer
123 * doesn't have to be revalidated in each section protected by the local lock.
124 *
125 * SLUB assigns one slab for allocation to each processor.
126 * Allocations only occur from these slabs called cpu slabs.
127 *
128 * Slabs with free elements are kept on a partial list and during regular
129 * operations no list for full slabs is used. If an object in a full slab is
130 * freed then the slab will show up again on the partial lists.
131 * We track full slabs for debugging purposes though because otherwise we
132 * cannot scan all objects.
133 *
134 * Slabs are freed when they become empty. Teardown and setup is
135 * minimal so we rely on the page allocators per cpu caches for
136 * fast frees and allocs.
137 *
138 * page->frozen The slab is frozen and exempt from list processing.
139 * This means that the slab is dedicated to a purpose
140 * such as satisfying allocations for a specific
141 * processor. Objects may be freed in the slab while
142 * it is frozen but slab_free will then skip the usual
143 * list operations. It is up to the processor holding
144 * the slab to integrate the slab into the slab lists
145 * when the slab is no longer needed.
146 *
147 * One use of this flag is to mark slabs that are
148 * used for allocations. Then such a slab becomes a cpu
149 * slab. The cpu slab may be equipped with an additional
150 * freelist that allows lockless access to
151 * free objects in addition to the regular freelist
152 * that requires the slab lock.
153 *
154 * SLAB_DEBUG_FLAGS Slab requires special handling due to debug
155 * options set. This moves slab handling out of
156 * the fast path and disables lockless freelists.
157 */
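/*
 * A minimal nesting sketch of the lock order above, modeled on what the
 * debug free path (free_debug_processing() below) actually does when it
 * needs both the node lock and the slab lock:
 *
 *	spin_lock_irqsave(&n->list_lock, flags);
 *	slab_lock(page, &flags2);
 *	... consistency checks on the page and its objects ...
 *	slab_unlock(page, &flags2);
 *	spin_unlock_irqrestore(&n->list_lock, flags);
 */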
158
159 /*
160 * We could simply use migrate_disable()/enable() but as long as it's a
161 * function call even on !PREEMPT_RT, use inline preempt_disable() there.
162 */
163 #ifndef CONFIG_PREEMPT_RT
164 #define slub_get_cpu_ptr(var) get_cpu_ptr(var)
165 #define slub_put_cpu_ptr(var) put_cpu_ptr(var)
166 #else
167 #define slub_get_cpu_ptr(var) \
168 ({ \
169 migrate_disable(); \
170 this_cpu_ptr(var); \
171 })
172 #define slub_put_cpu_ptr(var) \
173 do { \
174 (void)(var); \
175 migrate_enable(); \
176 } while (0)
177 #endif
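/*
 * Usage sketch (illustrative only, not a real call site): the pair brackets
 * a section that must stay on one CPU:
 *
 *	struct kmem_cache_cpu *c;
 *
 *	c = slub_get_cpu_ptr(s->cpu_slab);
 *	... work on this cpu's kmem_cache_cpu; preemption is disabled here,
 *	    or only migration on PREEMPT_RT ...
 *	slub_put_cpu_ptr(s->cpu_slab);
 */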
178
179 #ifdef CONFIG_SLUB_DEBUG
180 #ifdef CONFIG_SLUB_DEBUG_ON
181 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
182 #else
183 DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
184 #endif
185 #endif /* CONFIG_SLUB_DEBUG */
186
187 static inline bool kmem_cache_debug(struct kmem_cache *s)
188 {
189 return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
190 }
191
192 void *fixup_red_left(struct kmem_cache *s, void *p)
193 {
194 if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
195 p += s->red_left_pad;
196
197 return p;
198 }
199
200 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
201 {
202 #ifdef CONFIG_SLUB_CPU_PARTIAL
203 return !kmem_cache_debug(s);
204 #else
205 return false;
206 #endif
207 }
208
209 /*
210 * Issues still to be resolved:
211 *
212 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
213 *
214 * - Variable sizing of the per node arrays
215 */
216
217 /* Enable to log cmpxchg failures */
218 #undef SLUB_DEBUG_CMPXCHG
219
220 /*
221 * Minimum number of partial slabs. These will be left on the partial
222 * lists even if they are empty. kmem_cache_shrink may reclaim them.
223 */
224 #define MIN_PARTIAL 5
225
226 /*
227 * Maximum number of desirable partial slabs.
228 * The existence of more partial slabs makes kmem_cache_shrink
229 * sort the partial list by the number of objects in use.
230 */
231 #define MAX_PARTIAL 10
232
233 #define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
234 SLAB_POISON | SLAB_STORE_USER)
235
236 /*
237 * These debug flags cannot use CMPXCHG because there might be consistency
238 * issues when checking or reading debug information
239 */
240 #define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
241 SLAB_TRACE)
242
243
244 /*
245 * Debugging flags that require metadata to be stored in the slab. These get
246 * disabled when slub_debug=O is used and a cache's min order increases with
247 * metadata.
248 */
249 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
250
251 #define OO_SHIFT 16
252 #define OO_MASK ((1 << OO_SHIFT) - 1)
253 #define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
254
255 /* Internal SLUB flags */
256 /* Poison object */
257 #define __OBJECT_POISON ((slab_flags_t __force)0x80000000U)
258 /* Use cmpxchg_double */
259 #define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000U)
260
261 #ifdef CONFIG_SYSFS
262 static int sysfs_slab_add(struct kmem_cache *);
263 static int sysfs_slab_alias(struct kmem_cache *, const char *);
264 #else
265 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
266 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
267 { return 0; }
268 #endif
269
270 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
271 static void debugfs_slab_add(struct kmem_cache *);
272 #else
273 static inline void debugfs_slab_add(struct kmem_cache *s) { }
274 #endif
275
276 static inline void stat(const struct kmem_cache *s, enum stat_item si)
277 {
278 #ifdef CONFIG_SLUB_STATS
279 /*
280 * The rmw is racy on a preemptible kernel but this is acceptable, so
281 * avoid this_cpu_add()'s irq-disable overhead.
282 */
283 raw_cpu_inc(s->cpu_slab->stat[si]);
284 #endif
285 }
286
287 /*
288 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
289 * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
290 * differ during memory hotplug/hotremove operations.
291 * Protected by slab_mutex.
292 */
293 static nodemask_t slab_nodes;
294
295 /*
296 * Workqueue used for flush_cpu_slab().
297 */
298 static struct workqueue_struct *flushwq;
299
300 /********************************************************************
301 * Core slab cache functions
302 *******************************************************************/
303
304 /*
305 * Returns freelist pointer (ptr). With hardening, this is obfuscated
306 * with an XOR of the address where the pointer is held and a per-cache
307 * random number.
308 */
309 static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
310 unsigned long ptr_addr)
311 {
312 #ifdef CONFIG_SLAB_FREELIST_HARDENED
313 /*
314 * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
315 * Normally, this doesn't cause any issues, as both set_freepointer()
316 * and get_freepointer() are called with a pointer with the same tag.
317 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
318 * example, when __free_slab() iterates over objects in a cache, it
319 * passes untagged pointers to check_object(). check_object() in turn
320 * calls get_freepointer() with an untagged pointer, which causes the
321 * freepointer to be restored incorrectly.
322 */
323 return (void *)((unsigned long)ptr ^ s->random ^
324 swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
325 #else
326 return ptr;
327 #endif
328 }
329
330 /* Returns the freelist pointer recorded at location ptr_addr. */
331 static inline void *freelist_dereference(const struct kmem_cache *s,
332 void *ptr_addr)
333 {
334 return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr),
335 (unsigned long)ptr_addr);
336 }
337
338 static inline void *get_freepointer(struct kmem_cache *s, void *object)
339 {
340 object = kasan_reset_tag(object);
341 return freelist_dereference(s, object + s->offset);
342 }
343
344 static void prefetch_freepointer(const struct kmem_cache *s, void *object)
345 {
346 prefetch(object + s->offset);
347 }
348
349 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
350 {
351 unsigned long freepointer_addr;
352 void *p;
353
354 if (!debug_pagealloc_enabled_static())
355 return get_freepointer(s, object);
356
357 object = kasan_reset_tag(object);
358 freepointer_addr = (unsigned long)object + s->offset;
359 copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p));
360 return freelist_ptr(s, p, freepointer_addr);
361 }
362
363 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
364 {
365 unsigned long freeptr_addr = (unsigned long)object + s->offset;
366
367 #ifdef CONFIG_SLAB_FREELIST_HARDENED
368 BUG_ON(object == fp); /* naive detection of double free or corruption */
369 #endif
370
371 freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
372 *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
373 }
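/*
 * Worked example of the hardened encoding (a sketch, not tied to real
 * values): with the per-cache secret s->random == R and the free pointer
 * stored at the (tag-reset) address A, set_freepointer() writes
 * fp ^ R ^ swab(A). Reading it back through freelist_ptr() with the same
 * R and A XORs the same two values out again and recovers fp, since
 * x ^ y ^ y == x. A stray or attacker-controlled plain pointer written
 * into the slot decodes to garbage unless both R and A are known.
 */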
374
375 /* Loop over all objects in a slab */
376 #define for_each_object(__p, __s, __addr, __objects) \
377 for (__p = fixup_red_left(__s, __addr); \
378 __p < (__addr) + (__objects) * (__s)->size; \
379 __p += (__s)->size)
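/*
 * Example use (as in get_each_object_track() below; do_something() is just
 * a stand-in):
 *
 *	void *p;
 *
 *	for_each_object(p, s, page_address(page), page->objects)
 *		do_something(s, p);
 *
 * __addr is the start of the page's memory; fixup_red_left() skips the left
 * red zone so that __p always points at the object itself.
 */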
380
381 static inline unsigned int order_objects(unsigned int order, unsigned int size)
382 {
383 return ((unsigned int)PAGE_SIZE << order) / size;
384 }
385
386 static inline struct kmem_cache_order_objects oo_make(unsigned int order,
387 unsigned int size)
388 {
389 struct kmem_cache_order_objects x = {
390 (order << OO_SHIFT) + order_objects(order, size)
391 };
392
393 return x;
394 }
395
396 static inline unsigned int oo_order(struct kmem_cache_order_objects x)
397 {
398 return x.x >> OO_SHIFT;
399 }
400
401 static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
402 {
403 return x.x & OO_MASK;
404 }
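/*
 * Worked example of the packed encoding (assuming 4K pages): a cache with
 * size 256 allocated from order-3 slabs gives order_objects(3, 256) =
 * (4096 << 3) / 256 = 128, so oo_make(3, 256) stores (3 << 16) + 128 =
 * 0x30080. oo_order() then yields 3 and oo_objects() yields 128 again.
 */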
405
406 /*
407 * Per slab locking using the pagelock
408 */
409 static __always_inline void __slab_lock(struct page *page)
410 {
411 VM_BUG_ON_PAGE(PageTail(page), page);
412 bit_spin_lock(PG_locked, &page->flags);
413 }
414
415 static __always_inline void __slab_unlock(struct page *page)
416 {
417 VM_BUG_ON_PAGE(PageTail(page), page);
418 __bit_spin_unlock(PG_locked, &page->flags);
419 }
420
421 static __always_inline void slab_lock(struct page *page, unsigned long *flags)
422 {
423 if (IS_ENABLED(CONFIG_PREEMPT_RT))
424 local_irq_save(*flags);
425 __slab_lock(page);
426 }
427
428 static __always_inline void slab_unlock(struct page *page, unsigned long *flags)
429 {
430 __slab_unlock(page);
431 if (IS_ENABLED(CONFIG_PREEMPT_RT))
432 local_irq_restore(*flags);
433 }
434
435 /*
436 * Interrupts must be disabled (for the fallback code to work right), typically
437 * by an _irqsave() lock variant. Except on PREEMPT_RT where locks are different
438 * so we disable interrupts as part of slab_[un]lock().
439 */
440 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
441 void *freelist_old, unsigned long counters_old,
442 void *freelist_new, unsigned long counters_new,
443 const char *n)
444 {
445 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
446 lockdep_assert_irqs_disabled();
447 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
448 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
449 if (s->flags & __CMPXCHG_DOUBLE) {
450 if (cmpxchg_double(&page->freelist, &page->counters,
451 freelist_old, counters_old,
452 freelist_new, counters_new))
453 return true;
454 } else
455 #endif
456 {
457 /* init to 0 to prevent spurious warnings */
458 unsigned long flags = 0;
459
460 slab_lock(page, &flags);
461 if (page->freelist == freelist_old &&
462 page->counters == counters_old) {
463 page->freelist = freelist_new;
464 page->counters = counters_new;
465 slab_unlock(page, &flags);
466 return true;
467 }
468 slab_unlock(page, &flags);
469 }
470
471 cpu_relax();
472 stat(s, CMPXCHG_DOUBLE_FAIL);
473
474 #ifdef SLUB_DEBUG_CMPXCHG
475 pr_info("%s %s: cmpxchg double redo ", n, s->name);
476 #endif
477
478 return false;
479 }
480
481 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
482 void *freelist_old, unsigned long counters_old,
483 void *freelist_new, unsigned long counters_new,
484 const char *n)
485 {
486 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
487 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
488 if (s->flags & __CMPXCHG_DOUBLE) {
489 if (cmpxchg_double(&page->freelist, &page->counters,
490 freelist_old, counters_old,
491 freelist_new, counters_new))
492 return true;
493 } else
494 #endif
495 {
496 unsigned long flags;
497
498 local_irq_save(flags);
499 __slab_lock(page);
500 if (page->freelist == freelist_old &&
501 page->counters == counters_old) {
502 page->freelist = freelist_new;
503 page->counters = counters_new;
504 __slab_unlock(page);
505 local_irq_restore(flags);
506 return true;
507 }
508 __slab_unlock(page);
509 local_irq_restore(flags);
510 }
511
512 cpu_relax();
513 stat(s, CMPXCHG_DOUBLE_FAIL);
514
515 #ifdef SLUB_DEBUG_CMPXCHG
516 pr_info("%s %s: cmpxchg double redo ", n, s->name);
517 #endif
518
519 return false;
520 }
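/*
 * Callers use cmpxchg_double_slab() in a retry loop, roughly (sketch only;
 * compute_update() is a stand-in for whatever the caller does to derive the
 * new freelist and counters):
 *
 *	do {
 *		old_freelist = page->freelist;
 *		old_counters = page->counters;
 *		compute_update(&new_freelist, &new_counters);
 *	} while (!cmpxchg_double_slab(s, page, old_freelist, old_counters,
 *				      new_freelist, new_counters, "example"));
 */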
521
522 #ifdef CONFIG_SLUB_DEBUG
523 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
524 static DEFINE_RAW_SPINLOCK(object_map_lock);
525
526 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
527 struct page *page)
528 {
529 void *addr = page_address(page);
530 void *p;
531
532 bitmap_zero(obj_map, page->objects);
533
534 for (p = page->freelist; p; p = get_freepointer(s, p))
535 set_bit(__obj_to_index(s, addr, p), obj_map);
536 }
537
538 static bool slab_add_kunit_errors(void)
539 {
540 struct kunit_resource *resource;
541
542 if (likely(!current->kunit_test))
543 return false;
544
545 resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
546 if (!resource)
547 return false;
548
549 (*(int *)resource->data)++;
550 kunit_put_resource(resource);
551 return true;
552 }
553
554 /*
555 * Determine a map of objects in use on a page.
556 *
557 * Node listlock must be held to guarantee that the page does
558 * not vanish from under us.
559 */
560 static unsigned long *get_map(struct kmem_cache *s, struct page *page)
561 __acquires(&object_map_lock)
562 {
563 VM_BUG_ON(!irqs_disabled());
564
565 raw_spin_lock(&object_map_lock);
566
567 __fill_map(object_map, s, page);
568
569 return object_map;
570 }
571
572 static void put_map(unsigned long *map) __releases(&object_map_lock)
573 {
574 VM_BUG_ON(map != object_map);
575 raw_spin_unlock(&object_map_lock);
576 }
577
578 static inline unsigned int size_from_object(struct kmem_cache *s)
579 {
580 if (s->flags & SLAB_RED_ZONE)
581 return s->size - s->red_left_pad;
582
583 return s->size;
584 }
585
586 static inline void *restore_red_left(struct kmem_cache *s, void *p)
587 {
588 if (s->flags & SLAB_RED_ZONE)
589 p -= s->red_left_pad;
590
591 return p;
592 }
593
594 /*
595 * Debug settings:
596 */
597 #if defined(CONFIG_SLUB_DEBUG_ON)
598 static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
599 #else
600 static slab_flags_t slub_debug;
601 #endif
602
603 static char *slub_debug_string;
604 static int disable_higher_order_debug;
605
606 /*
607 * slub is about to manipulate internal object metadata. This memory lies
608 * outside the range of the allocated object, so accessing it would normally
609 * be reported by kasan as a bounds error. metadata_access_enable() is used
610 * to tell kasan that these accesses are OK.
611 */
612 static inline void metadata_access_enable(void)
613 {
614 kasan_disable_current();
615 }
616
617 static inline void metadata_access_disable(void)
618 {
619 kasan_enable_current();
620 }
621
622 /*
623 * Object debugging
624 */
625
626 /* Verify that a pointer has an address that is valid within a slab page */
627 static inline int check_valid_pointer(struct kmem_cache *s,
628 struct page *page, void *object)
629 {
630 void *base;
631
632 if (!object)
633 return 1;
634
635 base = page_address(page);
636 object = kasan_reset_tag(object);
637 object = restore_red_left(s, object);
638 if (object < base || object >= base + page->objects * s->size ||
639 (object - base) % s->size) {
640 return 0;
641 }
642
643 return 1;
644 }
645
646 static void print_section(char *level, char *text, u8 *addr,
647 unsigned int length)
648 {
649 metadata_access_enable();
650 print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
651 16, 1, kasan_reset_tag((void *)addr), length, 1);
652 metadata_access_disable();
653 }
654
655 /*
656 * See comment in calculate_sizes().
657 */
658 static inline bool freeptr_outside_object(struct kmem_cache *s)
659 {
660 return s->offset >= s->inuse;
661 }
662
663 /*
664 * Return offset of the end of info block which is inuse + free pointer if
665 * not overlapping with object.
666 */
667 static inline unsigned int get_info_end(struct kmem_cache *s)
668 {
669 if (freeptr_outside_object(s))
670 return s->inuse + sizeof(void *);
671 else
672 return s->inuse;
673 }
674
675 static struct track *get_track(struct kmem_cache *s, void *object,
676 enum track_item alloc)
677 {
678 struct track *p;
679
680 p = object + get_info_end(s);
681
682 return kasan_reset_tag(p + alloc);
683 }
684
685 /*
686 * Loop through all the slab objects in a page and hand each object's
687 * track structure to the callback fn, which extracts the required info
688 * into its private data. The return value is the number of track
689 * structures that were processed before fn returned a negative value
690 * or the objects ran out.
691 */
692 unsigned long get_each_object_track(struct kmem_cache *s,
693 struct page *page, enum track_item alloc,
694 int (*fn)(const struct kmem_cache *, const void *,
695 const struct track *, void *), void *private)
696 {
697 void *p;
698 struct track *t;
699 int ret;
700 unsigned long num_track = 0;
701 unsigned long flags = 0;
702
703 if (!slub_debug || !(s->flags & SLAB_STORE_USER))
704 return 0;
705
706 slab_lock(page, &flags);
707 for_each_object(p, s, page_address(page), page->objects) {
708 t = get_track(s, p, alloc);
709 metadata_access_enable();
710 ret = fn(s, p, t, private);
711 metadata_access_disable();
712 if (ret < 0)
713 break;
714 num_track += 1;
715 }
716 slab_unlock(page, &flags);
717 return num_track;
718 }
719 EXPORT_SYMBOL_NS_GPL(get_each_object_track, MINIDUMP);
720
721 static void set_track(struct kmem_cache *s, void *object,
722 enum track_item alloc, unsigned long addr)
723 {
724 struct track *p = get_track(s, object, alloc);
725
726 if (addr) {
727 #ifdef CONFIG_STACKTRACE
728 unsigned int nr_entries;
729
730 metadata_access_enable();
731 nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
732 TRACK_ADDRS_COUNT, 3);
733 metadata_access_disable();
734
735 if (nr_entries < TRACK_ADDRS_COUNT)
736 p->addrs[nr_entries] = 0;
737 #endif
738 p->addr = addr;
739 p->cpu = smp_processor_id();
740 p->pid = current->pid;
741 p->when = jiffies;
742 } else {
743 memset(p, 0, sizeof(struct track));
744 }
745 }
746
747 static void init_tracking(struct kmem_cache *s, void *object)
748 {
749 if (!(s->flags & SLAB_STORE_USER))
750 return;
751
752 set_track(s, object, TRACK_FREE, 0UL);
753 set_track(s, object, TRACK_ALLOC, 0UL);
754 }
755
756 static void print_track(const char *s, struct track *t, unsigned long pr_time)
757 {
758 if (!t->addr)
759 return;
760
761 pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
762 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
763 #ifdef CONFIG_STACKTRACE
764 {
765 int i;
766 for (i = 0; i < TRACK_ADDRS_COUNT; i++)
767 if (t->addrs[i])
768 pr_err("\t%pS\n", (void *)t->addrs[i]);
769 else
770 break;
771 }
772 #endif
773 }
774
775 void print_tracking(struct kmem_cache *s, void *object)
776 {
777 unsigned long pr_time = jiffies;
778 if (!(s->flags & SLAB_STORE_USER))
779 return;
780
781 print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
782 print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
783 }
784
785 static void print_page_info(struct page *page)
786 {
787 pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%#lx(%pGp)\n",
788 page, page->objects, page->inuse, page->freelist,
789 page->flags, &page->flags);
790
791 }
792
793 static void slab_bug(struct kmem_cache *s, char *fmt, ...)
794 {
795 struct va_format vaf;
796 va_list args;
797
798 va_start(args, fmt);
799 vaf.fmt = fmt;
800 vaf.va = &args;
801 pr_err("=============================================================================\n");
802 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
803 pr_err("-----------------------------------------------------------------------------\n\n");
804 va_end(args);
805 }
806
807 __printf(2, 3)
808 static void slab_fix(struct kmem_cache *s, char *fmt, ...)
809 {
810 struct va_format vaf;
811 va_list args;
812
813 if (slab_add_kunit_errors())
814 return;
815
816 va_start(args, fmt);
817 vaf.fmt = fmt;
818 vaf.va = &args;
819 pr_err("FIX %s: %pV\n", s->name, &vaf);
820 va_end(args);
821 }
822
823 static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
824 void **freelist, void *nextfree)
825 {
826 if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
827 !check_valid_pointer(s, page, nextfree) && freelist) {
828 object_err(s, page, *freelist, "Freechain corrupt");
829 *freelist = NULL;
830 slab_fix(s, "Isolate corrupted freechain");
831 return true;
832 }
833
834 return false;
835 }
836
837 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
838 {
839 unsigned int off; /* Offset of last byte */
840 u8 *addr = page_address(page);
841
842 print_tracking(s, p);
843
844 print_page_info(page);
845
846 pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
847 p, p - addr, get_freepointer(s, p));
848
849 if (s->flags & SLAB_RED_ZONE)
850 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
851 s->red_left_pad);
852 else if (p > addr + 16)
853 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
854
855 print_section(KERN_ERR, "Object ", p,
856 min_t(unsigned int, s->object_size, PAGE_SIZE));
857 if (s->flags & SLAB_RED_ZONE)
858 print_section(KERN_ERR, "Redzone ", p + s->object_size,
859 s->inuse - s->object_size);
860
861 off = get_info_end(s);
862
863 if (s->flags & SLAB_STORE_USER)
864 off += 2 * sizeof(struct track);
865
866 off += kasan_metadata_size(s);
867
868 if (off != size_from_object(s))
869 /* Beginning of the filler is the free pointer */
870 print_section(KERN_ERR, "Padding ", p + off,
871 size_from_object(s) - off);
872
873 dump_stack();
874 }
875
876 void object_err(struct kmem_cache *s, struct page *page,
877 u8 *object, char *reason)
878 {
879 if (slab_add_kunit_errors())
880 return;
881
882 slab_bug(s, "%s", reason);
883 print_trailer(s, page, object);
884 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
885 }
886
887 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
888 const char *fmt, ...)
889 {
890 va_list args;
891 char buf[100];
892
893 if (slab_add_kunit_errors())
894 return;
895
896 va_start(args, fmt);
897 vsnprintf(buf, sizeof(buf), fmt, args);
898 va_end(args);
899 slab_bug(s, "%s", buf);
900 print_page_info(page);
901 dump_stack();
902 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
903 }
904
905 static void init_object(struct kmem_cache *s, void *object, u8 val)
906 {
907 u8 *p = kasan_reset_tag(object);
908
909 if (s->flags & SLAB_RED_ZONE)
910 memset(p - s->red_left_pad, val, s->red_left_pad);
911
912 if (s->flags & __OBJECT_POISON) {
913 memset(p, POISON_FREE, s->object_size - 1);
914 p[s->object_size - 1] = POISON_END;
915 }
916
917 if (s->flags & SLAB_RED_ZONE)
918 memset(p + s->object_size, val, s->inuse - s->object_size);
919 }
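/*
 * Byte-pattern sketch for a redzoned, poisoned object right after
 * init_object(s, object, SLUB_RED_INACTIVE):
 *
 *	object - red_left_pad .. object - 1		0xbb (SLUB_RED_INACTIVE)
 *	object .. object + object_size - 2		0x6b (POISON_FREE)
 *	object + object_size - 1			0xa5 (POISON_END)
 *	object + object_size .. object + inuse - 1	0xbb again
 *
 * With val == SLUB_RED_ACTIVE the red zones are 0xcc instead.
 */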
920
921 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
922 void *from, void *to)
923 {
924 slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
925 memset(from, data, to - from);
926 }
927
928 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
929 u8 *object, char *what,
930 u8 *start, unsigned int value, unsigned int bytes)
931 {
932 u8 *fault;
933 u8 *end;
934 u8 *addr = page_address(page);
935
936 metadata_access_enable();
937 fault = memchr_inv(kasan_reset_tag(start), value, bytes);
938 metadata_access_disable();
939 if (!fault)
940 return 1;
941
942 end = start + bytes;
943 while (end > fault && end[-1] == value)
944 end--;
945
946 if (slab_add_kunit_errors())
947 goto skip_bug_print;
948
949 slab_bug(s, "%s overwritten", what);
950 pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
951 fault, end - 1, fault - addr,
952 fault[0], value);
953 print_trailer(s, page, object);
954 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
955
956 skip_bug_print:
957 restore_bytes(s, what, value, fault, end);
958 return 0;
959 }
960
961 /*
962 * Object layout:
963 *
964 * object address
965 * Bytes of the object to be managed.
966 * If the freepointer may overlay the object then the free
967 * pointer is at the middle of the object.
968 *
969 * Poisoning uses 0x6b (POISON_FREE) and the last byte is
970 * 0xa5 (POISON_END)
971 *
972 * object + s->object_size
973 * Padding to reach word boundary. This is also used for Redzoning.
974 * Padding is extended by another word if Redzoning is enabled and
975 * object_size == inuse.
976 *
977 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
978 * 0xcc (RED_ACTIVE) for objects in use.
979 *
980 * object + s->inuse
981 * Meta data starts here.
982 *
983 * A. Free pointer (if we cannot overwrite object on free)
984 * B. Tracking data for SLAB_STORE_USER
985 * C. Padding to reach required alignment boundary or at minimum
986 * one word if debugging is on to be able to detect writes
987 * before the word boundary.
988 *
989 * Padding is done using 0x5a (POISON_INUSE)
990 *
991 * object + s->size
992 * Nothing is used beyond s->size.
993 *
994 * If slabcaches are merged then the object_size and inuse boundaries are mostly
995 * ignored. And therefore no slab options that rely on these boundaries
996 * may be used with merged slabcaches.
997 */
998
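/*
 * A hypothetical worked layout (the numbers are only an illustration, the
 * real values come out of calculate_sizes()): a 64-bit cache with
 * object_size = 24, SLAB_RED_ZONE and SLAB_STORE_USER might look like
 *
 *	left red zone of red_left_pad bytes	(before the object address)
 *	24 bytes of object			(object .. object + object_size)
 *	right red zone				(up to object + inuse)
 *	free pointer				(if it cannot live inside the object)
 *	2 * sizeof(struct track)		(alloc and free tracking)
 *	KASAN metadata and padding		(up to object + size)
 *
 * which is exactly the region that get_info_end(), print_trailer() and
 * check_pad_bytes() walk.
 */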
999 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
1000 {
1001 unsigned long off = get_info_end(s); /* The end of info */
1002
1003 if (s->flags & SLAB_STORE_USER)
1004 /* We also have user information there */
1005 off += 2 * sizeof(struct track);
1006
1007 off += kasan_metadata_size(s);
1008
1009 if (size_from_object(s) == off)
1010 return 1;
1011
1012 return check_bytes_and_report(s, page, p, "Object padding",
1013 p + off, POISON_INUSE, size_from_object(s) - off);
1014 }
1015
1016 /* Check the pad bytes at the end of a slab page */
1017 static int slab_pad_check(struct kmem_cache *s, struct page *page)
1018 {
1019 u8 *start;
1020 u8 *fault;
1021 u8 *end;
1022 u8 *pad;
1023 int length;
1024 int remainder;
1025
1026 if (!(s->flags & SLAB_POISON))
1027 return 1;
1028
1029 start = page_address(page);
1030 length = page_size(page);
1031 end = start + length;
1032 remainder = length % s->size;
1033 if (!remainder)
1034 return 1;
1035
1036 pad = end - remainder;
1037 metadata_access_enable();
1038 fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
1039 metadata_access_disable();
1040 if (!fault)
1041 return 1;
1042 while (end > fault && end[-1] == POISON_INUSE)
1043 end--;
1044
1045 slab_err(s, page, "Padding overwritten. 0x%p-0x%p @offset=%tu",
1046 fault, end - 1, fault - start);
1047 print_section(KERN_ERR, "Padding ", pad, remainder);
1048
1049 restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
1050 return 0;
1051 }
1052
1053 static int check_object(struct kmem_cache *s, struct page *page,
1054 void *object, u8 val)
1055 {
1056 u8 *p = object;
1057 u8 *endobject = object + s->object_size;
1058
1059 if (s->flags & SLAB_RED_ZONE) {
1060 if (!check_bytes_and_report(s, page, object, "Left Redzone",
1061 object - s->red_left_pad, val, s->red_left_pad))
1062 return 0;
1063
1064 if (!check_bytes_and_report(s, page, object, "Right Redzone",
1065 endobject, val, s->inuse - s->object_size))
1066 return 0;
1067 } else {
1068 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
1069 check_bytes_and_report(s, page, p, "Alignment padding",
1070 endobject, POISON_INUSE,
1071 s->inuse - s->object_size);
1072 }
1073 }
1074
1075 if (s->flags & SLAB_POISON) {
1076 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
1077 (!check_bytes_and_report(s, page, p, "Poison", p,
1078 POISON_FREE, s->object_size - 1) ||
1079 !check_bytes_and_report(s, page, p, "End Poison",
1080 p + s->object_size - 1, POISON_END, 1)))
1081 return 0;
1082 /*
1083 * check_pad_bytes cleans up on its own.
1084 */
1085 check_pad_bytes(s, page, p);
1086 }
1087
1088 if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
1089 /*
1090 * Object and freepointer overlap. Cannot check
1091 * freepointer while object is allocated.
1092 */
1093 return 1;
1094
1095 /* Check free pointer validity */
1096 if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
1097 object_err(s, page, p, "Freepointer corrupt");
1098 /*
1099 * No choice but to zap it and thus lose the remainder
1100 * of the free objects in this slab. May cause
1101 * another error because the object count is now wrong.
1102 */
1103 set_freepointer(s, p, NULL);
1104 return 0;
1105 }
1106 return 1;
1107 }
1108
1109 static int check_slab(struct kmem_cache *s, struct page *page)
1110 {
1111 int maxobj;
1112
1113 if (!PageSlab(page)) {
1114 slab_err(s, page, "Not a valid slab page");
1115 return 0;
1116 }
1117
1118 maxobj = order_objects(compound_order(page), s->size);
1119 if (page->objects > maxobj) {
1120 slab_err(s, page, "objects %u > max %u",
1121 page->objects, maxobj);
1122 return 0;
1123 }
1124 if (page->inuse > page->objects) {
1125 slab_err(s, page, "inuse %u > max %u",
1126 page->inuse, page->objects);
1127 return 0;
1128 }
1129 /* Slab_pad_check fixes things up after itself */
1130 slab_pad_check(s, page);
1131 return 1;
1132 }
1133
1134 /*
1135 * Determine if a certain object on a page is on the freelist. Must hold the
1136 * slab lock to guarantee that the chains are in a consistent state.
1137 */
1138 static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
1139 {
1140 int nr = 0;
1141 void *fp;
1142 void *object = NULL;
1143 int max_objects;
1144
1145 fp = page->freelist;
1146 while (fp && nr <= page->objects) {
1147 if (fp == search)
1148 return 1;
1149 if (!check_valid_pointer(s, page, fp)) {
1150 if (object) {
1151 object_err(s, page, object,
1152 "Freechain corrupt");
1153 set_freepointer(s, object, NULL);
1154 } else {
1155 slab_err(s, page, "Freepointer corrupt");
1156 page->freelist = NULL;
1157 page->inuse = page->objects;
1158 slab_fix(s, "Freelist cleared");
1159 return 0;
1160 }
1161 break;
1162 }
1163 object = fp;
1164 fp = get_freepointer(s, object);
1165 nr++;
1166 }
1167
1168 max_objects = order_objects(compound_order(page), s->size);
1169 if (max_objects > MAX_OBJS_PER_PAGE)
1170 max_objects = MAX_OBJS_PER_PAGE;
1171
1172 if (page->objects != max_objects) {
1173 slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
1174 page->objects, max_objects);
1175 page->objects = max_objects;
1176 slab_fix(s, "Number of objects adjusted");
1177 }
1178 if (page->inuse != page->objects - nr) {
1179 slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
1180 page->inuse, page->objects - nr);
1181 page->inuse = page->objects - nr;
1182 slab_fix(s, "Object count adjusted");
1183 }
1184 return search == NULL;
1185 }
1186
1187 static void trace(struct kmem_cache *s, struct page *page, void *object,
1188 int alloc)
1189 {
1190 if (s->flags & SLAB_TRACE) {
1191 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
1192 s->name,
1193 alloc ? "alloc" : "free",
1194 object, page->inuse,
1195 page->freelist);
1196
1197 if (!alloc)
1198 print_section(KERN_INFO, "Object ", (void *)object,
1199 s->object_size);
1200
1201 dump_stack();
1202 }
1203 }
1204
1205 /*
1206 * Tracking of fully allocated slabs for debugging purposes.
1207 */
1208 static void add_full(struct kmem_cache *s,
1209 struct kmem_cache_node *n, struct page *page)
1210 {
1211 if (!(s->flags & SLAB_STORE_USER))
1212 return;
1213
1214 lockdep_assert_held(&n->list_lock);
1215 list_add(&page->slab_list, &n->full);
1216 }
1217
1218 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
1219 {
1220 if (!(s->flags & SLAB_STORE_USER))
1221 return;
1222
1223 lockdep_assert_held(&n->list_lock);
1224 list_del(&page->slab_list);
1225 }
1226
1227 /* Tracking of the number of slabs for debugging purposes */
1228 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1229 {
1230 struct kmem_cache_node *n = get_node(s, node);
1231
1232 return atomic_long_read(&n->nr_slabs);
1233 }
1234
1235 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1236 {
1237 return atomic_long_read(&n->nr_slabs);
1238 }
1239
1240 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1241 {
1242 struct kmem_cache_node *n = get_node(s, node);
1243
1244 /*
1245 * May be called early in order to allocate a slab for the
1246 * kmem_cache_node structure. Solve the chicken-egg
1247 * dilemma by deferring the increment of the count during
1248 * bootstrap (see early_kmem_cache_node_alloc).
1249 */
1250 if (likely(n)) {
1251 atomic_long_inc(&n->nr_slabs);
1252 atomic_long_add(objects, &n->total_objects);
1253 }
1254 }
1255 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1256 {
1257 struct kmem_cache_node *n = get_node(s, node);
1258
1259 atomic_long_dec(&n->nr_slabs);
1260 atomic_long_sub(objects, &n->total_objects);
1261 }
1262
1263 /* Object debug checks for alloc/free paths */
1264 static void setup_object_debug(struct kmem_cache *s, struct page *page,
1265 void *object)
1266 {
1267 if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
1268 return;
1269
1270 init_object(s, object, SLUB_RED_INACTIVE);
1271 init_tracking(s, object);
1272 }
1273
1274 static
1275 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
1276 {
1277 if (!kmem_cache_debug_flags(s, SLAB_POISON))
1278 return;
1279
1280 metadata_access_enable();
1281 memset(kasan_reset_tag(addr), POISON_INUSE, page_size(page));
1282 metadata_access_disable();
1283 }
1284
1285 static inline int alloc_consistency_checks(struct kmem_cache *s,
1286 struct page *page, void *object)
1287 {
1288 if (!check_slab(s, page))
1289 return 0;
1290
1291 if (!check_valid_pointer(s, page, object)) {
1292 object_err(s, page, object, "Freelist Pointer check fails");
1293 return 0;
1294 }
1295
1296 if (!check_object(s, page, object, SLUB_RED_INACTIVE))
1297 return 0;
1298
1299 return 1;
1300 }
1301
1302 static noinline int alloc_debug_processing(struct kmem_cache *s,
1303 struct page *page,
1304 void *object, unsigned long addr)
1305 {
1306 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1307 if (!alloc_consistency_checks(s, page, object))
1308 goto bad;
1309 }
1310
1311 /* Success. Perform special debug activities for allocs */
1312 if (s->flags & SLAB_STORE_USER)
1313 set_track(s, object, TRACK_ALLOC, addr);
1314 trace(s, page, object, 1);
1315 init_object(s, object, SLUB_RED_ACTIVE);
1316 return 1;
1317
1318 bad:
1319 if (PageSlab(page)) {
1320 /*
1321 * If this is a slab page then lets do the best we can
1322 * to avoid issues in the future. Marking all objects
1323 * as used avoids touching the remaining objects.
1324 */
1325 slab_fix(s, "Marking all objects used");
1326 page->inuse = page->objects;
1327 page->freelist = NULL;
1328 }
1329 return 0;
1330 }
1331
1332 static inline int free_consistency_checks(struct kmem_cache *s,
1333 struct page *page, void *object, unsigned long addr)
1334 {
1335 if (!check_valid_pointer(s, page, object)) {
1336 slab_err(s, page, "Invalid object pointer 0x%p", object);
1337 return 0;
1338 }
1339
1340 if (on_freelist(s, page, object)) {
1341 object_err(s, page, object, "Object already free");
1342 return 0;
1343 }
1344
1345 if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1346 return 0;
1347
1348 if (unlikely(s != page->slab_cache)) {
1349 if (!PageSlab(page)) {
1350 slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
1351 object);
1352 } else if (!page->slab_cache) {
1353 pr_err("SLUB <none>: no slab for object 0x%p.\n",
1354 object);
1355 dump_stack();
1356 } else
1357 object_err(s, page, object,
1358 "page slab pointer corrupt.");
1359 return 0;
1360 }
1361 return 1;
1362 }
1363
1364 /* Supports checking bulk free of a constructed freelist */
1365 static noinline int free_debug_processing(
1366 struct kmem_cache *s, struct page *page,
1367 void *head, void *tail, int bulk_cnt,
1368 unsigned long addr)
1369 {
1370 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1371 void *object = head;
1372 int cnt = 0;
1373 unsigned long flags, flags2;
1374 int ret = 0;
1375
1376 spin_lock_irqsave(&n->list_lock, flags);
1377 slab_lock(page, &flags2);
1378
1379 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1380 if (!check_slab(s, page))
1381 goto out;
1382 }
1383
1384 next_object:
1385 cnt++;
1386
1387 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1388 if (!free_consistency_checks(s, page, object, addr))
1389 goto out;
1390 }
1391
1392 if (s->flags & SLAB_STORE_USER)
1393 set_track(s, object, TRACK_FREE, addr);
1394 trace(s, page, object, 0);
1395 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
1396 init_object(s, object, SLUB_RED_INACTIVE);
1397
1398 /* Reached end of constructed freelist yet? */
1399 if (object != tail) {
1400 object = get_freepointer(s, object);
1401 goto next_object;
1402 }
1403 ret = 1;
1404
1405 out:
1406 if (cnt != bulk_cnt)
1407 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
1408 bulk_cnt, cnt);
1409
1410 slab_unlock(page, &flags2);
1411 spin_unlock_irqrestore(&n->list_lock, flags);
1412 if (!ret)
1413 slab_fix(s, "Object at 0x%p not freed", object);
1414 return ret;
1415 }
1416
1417 /*
1418 * Parse a block of slub_debug options. Blocks are delimited by ';'
1419 *
1420 * @str: start of block
1421 * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
1422 * @slabs: return start of list of slabs, or NULL when there's no list
1423 * @init: assume this is initial parsing and not per-kmem-create parsing
1424 *
1425 * returns the start of next block if there's any, or NULL
1426 */
1427 static char *
1428 parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
1429 {
1430 bool higher_order_disable = false;
1431
1432 /* Skip any completely empty blocks */
1433 while (*str && *str == ';')
1434 str++;
1435
1436 if (*str == ',') {
1437 /*
1438 * No options but restriction on slabs. This means full
1439 * debugging for slabs matching a pattern.
1440 */
1441 *flags = DEBUG_DEFAULT_FLAGS;
1442 goto check_slabs;
1443 }
1444 *flags = 0;
1445
1446 /* Determine which debug features should be switched on */
1447 for (; *str && *str != ',' && *str != ';'; str++) {
1448 switch (tolower(*str)) {
1449 case '-':
1450 *flags = 0;
1451 break;
1452 case 'f':
1453 *flags |= SLAB_CONSISTENCY_CHECKS;
1454 break;
1455 case 'z':
1456 *flags |= SLAB_RED_ZONE;
1457 break;
1458 case 'p':
1459 *flags |= SLAB_POISON;
1460 break;
1461 case 'u':
1462 *flags |= SLAB_STORE_USER;
1463 break;
1464 case 't':
1465 *flags |= SLAB_TRACE;
1466 break;
1467 case 'a':
1468 *flags |= SLAB_FAILSLAB;
1469 break;
1470 case 'o':
1471 /*
1472 * Avoid enabling debugging on caches if its minimum
1473 * order would increase as a result.
1474 */
1475 higher_order_disable = true;
1476 break;
1477 default:
1478 if (init)
1479 pr_err("slub_debug option '%c' unknown. skipped\n", *str);
1480 }
1481 }
1482 check_slabs:
1483 if (*str == ',')
1484 *slabs = ++str;
1485 else
1486 *slabs = NULL;
1487
1488 /* Skip over the slab list */
1489 while (*str && *str != ';')
1490 str++;
1491
1492 /* Skip any completely empty blocks */
1493 while (*str && *str == ';')
1494 str++;
1495
1496 if (init && higher_order_disable)
1497 disable_higher_order_debug = 1;
1498
1499 if (*str)
1500 return str;
1501 else
1502 return NULL;
1503 }
1504
1505 static int __init setup_slub_debug(char *str)
1506 {
1507 slab_flags_t flags;
1508 slab_flags_t global_flags;
1509 char *saved_str;
1510 char *slab_list;
1511 bool global_slub_debug_changed = false;
1512 bool slab_list_specified = false;
1513
1514 global_flags = DEBUG_DEFAULT_FLAGS;
1515 if (*str++ != '=' || !*str)
1516 /*
1517 * No options specified. Switch on full debugging.
1518 */
1519 goto out;
1520
1521 saved_str = str;
1522 while (str) {
1523 str = parse_slub_debug_flags(str, &flags, &slab_list, true);
1524
1525 if (!slab_list) {
1526 global_flags = flags;
1527 global_slub_debug_changed = true;
1528 } else {
1529 slab_list_specified = true;
1530 }
1531 }
1532
1533 /*
1534 * For backwards compatibility, a single list of flags with list of
1535 * slabs means debugging is only changed for those slabs, so the global
1536 * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
1537 * on CONFIG_SLUB_DEBUG_ON). We can extend that to multiple lists as
1538 * long as there is no option specifying flags without a slab list.
1539 */
1540 if (slab_list_specified) {
1541 if (!global_slub_debug_changed)
1542 global_flags = slub_debug;
1543 slub_debug_string = saved_str;
1544 }
1545 out:
1546 slub_debug = global_flags;
1547 if (slub_debug != 0 || slub_debug_string)
1548 static_branch_enable(&slub_debug_enabled);
1549 else
1550 static_branch_disable(&slub_debug_enabled);
1551 if ((static_branch_unlikely(&init_on_alloc) ||
1552 static_branch_unlikely(&init_on_free)) &&
1553 (slub_debug & SLAB_POISON))
1554 pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
1555 return 1;
1556 }
1557
1558 __setup("slub_debug", setup_slub_debug);
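/*
 * Command line usage sketch (syntax as parsed above; the cache names are
 * just examples):
 *
 *	slub_debug			all debugging enabled (DEBUG_DEFAULT_FLAGS)
 *	slub_debug=P			poisoning only, for all caches
 *	slub_debug=,dentry		full debugging, but only for dentry
 *	slub_debug=F,kmalloc-*;Z,dentry	sanity checks for the kmalloc caches,
 *					red zoning for dentry
 */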
1559
1560 /*
1561 * kmem_cache_flags - apply debugging options to the cache
1562 * @object_size: the size of an object without meta data
1563 * @flags: flags to set
1564 * @name: name of the cache
1565 *
1566 * Debug option(s) are applied to @flags. In addition to the debug
1567 * option(s), if a slab name (or multiple) is specified i.e.
1568 * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
1569 * then only the select slabs will receive the debug option(s).
1570 */
1571 slab_flags_t kmem_cache_flags(unsigned int object_size,
1572 slab_flags_t flags, const char *name)
1573 {
1574 char *iter;
1575 size_t len;
1576 char *next_block;
1577 slab_flags_t block_flags;
1578 slab_flags_t slub_debug_local = slub_debug;
1579
1580 /*
1581 * If the slab cache is for debugging (e.g. kmemleak) then
1582 * don't store user (stack trace) information by default,
1583 * but let the user enable it via the command line below.
1584 */
1585 if (flags & SLAB_NOLEAKTRACE)
1586 slub_debug_local &= ~SLAB_STORE_USER;
1587
1588 len = strlen(name);
1589 next_block = slub_debug_string;
1590 /* Go through all blocks of debug options, see if any matches our slab's name */
1591 while (next_block) {
1592 next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
1593 if (!iter)
1594 continue;
1595 /* Found a block that has a slab list, search it */
1596 while (*iter) {
1597 char *end, *glob;
1598 size_t cmplen;
1599
1600 end = strchrnul(iter, ',');
1601 if (next_block && next_block < end)
1602 end = next_block - 1;
1603
1604 glob = strnchr(iter, end - iter, '*');
1605 if (glob)
1606 cmplen = glob - iter;
1607 else
1608 cmplen = max_t(size_t, len, (end - iter));
1609
1610 if (!strncmp(name, iter, cmplen)) {
1611 flags |= block_flags;
1612 return flags;
1613 }
1614
1615 if (!*end || *end == ';')
1616 break;
1617 iter = end + 1;
1618 }
1619 }
1620
1621 return flags | slub_debug_local;
1622 }
1623 #else /* !CONFIG_SLUB_DEBUG */
1624 static inline void setup_object_debug(struct kmem_cache *s,
1625 struct page *page, void *object) {}
1626 static inline
1627 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}
1628
1629 static inline int alloc_debug_processing(struct kmem_cache *s,
1630 struct page *page, void *object, unsigned long addr) { return 0; }
1631
1632 static inline int free_debug_processing(
1633 struct kmem_cache *s, struct page *page,
1634 void *head, void *tail, int bulk_cnt,
1635 unsigned long addr) { return 0; }
1636
1637 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1638 { return 1; }
1639 static inline int check_object(struct kmem_cache *s, struct page *page,
1640 void *object, u8 val) { return 1; }
1641 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1642 struct page *page) {}
1643 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
1644 struct page *page) {}
1645 slab_flags_t kmem_cache_flags(unsigned int object_size,
1646 slab_flags_t flags, const char *name)
1647 {
1648 return flags;
1649 }
1650 #define slub_debug 0
1651
1652 #define disable_higher_order_debug 0
1653
1654 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1655 { return 0; }
1656 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1657 { return 0; }
1658 static inline void inc_slabs_node(struct kmem_cache *s, int node,
1659 int objects) {}
1660 static inline void dec_slabs_node(struct kmem_cache *s, int node,
1661 int objects) {}
1662
1663 static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
1664 void **freelist, void *nextfree)
1665 {
1666 return false;
1667 }
1668 #endif /* CONFIG_SLUB_DEBUG */
1669
1670 /*
1671 * Hooks for other subsystems that check memory allocations. In a typical
1672 * production configuration these hooks all should produce no code at all.
1673 */
1674 static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
1675 {
1676 ptr = kasan_kmalloc_large(ptr, size, flags);
1677 /* As ptr might get tagged, call kmemleak hook after KASAN. */
1678 kmemleak_alloc(ptr, size, 1, flags);
1679 return ptr;
1680 }
1681
1682 static __always_inline void kfree_hook(void *x)
1683 {
1684 kmemleak_free(x);
1685 kasan_kfree_large(x);
1686 }
1687
1688 static __always_inline bool slab_free_hook(struct kmem_cache *s,
1689 void *x, bool init)
1690 {
1691 kmemleak_free_recursive(x, s->flags);
1692
1693 debug_check_no_locks_freed(x, s->object_size);
1694
1695 if (!(s->flags & SLAB_DEBUG_OBJECTS))
1696 debug_check_no_obj_freed(x, s->object_size);
1697
1698 /* Use KCSAN to help debug racy use-after-free. */
1699 if (!(s->flags & SLAB_TYPESAFE_BY_RCU))
1700 __kcsan_check_access(x, s->object_size,
1701 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
1702
1703 /*
1704 * As memory initialization might be integrated into KASAN,
1705 * kasan_slab_free and initialization memset's must be
1706 * kept together to avoid discrepancies in behavior.
1707 *
1708 * The initialization memset's clear the object and the metadata,
1709 * but don't touch the SLAB redzone.
1710 */
1711 if (init) {
1712 int rsize;
1713
1714 if (!kasan_has_integrated_init())
1715 memset(kasan_reset_tag(x), 0, s->object_size);
1716 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
1717 memset((char *)kasan_reset_tag(x) + s->inuse, 0,
1718 s->size - s->inuse - rsize);
1719 }
1720 /* KASAN might put x into memory quarantine, delaying its reuse. */
1721 return kasan_slab_free(s, x, init);
1722 }
1723
1724 static inline bool slab_free_freelist_hook(struct kmem_cache *s,
1725 void **head, void **tail,
1726 int *cnt)
1727 {
1728
1729 void *object;
1730 void *next = *head;
1731 void *old_tail = *tail ? *tail : *head;
1732
1733 if (is_kfence_address(next)) {
1734 slab_free_hook(s, next, false);
1735 return true;
1736 }
1737
1738 /* Head and tail of the reconstructed freelist */
1739 *head = NULL;
1740 *tail = NULL;
1741
1742 do {
1743 object = next;
1744 next = get_freepointer(s, object);
1745
1746 /* If object's reuse doesn't have to be delayed */
1747 if (!slab_free_hook(s, object, slab_want_init_on_free(s))) {
1748 /* Move object to the new freelist */
1749 set_freepointer(s, object, *head);
1750 *head = object;
1751 if (!*tail)
1752 *tail = object;
1753 } else {
1754 /*
1755 * Adjust the reconstructed freelist depth
1756 * accordingly if object's reuse is delayed.
1757 */
1758 --(*cnt);
1759 }
1760 } while (object != old_tail);
1761
1762 if (*head == *tail)
1763 *tail = NULL;
1764
1765 return *head != NULL;
1766 }
1767
1768 static void *setup_object(struct kmem_cache *s, struct page *page,
1769 void *object)
1770 {
1771 setup_object_debug(s, page, object);
1772 object = kasan_init_slab_obj(s, object);
1773 if (unlikely(s->ctor)) {
1774 kasan_unpoison_object_data(s, object);
1775 s->ctor(object);
1776 kasan_poison_object_data(s, object);
1777 }
1778 return object;
1779 }
1780
1781 /*
1782 * Slab allocation and freeing
1783 */
1784 static inline struct page *alloc_slab_page(struct kmem_cache *s,
1785 gfp_t flags, int node, struct kmem_cache_order_objects oo)
1786 {
1787 struct page *page;
1788 unsigned int order = oo_order(oo);
1789
1790 if (node == NUMA_NO_NODE)
1791 page = alloc_pages(flags, order);
1792 else
1793 page = __alloc_pages_node(node, flags, order);
1794
1795 return page;
1796 }
1797
1798 #ifdef CONFIG_SLAB_FREELIST_RANDOM
1799 /* Pre-initialize the random sequence cache */
1800 static int init_cache_random_seq(struct kmem_cache *s)
1801 {
1802 unsigned int count = oo_objects(s->oo);
1803 int err;
1804
1805 /* Bailout if already initialised */
1806 if (s->random_seq)
1807 return 0;
1808
1809 err = cache_random_seq_create(s, count, GFP_KERNEL);
1810 if (err) {
1811 pr_err("SLUB: Unable to initialize free list for %s\n",
1812 s->name);
1813 return err;
1814 }
1815
1816 /* Transform to an offset on the set of pages */
1817 if (s->random_seq) {
1818 unsigned int i;
1819
1820 for (i = 0; i < count; i++)
1821 s->random_seq[i] *= s->size;
1822 }
1823 return 0;
1824 }
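
/*
 * Illustrative sketch, not part of the upstream file: suppose a cache has
 * oo_objects(s->oo) == 4 and s->size == 256, and cache_random_seq_create()
 * produced s->random_seq = { 2, 0, 3, 1 }.  The loop above scales each
 * index by the object size, so afterwards
 *
 *	s->random_seq = { 512, 0, 768, 256 };
 *
 * i.e. a pre-computed permutation of byte offsets that next_freelist_entry()
 * below only needs to add to the slab's start address.
 */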
1825
1826 /* Initialize each random sequence freelist per cache */
1827 static void __init init_freelist_randomization(void)
1828 {
1829 struct kmem_cache *s;
1830
1831 mutex_lock(&slab_mutex);
1832
1833 list_for_each_entry(s, &slab_caches, list)
1834 init_cache_random_seq(s);
1835
1836 mutex_unlock(&slab_mutex);
1837 }
1838
1839 /* Get the next entry on the pre-computed freelist randomized */
1840 static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
1841 unsigned long *pos, void *start,
1842 unsigned long page_limit,
1843 unsigned long freelist_count)
1844 {
1845 unsigned int idx;
1846
1847 /*
1848 * If the target page allocation failed, the number of objects on the
1849 * page might be smaller than the usual size defined by the cache.
1850 */
1851 do {
1852 idx = s->random_seq[*pos];
1853 *pos += 1;
1854 if (*pos >= freelist_count)
1855 *pos = 0;
1856 } while (unlikely(idx >= page_limit));
1857
1858 return (char *)start + idx;
1859 }
1860
1861 /* Shuffle the singly linked freelist based on a random pre-computed sequence */
1862 static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1863 {
1864 void *start;
1865 void *cur;
1866 void *next;
1867 unsigned long idx, pos, page_limit, freelist_count;
1868
1869 if (page->objects < 2 || !s->random_seq)
1870 return false;
1871
1872 freelist_count = oo_objects(s->oo);
1873 pos = get_random_int() % freelist_count;
1874
1875 page_limit = page->objects * s->size;
1876 start = fixup_red_left(s, page_address(page));
1877
1878 /* First entry is used as the base of the freelist */
1879 cur = next_freelist_entry(s, page, &pos, start, page_limit,
1880 freelist_count);
1881 cur = setup_object(s, page, cur);
1882 page->freelist = cur;
1883
1884 for (idx = 1; idx < page->objects; idx++) {
1885 next = next_freelist_entry(s, page, &pos, start, page_limit,
1886 freelist_count);
1887 next = setup_object(s, page, next);
1888 set_freepointer(s, cur, next);
1889 cur = next;
1890 }
1891 set_freepointer(s, cur, NULL);
1892
1893 return true;
1894 }
1895 #else
1896 static inline int init_cache_random_seq(struct kmem_cache *s)
1897 {
1898 return 0;
1899 }
1900 static inline void init_freelist_randomization(void) { }
1901 static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
1902 {
1903 return false;
1904 }
1905 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
1906
1907 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1908 {
1909 struct page *page;
1910 struct kmem_cache_order_objects oo = s->oo;
1911 gfp_t alloc_gfp;
1912 void *start, *p, *next;
1913 int idx;
1914 bool shuffle;
1915
1916 flags &= gfp_allowed_mask;
1917
1918 flags |= s->allocflags;
1919
1920 /*
1921 * Let the initial higher-order allocation fail under memory pressure
1922 	 * so we fall back to the minimum order allocation.
1923 */
1924 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1925 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
1926 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
1927
1928 page = alloc_slab_page(s, alloc_gfp, node, oo);
1929 if (unlikely(!page)) {
1930 oo = s->min;
1931 alloc_gfp = flags;
1932 /*
1933 * Allocation may have failed due to fragmentation.
1934 * Try a lower order alloc if possible
1935 */
1936 page = alloc_slab_page(s, alloc_gfp, node, oo);
1937 if (unlikely(!page))
1938 goto out;
1939 stat(s, ORDER_FALLBACK);
1940 }
1941
1942 page->objects = oo_objects(oo);
1943
1944 account_slab_page(page, oo_order(oo), s, flags);
1945
1946 page->slab_cache = s;
1947 __SetPageSlab(page);
1948 if (page_is_pfmemalloc(page))
1949 SetPageSlabPfmemalloc(page);
1950
1951 kasan_poison_slab(page);
1952
1953 start = page_address(page);
1954
1955 setup_page_debug(s, page, start);
1956
1957 shuffle = shuffle_freelist(s, page);
1958
1959 if (!shuffle) {
1960 start = fixup_red_left(s, start);
1961 start = setup_object(s, page, start);
1962 page->freelist = start;
1963 for (idx = 0, p = start; idx < page->objects - 1; idx++) {
1964 next = p + s->size;
1965 next = setup_object(s, page, next);
1966 set_freepointer(s, p, next);
1967 p = next;
1968 }
1969 set_freepointer(s, p, NULL);
1970 }
1971
1972 page->inuse = page->objects;
1973 page->frozen = 1;
1974
1975 out:
1976 if (!page)
1977 return NULL;
1978
1979 inc_slabs_node(s, page_to_nid(page), page->objects);
1980
1981 return page;
1982 }
1983
1984 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1985 {
1986 if (unlikely(flags & GFP_SLAB_BUG_MASK))
1987 flags = kmalloc_fix_flags(flags);
1988
1989 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
1990
1991 return allocate_slab(s,
1992 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1993 }
1994
1995 static void __free_slab(struct kmem_cache *s, struct page *page)
1996 {
1997 int order = compound_order(page);
1998 int pages = 1 << order;
1999
2000 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
2001 void *p;
2002
2003 slab_pad_check(s, page);
2004 for_each_object(p, s, page_address(page),
2005 page->objects)
2006 check_object(s, page, p, SLUB_RED_INACTIVE);
2007 }
2008
2009 __ClearPageSlabPfmemalloc(page);
2010 __ClearPageSlab(page);
2011 /* In union with page->mapping where page allocator expects NULL */
2012 page->slab_cache = NULL;
2013 if (current->reclaim_state)
2014 current->reclaim_state->reclaimed_slab += pages;
2015 unaccount_slab_page(page, order, s);
2016 __free_pages(page, order);
2017 }
2018
2019 static void rcu_free_slab(struct rcu_head *h)
2020 {
2021 struct page *page = container_of(h, struct page, rcu_head);
2022
2023 __free_slab(page->slab_cache, page);
2024 }
2025
2026 static void free_slab(struct kmem_cache *s, struct page *page)
2027 {
2028 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
2029 call_rcu(&page->rcu_head, rcu_free_slab);
2030 } else
2031 __free_slab(s, page);
2032 }
2033
2034 static void discard_slab(struct kmem_cache *s, struct page *page)
2035 {
2036 dec_slabs_node(s, page_to_nid(page), page->objects);
2037 free_slab(s, page);
2038 }
2039
2040 /*
2041 * Management of partially allocated slabs.
2042 */
2043 static inline void
2044 __add_partial(struct kmem_cache_node *n, struct page *page, int tail)
2045 {
2046 n->nr_partial++;
2047 if (tail == DEACTIVATE_TO_TAIL)
2048 list_add_tail(&page->slab_list, &n->partial);
2049 else
2050 list_add(&page->slab_list, &n->partial);
2051 }
2052
2053 static inline void add_partial(struct kmem_cache_node *n,
2054 struct page *page, int tail)
2055 {
2056 lockdep_assert_held(&n->list_lock);
2057 __add_partial(n, page, tail);
2058 }
2059
2060 static inline void remove_partial(struct kmem_cache_node *n,
2061 struct page *page)
2062 {
2063 lockdep_assert_held(&n->list_lock);
2064 list_del(&page->slab_list);
2065 n->nr_partial--;
2066 }
2067
2068 /*
2069 * Remove slab from the partial list, freeze it and
2070 * return the pointer to the freelist.
2071 *
2072 * Returns a list of objects or NULL if it fails.
2073 */
2074 static inline void *acquire_slab(struct kmem_cache *s,
2075 struct kmem_cache_node *n, struct page *page,
2076 int mode, int *objects)
2077 {
2078 void *freelist;
2079 unsigned long counters;
2080 struct page new;
2081
2082 lockdep_assert_held(&n->list_lock);
2083
2084 /*
2085 * Zap the freelist and set the frozen bit.
2086 	 * The old freelist becomes the per cpu
2087 	 * allocation list.
2088 */
2089 freelist = page->freelist;
2090 counters = page->counters;
2091 new.counters = counters;
2092 *objects = new.objects - new.inuse;
2093 if (mode) {
2094 new.inuse = page->objects;
2095 new.freelist = NULL;
2096 } else {
2097 new.freelist = freelist;
2098 }
2099
2100 VM_BUG_ON(new.frozen);
2101 new.frozen = 1;
2102
2103 if (!__cmpxchg_double_slab(s, page,
2104 freelist, counters,
2105 new.freelist, new.counters,
2106 "acquire_slab"))
2107 return NULL;
2108
2109 remove_partial(n, page);
2110 WARN_ON(!freelist);
2111 return freelist;
2112 }
2113
2114 #ifdef CONFIG_SLUB_CPU_PARTIAL
2115 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
2116 #else
2117 static inline void put_cpu_partial(struct kmem_cache *s, struct page *page,
2118 int drain) { }
2119 #endif
2120 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
2121
2122 /*
2123 * Try to allocate a partial slab from a specific node.
2124 */
2125 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
2126 struct page **ret_page, gfp_t gfpflags)
2127 {
2128 struct page *page, *page2;
2129 void *object = NULL;
2130 unsigned int available = 0;
2131 unsigned long flags;
2132 int objects;
2133
2134 /*
2135 * Racy check. If we mistakenly see no partial slabs then we
2136 * just allocate an empty slab. If we mistakenly try to get a
2137 * partial slab and there is none available then get_partial()
2138 * will return NULL.
2139 */
2140 if (!n || !n->nr_partial)
2141 return NULL;
2142
2143 spin_lock_irqsave(&n->list_lock, flags);
2144 list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
2145 void *t;
2146
2147 if (!pfmemalloc_match(page, gfpflags))
2148 continue;
2149
2150 t = acquire_slab(s, n, page, object == NULL, &objects);
2151 if (!t)
2152 break;
2153
2154 available += objects;
2155 if (!object) {
2156 *ret_page = page;
2157 stat(s, ALLOC_FROM_PARTIAL);
2158 object = t;
2159 } else {
2160 put_cpu_partial(s, page, 0);
2161 stat(s, CPU_PARTIAL_NODE);
2162 }
2163 if (!kmem_cache_has_cpu_partial(s)
2164 || available > slub_cpu_partial(s) / 2)
2165 break;
2166
2167 }
2168 spin_unlock_irqrestore(&n->list_lock, flags);
2169 return object;
2170 }
2171
2172 /*
2173 * Get a page from somewhere. Search in increasing NUMA distances.
2174 */
2175 static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
2176 struct page **ret_page)
2177 {
2178 #ifdef CONFIG_NUMA
2179 struct zonelist *zonelist;
2180 struct zoneref *z;
2181 struct zone *zone;
2182 enum zone_type highest_zoneidx = gfp_zone(flags);
2183 void *object;
2184 unsigned int cpuset_mems_cookie;
2185
2186 /*
2187 * The defrag ratio allows a configuration of the tradeoffs between
2188 * inter node defragmentation and node local allocations. A lower
2189 * defrag_ratio increases the tendency to do local allocations
2190 * instead of attempting to obtain partial slabs from other nodes.
2191 *
2192 * If the defrag_ratio is set to 0 then kmalloc() always
2193 * returns node local objects. If the ratio is higher then kmalloc()
2194 * may return off node objects because partial slabs are obtained
2195 * from other nodes and filled up.
2196 *
2197 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
2198 * (which makes defrag_ratio = 1000) then every (well almost)
2199 * allocation will first attempt to defrag slab caches on other nodes.
2200 * This means scanning over all nodes to look for partial slabs which
2201 * may be expensive if we do it every time we are trying to find a slab
2202 * with available objects.
2203 */
2204 if (!s->remote_node_defrag_ratio ||
2205 get_cycles() % 1024 > s->remote_node_defrag_ratio)
2206 return NULL;
2207
2208 do {
2209 cpuset_mems_cookie = read_mems_allowed_begin();
2210 zonelist = node_zonelist(mempolicy_slab_node(), flags);
2211 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
2212 struct kmem_cache_node *n;
2213
2214 n = get_node(s, zone_to_nid(zone));
2215
2216 if (n && cpuset_zone_allowed(zone, flags) &&
2217 n->nr_partial > s->min_partial) {
2218 object = get_partial_node(s, n, ret_page, flags);
2219 if (object) {
2220 /*
2221 * Don't check read_mems_allowed_retry()
2222 * here - if mems_allowed was updated in
2223 * parallel, that was a harmless race
2224 * between allocation and the cpuset
2225 * update
2226 */
2227 return object;
2228 }
2229 }
2230 }
2231 } while (read_mems_allowed_retry(cpuset_mems_cookie));
2232 #endif /* CONFIG_NUMA */
2233 return NULL;
2234 }
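
/*
 * Worked example (illustrative only): writing 100 to
 * /sys/kernel/slab/<cache>/remote_node_defrag_ratio stores an internal
 * ratio of 1000, so "get_cycles() % 1024 > ratio" above is almost never
 * true and nearly every local miss goes on to scan remote nodes.  A sysfs
 * value of 10 (internal ratio 100) allows the remote scan on only about
 * one miss in ten, and 0 disables it completely.
 */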
2235
2236 /*
2237 * Get a partial page, lock it and return it.
2238 */
2239 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
2240 struct page **ret_page)
2241 {
2242 void *object;
2243 int searchnode = node;
2244
2245 if (node == NUMA_NO_NODE)
2246 searchnode = numa_mem_id();
2247
2248 object = get_partial_node(s, get_node(s, searchnode), ret_page, flags);
2249 if (object || node != NUMA_NO_NODE)
2250 return object;
2251
2252 return get_any_partial(s, flags, ret_page);
2253 }
2254
2255 #ifdef CONFIG_PREEMPTION
2256 /*
2257 * Calculate the next globally unique transaction for disambiguation
2258 * during cmpxchg. The transactions start with the cpu number and are then
2259 * incremented by CONFIG_NR_CPUS.
2260 */
2261 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
2262 #else
2263 /*
2264 * No preemption supported therefore also no need to check for
2265 * different cpus.
2266 */
2267 #define TID_STEP 1
2268 #endif
2269
2270 static inline unsigned long next_tid(unsigned long tid)
2271 {
2272 return tid + TID_STEP;
2273 }
2274
2275 #ifdef SLUB_DEBUG_CMPXCHG
2276 static inline unsigned int tid_to_cpu(unsigned long tid)
2277 {
2278 return tid % TID_STEP;
2279 }
2280
2281 static inline unsigned long tid_to_event(unsigned long tid)
2282 {
2283 return tid / TID_STEP;
2284 }
2285 #endif
2286
2287 static inline unsigned int init_tid(int cpu)
2288 {
2289 return cpu;
2290 }
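
/*
 * Worked example (illustrative, assuming CONFIG_NR_CPUS == 4 with
 * CONFIG_PREEMPTION, so TID_STEP == 4): cpu 2 starts at tid 2 and each
 * successful operation advances it to 6, 10, 14, ...  tid_to_cpu(14) == 2
 * recovers the owning cpu and tid_to_event(14) == 3 counts the operations
 * done there, which is how note_cmpxchg_failure() below distinguishes a
 * cpu migration from a plain lost race.
 */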
2291
2292 static inline void note_cmpxchg_failure(const char *n,
2293 const struct kmem_cache *s, unsigned long tid)
2294 {
2295 #ifdef SLUB_DEBUG_CMPXCHG
2296 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
2297
2298 pr_info("%s %s: cmpxchg redo ", n, s->name);
2299
2300 #ifdef CONFIG_PREEMPTION
2301 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
2302 pr_warn("due to cpu change %d -> %d\n",
2303 tid_to_cpu(tid), tid_to_cpu(actual_tid));
2304 else
2305 #endif
2306 if (tid_to_event(tid) != tid_to_event(actual_tid))
2307 pr_warn("due to cpu running other code. Event %ld->%ld\n",
2308 tid_to_event(tid), tid_to_event(actual_tid));
2309 else
2310 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
2311 actual_tid, tid, next_tid(tid));
2312 #endif
2313 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
2314 }
2315
2316 static void init_kmem_cache_cpus(struct kmem_cache *s)
2317 {
2318 int cpu;
2319 struct kmem_cache_cpu *c;
2320
2321 for_each_possible_cpu(cpu) {
2322 c = per_cpu_ptr(s->cpu_slab, cpu);
2323 local_lock_init(&c->lock);
2324 c->tid = init_tid(cpu);
2325 }
2326 }
2327
2328 /*
2329 * Finishes removing the cpu slab. Merges cpu's freelist with page's freelist,
2330  * unfreezes the slab and puts it on the proper list.
2331 * Assumes the slab has been already safely taken away from kmem_cache_cpu
2332 * by the caller.
2333 */
2334 static void deactivate_slab(struct kmem_cache *s, struct page *page,
2335 void *freelist)
2336 {
2337 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
2338 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
2339 int lock = 0, free_delta = 0;
2340 enum slab_modes l = M_NONE, m = M_NONE;
2341 void *nextfree, *freelist_iter, *freelist_tail;
2342 int tail = DEACTIVATE_TO_HEAD;
2343 unsigned long flags = 0;
2344 struct page new;
2345 struct page old;
2346
2347 if (page->freelist) {
2348 stat(s, DEACTIVATE_REMOTE_FREES);
2349 tail = DEACTIVATE_TO_TAIL;
2350 }
2351
2352 /*
2353 * Stage one: Count the objects on cpu's freelist as free_delta and
2354 * remember the last object in freelist_tail for later splicing.
2355 */
2356 freelist_tail = NULL;
2357 freelist_iter = freelist;
2358 while (freelist_iter) {
2359 nextfree = get_freepointer(s, freelist_iter);
2360
2361 /*
2362 * If 'nextfree' is invalid, it is possible that the object at
2363 * 'freelist_iter' is already corrupted. So isolate all objects
2364 * starting at 'freelist_iter' by skipping them.
2365 */
2366 if (freelist_corrupted(s, page, &freelist_iter, nextfree))
2367 break;
2368
2369 freelist_tail = freelist_iter;
2370 free_delta++;
2371
2372 freelist_iter = nextfree;
2373 }
2374
2375 /*
2376 * Stage two: Unfreeze the page while splicing the per-cpu
2377 * freelist to the head of page's freelist.
2378 *
2379 * Ensure that the page is unfrozen while the list presence
2380 * reflects the actual number of objects during unfreeze.
2381 *
2382 	 * We set up the list membership and then perform a cmpxchg
2383 * with the count. If there is a mismatch then the page
2384 * is not unfrozen but the page is on the wrong list.
2385 *
2386 * Then we restart the process which may have to remove
2387 * the page from the list that we just put it on again
2388 * because the number of objects in the slab may have
2389 * changed.
2390 */
2391 redo:
2392
2393 old.freelist = READ_ONCE(page->freelist);
2394 old.counters = READ_ONCE(page->counters);
2395 VM_BUG_ON(!old.frozen);
2396
2397 /* Determine target state of the slab */
2398 new.counters = old.counters;
2399 if (freelist_tail) {
2400 new.inuse -= free_delta;
2401 set_freepointer(s, freelist_tail, old.freelist);
2402 new.freelist = freelist;
2403 } else
2404 new.freelist = old.freelist;
2405
2406 new.frozen = 0;
2407
2408 if (!new.inuse && n->nr_partial >= s->min_partial)
2409 m = M_FREE;
2410 else if (new.freelist) {
2411 m = M_PARTIAL;
2412 if (!lock) {
2413 lock = 1;
2414 /*
2415 * Taking the spinlock removes the possibility
2416 * that acquire_slab() will see a slab page that
2417 * is frozen
2418 */
2419 spin_lock_irqsave(&n->list_lock, flags);
2420 }
2421 } else {
2422 m = M_FULL;
2423 if (kmem_cache_debug_flags(s, SLAB_STORE_USER) && !lock) {
2424 lock = 1;
2425 /*
2426 * This also ensures that the scanning of full
2427 * slabs from diagnostic functions will not see
2428 * any frozen slabs.
2429 */
2430 spin_lock_irqsave(&n->list_lock, flags);
2431 }
2432 }
2433
2434 if (l != m) {
2435 if (l == M_PARTIAL)
2436 remove_partial(n, page);
2437 else if (l == M_FULL)
2438 remove_full(s, n, page);
2439
2440 if (m == M_PARTIAL)
2441 add_partial(n, page, tail);
2442 else if (m == M_FULL)
2443 add_full(s, n, page);
2444 }
2445
2446 l = m;
2447 if (!cmpxchg_double_slab(s, page,
2448 old.freelist, old.counters,
2449 new.freelist, new.counters,
2450 "unfreezing slab"))
2451 goto redo;
2452
2453 if (lock)
2454 spin_unlock_irqrestore(&n->list_lock, flags);
2455
2456 if (m == M_PARTIAL)
2457 stat(s, tail);
2458 else if (m == M_FULL)
2459 stat(s, DEACTIVATE_FULL);
2460 else if (m == M_FREE) {
2461 stat(s, DEACTIVATE_EMPTY);
2462 discard_slab(s, page);
2463 stat(s, FREE_SLAB);
2464 }
2465 }
2466
2467 #ifdef CONFIG_SLUB_CPU_PARTIAL
2468 static void __unfreeze_partials(struct kmem_cache *s, struct page *partial_page)
2469 {
2470 struct kmem_cache_node *n = NULL, *n2 = NULL;
2471 struct page *page, *discard_page = NULL;
2472 unsigned long flags = 0;
2473
2474 while (partial_page) {
2475 struct page new;
2476 struct page old;
2477
2478 page = partial_page;
2479 partial_page = page->next;
2480
2481 n2 = get_node(s, page_to_nid(page));
2482 if (n != n2) {
2483 if (n)
2484 spin_unlock_irqrestore(&n->list_lock, flags);
2485
2486 n = n2;
2487 spin_lock_irqsave(&n->list_lock, flags);
2488 }
2489
2490 do {
2491
2492 old.freelist = page->freelist;
2493 old.counters = page->counters;
2494 VM_BUG_ON(!old.frozen);
2495
2496 new.counters = old.counters;
2497 new.freelist = old.freelist;
2498
2499 new.frozen = 0;
2500
2501 } while (!__cmpxchg_double_slab(s, page,
2502 old.freelist, old.counters,
2503 new.freelist, new.counters,
2504 "unfreezing slab"));
2505
2506 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
2507 page->next = discard_page;
2508 discard_page = page;
2509 } else {
2510 add_partial(n, page, DEACTIVATE_TO_TAIL);
2511 stat(s, FREE_ADD_PARTIAL);
2512 }
2513 }
2514
2515 if (n)
2516 spin_unlock_irqrestore(&n->list_lock, flags);
2517
2518 while (discard_page) {
2519 page = discard_page;
2520 discard_page = discard_page->next;
2521
2522 stat(s, DEACTIVATE_EMPTY);
2523 discard_slab(s, page);
2524 stat(s, FREE_SLAB);
2525 }
2526 }
2527
2528 /*
2529 * Unfreeze all the cpu partial slabs.
2530 */
2531 static void unfreeze_partials(struct kmem_cache *s)
2532 {
2533 struct page *partial_page;
2534 unsigned long flags;
2535
2536 local_lock_irqsave(&s->cpu_slab->lock, flags);
2537 partial_page = this_cpu_read(s->cpu_slab->partial);
2538 this_cpu_write(s->cpu_slab->partial, NULL);
2539 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
2540
2541 if (partial_page)
2542 __unfreeze_partials(s, partial_page);
2543 }
2544
2545 static void unfreeze_partials_cpu(struct kmem_cache *s,
2546 struct kmem_cache_cpu *c)
2547 {
2548 struct page *partial_page;
2549
2550 partial_page = slub_percpu_partial(c);
2551 c->partial = NULL;
2552
2553 if (partial_page)
2554 __unfreeze_partials(s, partial_page);
2555 }
2556
2557 /*
2558 * Put a page that was just frozen (in __slab_free|get_partial_node) into a
2559 * partial page slot if available.
2560 *
2561 * If we did not find a slot then simply move all the partials to the
2562 * per node partial list.
2563 */
2564 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
2565 {
2566 struct page *oldpage;
2567 struct page *page_to_unfreeze = NULL;
2568 unsigned long flags;
2569 int pages = 0;
2570 int pobjects = 0;
2571
2572 local_lock_irqsave(&s->cpu_slab->lock, flags);
2573
2574 oldpage = this_cpu_read(s->cpu_slab->partial);
2575
2576 if (oldpage) {
2577 if (drain && oldpage->pobjects > slub_cpu_partial(s)) {
2578 /*
2579 * Partial array is full. Move the existing set to the
2580 * per node partial list. Postpone the actual unfreezing
2581 * outside of the critical section.
2582 */
2583 page_to_unfreeze = oldpage;
2584 oldpage = NULL;
2585 } else {
2586 pobjects = oldpage->pobjects;
2587 pages = oldpage->pages;
2588 }
2589 }
2590
2591 pages++;
2592 pobjects += page->objects - page->inuse;
2593
2594 page->pages = pages;
2595 page->pobjects = pobjects;
2596 page->next = oldpage;
2597
2598 this_cpu_write(s->cpu_slab->partial, page);
2599
2600 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
2601
2602 if (page_to_unfreeze) {
2603 __unfreeze_partials(s, page_to_unfreeze);
2604 stat(s, CPU_PARTIAL_DRAIN);
2605 }
2606 }
2607
2608 #else /* CONFIG_SLUB_CPU_PARTIAL */
2609
2610 static inline void unfreeze_partials(struct kmem_cache *s) { }
2611 static inline void unfreeze_partials_cpu(struct kmem_cache *s,
2612 struct kmem_cache_cpu *c) { }
2613
2614 #endif /* CONFIG_SLUB_CPU_PARTIAL */
2615
2616 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
2617 {
2618 unsigned long flags;
2619 struct page *page;
2620 void *freelist;
2621
2622 local_lock_irqsave(&s->cpu_slab->lock, flags);
2623
2624 page = c->page;
2625 freelist = c->freelist;
2626
2627 c->page = NULL;
2628 c->freelist = NULL;
2629 c->tid = next_tid(c->tid);
2630
2631 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
2632
2633 if (page) {
2634 deactivate_slab(s, page, freelist);
2635 stat(s, CPUSLAB_FLUSH);
2636 }
2637 }
2638
2639 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
2640 {
2641 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2642 void *freelist = c->freelist;
2643 struct page *page = c->page;
2644
2645 c->page = NULL;
2646 c->freelist = NULL;
2647 c->tid = next_tid(c->tid);
2648
2649 if (page) {
2650 deactivate_slab(s, page, freelist);
2651 stat(s, CPUSLAB_FLUSH);
2652 }
2653
2654 unfreeze_partials_cpu(s, c);
2655 }
2656
2657 struct slub_flush_work {
2658 struct work_struct work;
2659 struct kmem_cache *s;
2660 bool skip;
2661 };
2662
2663 /*
2664 * Flush cpu slab.
2665 *
2666 * Called from CPU work handler with migration disabled.
2667 */
2668 static void flush_cpu_slab(struct work_struct *w)
2669 {
2670 struct kmem_cache *s;
2671 struct kmem_cache_cpu *c;
2672 struct slub_flush_work *sfw;
2673
2674 sfw = container_of(w, struct slub_flush_work, work);
2675
2676 s = sfw->s;
2677 c = this_cpu_ptr(s->cpu_slab);
2678
2679 if (c->page)
2680 flush_slab(s, c);
2681
2682 unfreeze_partials(s);
2683 }
2684
2685 static bool has_cpu_slab(int cpu, struct kmem_cache *s)
2686 {
2687 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2688
2689 return c->page || slub_percpu_partial(c);
2690 }
2691
2692 static DEFINE_MUTEX(flush_lock);
2693 static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
2694
2695 static void flush_all_cpus_locked(struct kmem_cache *s)
2696 {
2697 struct slub_flush_work *sfw;
2698 unsigned int cpu;
2699
2700 lockdep_assert_cpus_held();
2701 mutex_lock(&flush_lock);
2702
2703 for_each_online_cpu(cpu) {
2704 sfw = &per_cpu(slub_flush, cpu);
2705 if (!has_cpu_slab(cpu, s)) {
2706 sfw->skip = true;
2707 continue;
2708 }
2709 INIT_WORK(&sfw->work, flush_cpu_slab);
2710 sfw->skip = false;
2711 sfw->s = s;
2712 queue_work_on(cpu, flushwq, &sfw->work);
2713 }
2714
2715 for_each_online_cpu(cpu) {
2716 sfw = &per_cpu(slub_flush, cpu);
2717 if (sfw->skip)
2718 continue;
2719 flush_work(&sfw->work);
2720 }
2721
2722 mutex_unlock(&flush_lock);
2723 }
2724
2725 static void flush_all(struct kmem_cache *s)
2726 {
2727 cpus_read_lock();
2728 flush_all_cpus_locked(s);
2729 cpus_read_unlock();
2730 }
2731
2732 /*
2733  * Use the cpu notifier to ensure that the cpu slabs are flushed when
2734 * necessary.
2735 */
2736 static int slub_cpu_dead(unsigned int cpu)
2737 {
2738 struct kmem_cache *s;
2739
2740 mutex_lock(&slab_mutex);
2741 list_for_each_entry(s, &slab_caches, list)
2742 __flush_cpu_slab(s, cpu);
2743 mutex_unlock(&slab_mutex);
2744 return 0;
2745 }
2746
2747 /*
2748 * Check if the objects in a per cpu structure fit numa
2749 * locality expectations.
2750 */
2751 static inline int node_match(struct page *page, int node)
2752 {
2753 #ifdef CONFIG_NUMA
2754 if (node != NUMA_NO_NODE && page_to_nid(page) != node)
2755 return 0;
2756 #endif
2757 return 1;
2758 }
2759
2760 #ifdef CONFIG_SLUB_DEBUG
2761 static int count_free(struct page *page)
2762 {
2763 return page->objects - page->inuse;
2764 }
2765
2766 static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2767 {
2768 return atomic_long_read(&n->total_objects);
2769 }
2770 #endif /* CONFIG_SLUB_DEBUG */
2771
2772 #if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
2773 static unsigned long count_partial(struct kmem_cache_node *n,
2774 int (*get_count)(struct page *))
2775 {
2776 unsigned long flags;
2777 unsigned long x = 0;
2778 struct page *page;
2779
2780 spin_lock_irqsave(&n->list_lock, flags);
2781 list_for_each_entry(page, &n->partial, slab_list)
2782 x += get_count(page);
2783 spin_unlock_irqrestore(&n->list_lock, flags);
2784 return x;
2785 }
2786 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
2787
2788 static noinline void
2789 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2790 {
2791 #ifdef CONFIG_SLUB_DEBUG
2792 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
2793 DEFAULT_RATELIMIT_BURST);
2794 int node;
2795 struct kmem_cache_node *n;
2796
2797 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
2798 return;
2799
2800 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
2801 nid, gfpflags, &gfpflags);
2802 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
2803 s->name, s->object_size, s->size, oo_order(s->oo),
2804 oo_order(s->min));
2805
2806 if (oo_order(s->min) > get_order(s->object_size))
2807 pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n",
2808 s->name);
2809
2810 for_each_kmem_cache_node(s, node, n) {
2811 unsigned long nr_slabs;
2812 unsigned long nr_objs;
2813 unsigned long nr_free;
2814
2815 nr_free = count_partial(n, count_free);
2816 nr_slabs = node_nr_slabs(n);
2817 nr_objs = node_nr_objs(n);
2818
2819 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n",
2820 node, nr_slabs, nr_objs, nr_free);
2821 }
2822 #endif
2823 }
2824
2825 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2826 {
2827 if (unlikely(PageSlabPfmemalloc(page)))
2828 return gfp_pfmemalloc_allowed(gfpflags);
2829
2830 return true;
2831 }
2832
2833 /*
2834 * A variant of pfmemalloc_match() that tests page flags without asserting
2835 * PageSlab. Intended for opportunistic checks before taking a lock and
2836 * rechecking that nobody else freed the page under us.
2837 */
2838 static inline bool pfmemalloc_match_unsafe(struct page *page, gfp_t gfpflags)
2839 {
2840 if (unlikely(__PageSlabPfmemalloc(page)))
2841 return gfp_pfmemalloc_allowed(gfpflags);
2842
2843 return true;
2844 }
2845
2846 /*
2847 * Check the page->freelist of a page and either transfer the freelist to the
2848 * per cpu freelist or deactivate the page.
2849 *
2850 * The page is still frozen if the return value is not NULL.
2851 *
2852 * If this function returns NULL then the page has been unfrozen.
2853 */
2854 static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2855 {
2856 struct page new;
2857 unsigned long counters;
2858 void *freelist;
2859
2860 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
2861
2862 do {
2863 freelist = page->freelist;
2864 counters = page->counters;
2865
2866 new.counters = counters;
2867 VM_BUG_ON(!new.frozen);
2868
2869 new.inuse = page->objects;
2870 new.frozen = freelist != NULL;
2871
2872 } while (!__cmpxchg_double_slab(s, page,
2873 freelist, counters,
2874 NULL, new.counters,
2875 "get_freelist"));
2876
2877 return freelist;
2878 }
2879
2880 /*
2881 * Slow path. The lockless freelist is empty or we need to perform
2882 * debugging duties.
2883 *
2884 * Processing is still very fast if new objects have been freed to the
2885 * regular freelist. In that case we simply take over the regular freelist
2886 * as the lockless freelist and zap the regular freelist.
2887 *
2888 * If that is not working then we fall back to the partial lists. We take the
2889 * first element of the freelist as the object to allocate now and move the
2890 * rest of the freelist to the lockless freelist.
2891 *
2892 * And if we were unable to get a new slab from the partial slab lists then
2893 * we need to allocate a new slab. This is the slowest path since it involves
2894 * a call to the page allocator and the setup of a new slab.
2895 *
2896 * Version of __slab_alloc to use when we know that preemption is
2897 * already disabled (which is the case for bulk allocation).
2898 */
2899 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2900 unsigned long addr, struct kmem_cache_cpu *c)
2901 {
2902 void *freelist;
2903 struct page *page;
2904 unsigned long flags;
2905
2906 stat(s, ALLOC_SLOWPATH);
2907
2908 reread_page:
2909
2910 page = READ_ONCE(c->page);
2911 if (!page) {
2912 /*
2913 * if the node is not online or has no normal memory, just
2914 * ignore the node constraint
2915 */
2916 if (unlikely(node != NUMA_NO_NODE &&
2917 !node_isset(node, slab_nodes)))
2918 node = NUMA_NO_NODE;
2919 goto new_slab;
2920 }
2921 redo:
2922
2923 if (unlikely(!node_match(page, node))) {
2924 /*
2925 * same as above but node_match() being false already
2926 * implies node != NUMA_NO_NODE
2927 */
2928 if (!node_isset(node, slab_nodes)) {
2929 node = NUMA_NO_NODE;
2930 goto redo;
2931 } else {
2932 stat(s, ALLOC_NODE_MISMATCH);
2933 goto deactivate_slab;
2934 }
2935 }
2936
2937 /*
2938 * By rights, we should be searching for a slab page that was
2939 * PFMEMALLOC but right now, we are losing the pfmemalloc
2940 * information when the page leaves the per-cpu allocator
2941 */
2942 if (unlikely(!pfmemalloc_match_unsafe(page, gfpflags)))
2943 goto deactivate_slab;
2944
2945 /* must check again c->page in case we got preempted and it changed */
2946 local_lock_irqsave(&s->cpu_slab->lock, flags);
2947 if (unlikely(page != c->page)) {
2948 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
2949 goto reread_page;
2950 }
2951 freelist = c->freelist;
2952 if (freelist)
2953 goto load_freelist;
2954
2955 freelist = get_freelist(s, page);
2956
2957 if (!freelist) {
2958 c->page = NULL;
2959 c->tid = next_tid(c->tid);
2960 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
2961 stat(s, DEACTIVATE_BYPASS);
2962 goto new_slab;
2963 }
2964
2965 stat(s, ALLOC_REFILL);
2966
2967 load_freelist:
2968
2969 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
2970
2971 /*
2972 * freelist is pointing to the list of objects to be used.
2973 * page is pointing to the page from which the objects are obtained.
2974 * That page must be frozen for per cpu allocations to work.
2975 */
2976 VM_BUG_ON(!c->page->frozen);
2977 c->freelist = get_freepointer(s, freelist);
2978 c->tid = next_tid(c->tid);
2979 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
2980 return freelist;
2981
2982 deactivate_slab:
2983
2984 local_lock_irqsave(&s->cpu_slab->lock, flags);
2985 if (page != c->page) {
2986 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
2987 goto reread_page;
2988 }
2989 freelist = c->freelist;
2990 c->page = NULL;
2991 c->freelist = NULL;
2992 c->tid = next_tid(c->tid);
2993 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
2994 deactivate_slab(s, page, freelist);
2995
2996 new_slab:
2997
2998 if (slub_percpu_partial(c)) {
2999 local_lock_irqsave(&s->cpu_slab->lock, flags);
3000 if (unlikely(c->page)) {
3001 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3002 goto reread_page;
3003 }
3004 if (unlikely(!slub_percpu_partial(c))) {
3005 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3006 /* we were preempted and partial list got empty */
3007 goto new_objects;
3008 }
3009
3010 page = c->page = slub_percpu_partial(c);
3011 slub_set_percpu_partial(c, page);
3012 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3013 stat(s, CPU_PARTIAL_ALLOC);
3014 goto redo;
3015 }
3016
3017 new_objects:
3018
3019 freelist = get_partial(s, gfpflags, node, &page);
3020 if (freelist)
3021 goto check_new_page;
3022
3023 slub_put_cpu_ptr(s->cpu_slab);
3024 page = new_slab(s, gfpflags, node);
3025 c = slub_get_cpu_ptr(s->cpu_slab);
3026
3027 if (unlikely(!page)) {
3028 slab_out_of_memory(s, gfpflags, node);
3029 return NULL;
3030 }
3031
3032 /*
3033 * No other reference to the page yet so we can
3034 * muck around with it freely without cmpxchg
3035 */
3036 freelist = page->freelist;
3037 page->freelist = NULL;
3038
3039 stat(s, ALLOC_SLAB);
3040
3041 check_new_page:
3042
3043 if (kmem_cache_debug(s)) {
3044 if (!alloc_debug_processing(s, page, freelist, addr)) {
3045 /* Slab failed checks. Next slab needed */
3046 goto new_slab;
3047 } else {
3048 /*
3049 * For debug case, we don't load freelist so that all
3050 * allocations go through alloc_debug_processing()
3051 */
3052 goto return_single;
3053 }
3054 }
3055
3056 if (unlikely(!pfmemalloc_match(page, gfpflags)))
3057 /*
3058 * For !pfmemalloc_match() case we don't load freelist so that
3059 * we don't make further mismatched allocations easier.
3060 */
3061 goto return_single;
3062
3063 retry_load_page:
3064
3065 local_lock_irqsave(&s->cpu_slab->lock, flags);
3066 if (unlikely(c->page)) {
3067 void *flush_freelist = c->freelist;
3068 struct page *flush_page = c->page;
3069
3070 c->page = NULL;
3071 c->freelist = NULL;
3072 c->tid = next_tid(c->tid);
3073
3074 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3075
3076 deactivate_slab(s, flush_page, flush_freelist);
3077
3078 stat(s, CPUSLAB_FLUSH);
3079
3080 goto retry_load_page;
3081 }
3082 c->page = page;
3083
3084 goto load_freelist;
3085
3086 return_single:
3087
3088 deactivate_slab(s, page, get_freepointer(s, freelist));
3089 return freelist;
3090 }
3091
3092 /*
3093 * A wrapper for ___slab_alloc() for contexts where preemption is not yet
3094 * disabled. Compensates for possible cpu changes by refetching the per cpu area
3095 * pointer.
3096 */
3097 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
3098 unsigned long addr, struct kmem_cache_cpu *c)
3099 {
3100 void *p;
3101
3102 #ifdef CONFIG_PREEMPT_COUNT
3103 /*
3104 * We may have been preempted and rescheduled on a different
3105 * cpu before disabling preemption. Need to reload cpu area
3106 * pointer.
3107 */
3108 c = slub_get_cpu_ptr(s->cpu_slab);
3109 #endif
3110
3111 p = ___slab_alloc(s, gfpflags, node, addr, c);
3112 #ifdef CONFIG_PREEMPT_COUNT
3113 slub_put_cpu_ptr(s->cpu_slab);
3114 #endif
3115 return p;
3116 }
3117
3118 /*
3119 * If the object has been wiped upon free, make sure it's fully initialized by
3120 * zeroing out freelist pointer.
3121 */
3122 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
3123 void *obj)
3124 {
3125 if (unlikely(slab_want_init_on_free(s)) && obj)
3126 memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
3127 0, sizeof(void *));
3128 }
3129
3130 /*
3131 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
3132 * have the fastpath folded into their functions. So no function call
3133 * overhead for requests that can be satisfied on the fastpath.
3134 *
3135 * The fastpath works by first checking if the lockless freelist can be used.
3136 * If not then __slab_alloc is called for slow processing.
3137 *
3138 * Otherwise we can simply pick the next object from the lockless free list.
3139 */
3140 static __always_inline void *slab_alloc_node(struct kmem_cache *s,
3141 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
3142 {
3143 void *object;
3144 struct kmem_cache_cpu *c;
3145 struct page *page;
3146 unsigned long tid;
3147 struct obj_cgroup *objcg = NULL;
3148 bool init = false;
3149
3150 s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
3151 if (!s)
3152 return NULL;
3153
3154 object = kfence_alloc(s, orig_size, gfpflags);
3155 if (unlikely(object))
3156 goto out;
3157
3158 redo:
3159 /*
3160 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
3161 * enabled. We may switch back and forth between cpus while
3162 * reading from one cpu area. That does not matter as long
3163 * as we end up on the original cpu again when doing the cmpxchg.
3164 *
3165 * We must guarantee that tid and kmem_cache_cpu are retrieved on the
3166 * same cpu. We read first the kmem_cache_cpu pointer and use it to read
3167 * the tid. If we are preempted and switched to another cpu between the
3168 * two reads, it's OK as the two are still associated with the same cpu
3169 * and cmpxchg later will validate the cpu.
3170 */
3171 c = raw_cpu_ptr(s->cpu_slab);
3172 tid = READ_ONCE(c->tid);
3173
3174 /*
3175 * Irqless object alloc/free algorithm used here depends on sequence
3176 * of fetching cpu_slab's data. tid should be fetched before anything
3177 * on c to guarantee that object and page associated with previous tid
3178 * won't be used with current tid. If we fetch tid first, object and
3179 * page could be one associated with next tid and our alloc/free
3180 	 * request will fail. In this case, we will retry. So, no problem.
3181 */
3182 barrier();
3183
3184 /*
3185 * The transaction ids are globally unique per cpu and per operation on
3186 	 * a per cpu queue. Thus they guarantee that the cmpxchg_double
3187 * occurs on the right processor and that there was no operation on the
3188 * linked list in between.
3189 */
3190
3191 object = c->freelist;
3192 page = c->page;
3193 /*
3194 * We cannot use the lockless fastpath on PREEMPT_RT because if a
3195 * slowpath has taken the local_lock_irqsave(), it is not protected
3196 * against a fast path operation in an irq handler. So we need to take
3197 * the slow path which uses local_lock. It is still relatively fast if
3198 * there is a suitable cpu freelist.
3199 */
3200 if (IS_ENABLED(CONFIG_PREEMPT_RT) ||
3201 unlikely(!object || !page || !node_match(page, node))) {
3202 object = __slab_alloc(s, gfpflags, node, addr, c);
3203 } else {
3204 void *next_object = get_freepointer_safe(s, object);
3205
3206 /*
3207 * The cmpxchg will only match if there was no additional
3208 * operation and if we are on the right processor.
3209 *
3210 * The cmpxchg does the following atomically (without lock
3211 * semantics!)
3212 * 1. Relocate first pointer to the current per cpu area.
3213 * 2. Verify that tid and freelist have not been changed
3214 * 3. If they were not changed replace tid and freelist
3215 *
3216 * Since this is without lock semantics the protection is only
3217 * against code executing on this cpu *not* from access by
3218 * other cpus.
3219 */
3220 if (unlikely(!this_cpu_cmpxchg_double(
3221 s->cpu_slab->freelist, s->cpu_slab->tid,
3222 object, tid,
3223 next_object, next_tid(tid)))) {
3224
3225 note_cmpxchg_failure("slab_alloc", s, tid);
3226 goto redo;
3227 }
3228 prefetch_freepointer(s, next_object);
3229 stat(s, ALLOC_FASTPATH);
3230 }
3231
3232 maybe_wipe_obj_freeptr(s, object);
3233 init = slab_want_init_on_alloc(gfpflags, s);
3234
3235 out:
3236 slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init);
3237
3238 return object;
3239 }
3240
3241 static __always_inline void *slab_alloc(struct kmem_cache *s,
3242 gfp_t gfpflags, unsigned long addr, size_t orig_size)
3243 {
3244 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr, orig_size);
3245 }
3246
3247 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
3248 {
3249 void *ret = slab_alloc(s, gfpflags, _RET_IP_, s->object_size);
3250
3251 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
3252 s->size, gfpflags);
3253
3254 return ret;
3255 }
3256 EXPORT_SYMBOL(kmem_cache_alloc);
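
/*
 * Typical caller-side usage (illustrative sketch only; "foo" and foo_cache
 * are hypothetical and not defined in this file):
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	p = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	if (p) {
 *		...
 *		kmem_cache_free(foo_cache, p);
 *	}
 *
 * With the fastpath above, such an alloc/free pair normally never leaves
 * the per cpu freelist.
 */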
3257
3258 #ifdef CONFIG_TRACING
3259 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
3260 {
3261 void *ret = slab_alloc(s, gfpflags, _RET_IP_, size);
3262 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
3263 ret = kasan_kmalloc(s, ret, size, gfpflags);
3264 return ret;
3265 }
3266 EXPORT_SYMBOL(kmem_cache_alloc_trace);
3267 #endif
3268
3269 #ifdef CONFIG_NUMA
3270 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
3271 {
3272 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, s->object_size);
3273
3274 trace_kmem_cache_alloc_node(_RET_IP_, ret,
3275 s->object_size, s->size, gfpflags, node);
3276
3277 return ret;
3278 }
3279 EXPORT_SYMBOL(kmem_cache_alloc_node);
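
/*
 * Illustrative only: a caller that knows where the object will mostly be
 * touched can keep it node local, e.g.
 *
 *	p = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, cpu_to_node(cpu));
 *
 * (foo_cache is the hypothetical cache from the sketch above.)
 */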
3280
3281 #ifdef CONFIG_TRACING
3282 void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
3283 gfp_t gfpflags,
3284 int node, size_t size)
3285 {
3286 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, size);
3287
3288 trace_kmalloc_node(_RET_IP_, ret,
3289 size, s->size, gfpflags, node);
3290
3291 ret = kasan_kmalloc(s, ret, size, gfpflags);
3292 return ret;
3293 }
3294 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3295 #endif
3296 #endif /* CONFIG_NUMA */
3297
3298 /*
3299 * Slow path handling. This may still be called frequently since objects
3300 * have a longer lifetime than the cpu slabs in most processing loads.
3301 *
3302 * So we still attempt to reduce cache line usage. Just take the slab
3303 * lock and free the item. If there is no additional partial page
3304 * handling required then we can return immediately.
3305 */
3306 static void __slab_free(struct kmem_cache *s, struct page *page,
3307 void *head, void *tail, int cnt,
3308 unsigned long addr)
3309
3310 {
3311 void *prior;
3312 int was_frozen;
3313 struct page new;
3314 unsigned long counters;
3315 struct kmem_cache_node *n = NULL;
3316 unsigned long flags;
3317
3318 stat(s, FREE_SLOWPATH);
3319
3320 if (kfence_free(head))
3321 return;
3322
3323 if (kmem_cache_debug(s) &&
3324 !free_debug_processing(s, page, head, tail, cnt, addr))
3325 return;
3326
3327 do {
3328 if (unlikely(n)) {
3329 spin_unlock_irqrestore(&n->list_lock, flags);
3330 n = NULL;
3331 }
3332 prior = page->freelist;
3333 counters = page->counters;
3334 set_freepointer(s, tail, prior);
3335 new.counters = counters;
3336 was_frozen = new.frozen;
3337 new.inuse -= cnt;
3338 if ((!new.inuse || !prior) && !was_frozen) {
3339
3340 if (kmem_cache_has_cpu_partial(s) && !prior) {
3341
3342 /*
3343 * Slab was on no list before and will be
3344 * partially empty
3345 * We can defer the list move and instead
3346 * freeze it.
3347 */
3348 new.frozen = 1;
3349
3350 } else { /* Needs to be taken off a list */
3351
3352 n = get_node(s, page_to_nid(page));
3353 /*
3354 * Speculatively acquire the list_lock.
3355 * If the cmpxchg does not succeed then we may
3356 * drop the list_lock without any processing.
3357 *
3358 * Otherwise the list_lock will synchronize with
3359 * other processors updating the list of slabs.
3360 */
3361 spin_lock_irqsave(&n->list_lock, flags);
3362
3363 }
3364 }
3365
3366 } while (!cmpxchg_double_slab(s, page,
3367 prior, counters,
3368 head, new.counters,
3369 "__slab_free"));
3370
3371 if (likely(!n)) {
3372
3373 if (likely(was_frozen)) {
3374 /*
3375 * The list lock was not taken therefore no list
3376 * activity can be necessary.
3377 */
3378 stat(s, FREE_FROZEN);
3379 } else if (new.frozen) {
3380 /*
3381 * If we just froze the page then put it onto the
3382 * per cpu partial list.
3383 */
3384 put_cpu_partial(s, page, 1);
3385 stat(s, CPU_PARTIAL_FREE);
3386 }
3387
3388 return;
3389 }
3390
3391 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
3392 goto slab_empty;
3393
3394 /*
3395 * Objects left in the slab. If it was not on the partial list before
3396 * then add it.
3397 */
3398 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
3399 remove_full(s, n, page);
3400 add_partial(n, page, DEACTIVATE_TO_TAIL);
3401 stat(s, FREE_ADD_PARTIAL);
3402 }
3403 spin_unlock_irqrestore(&n->list_lock, flags);
3404 return;
3405
3406 slab_empty:
3407 if (prior) {
3408 /*
3409 * Slab on the partial list.
3410 */
3411 remove_partial(n, page);
3412 stat(s, FREE_REMOVE_PARTIAL);
3413 } else {
3414 /* Slab must be on the full list */
3415 remove_full(s, n, page);
3416 }
3417
3418 spin_unlock_irqrestore(&n->list_lock, flags);
3419 stat(s, FREE_SLAB);
3420 discard_slab(s, page);
3421 }
3422
3423 /*
3424 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
3425 * can perform fastpath freeing without additional function calls.
3426 *
3427 * The fastpath is only possible if we are freeing to the current cpu slab
3428  * of this processor. This is typically the case if we have just allocated
3429 * the item before.
3430 *
3431 * If fastpath is not possible then fall back to __slab_free where we deal
3432 * with all sorts of special processing.
3433 *
3434 * Bulk free of a freelist with several objects (all pointing to the
3435  * same page) is possible by specifying head and tail ptr, plus objects
3436  * count (cnt). Bulk free is indicated by the tail pointer being set.
3437 */
3438 static __always_inline void do_slab_free(struct kmem_cache *s,
3439 struct page *page, void *head, void *tail,
3440 int cnt, unsigned long addr)
3441 {
3442 void *tail_obj = tail ? : head;
3443 struct kmem_cache_cpu *c;
3444 unsigned long tid;
3445
3446 /* memcg_slab_free_hook() is already called for bulk free. */
3447 if (!tail)
3448 memcg_slab_free_hook(s, &head, 1);
3449 redo:
3450 /*
3451 	 * Determine the current cpu's per cpu slab.
3452 * The cpu may change afterward. However that does not matter since
3453 * data is retrieved via this pointer. If we are on the same cpu
3454 * during the cmpxchg then the free will succeed.
3455 */
3456 c = raw_cpu_ptr(s->cpu_slab);
3457 tid = READ_ONCE(c->tid);
3458
3459 /* Same with comment on barrier() in slab_alloc_node() */
3460 barrier();
3461
3462 if (likely(page == c->page)) {
3463 #ifndef CONFIG_PREEMPT_RT
3464 void **freelist = READ_ONCE(c->freelist);
3465
3466 set_freepointer(s, tail_obj, freelist);
3467
3468 if (unlikely(!this_cpu_cmpxchg_double(
3469 s->cpu_slab->freelist, s->cpu_slab->tid,
3470 freelist, tid,
3471 head, next_tid(tid)))) {
3472
3473 note_cmpxchg_failure("slab_free", s, tid);
3474 goto redo;
3475 }
3476 #else /* CONFIG_PREEMPT_RT */
3477 /*
3478 * We cannot use the lockless fastpath on PREEMPT_RT because if
3479 * a slowpath has taken the local_lock_irqsave(), it is not
3480 * protected against a fast path operation in an irq handler. So
3481 * we need to take the local_lock. We shouldn't simply defer to
3482 * __slab_free() as that wouldn't use the cpu freelist at all.
3483 */
3484 void **freelist;
3485
3486 local_lock(&s->cpu_slab->lock);
3487 c = this_cpu_ptr(s->cpu_slab);
3488 if (unlikely(page != c->page)) {
3489 local_unlock(&s->cpu_slab->lock);
3490 goto redo;
3491 }
3492 tid = c->tid;
3493 freelist = c->freelist;
3494
3495 set_freepointer(s, tail_obj, freelist);
3496 c->freelist = head;
3497 c->tid = next_tid(tid);
3498
3499 local_unlock(&s->cpu_slab->lock);
3500 #endif
3501 stat(s, FREE_FASTPATH);
3502 } else
3503 __slab_free(s, page, head, tail_obj, cnt, addr);
3504
3505 }
3506
3507 static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
3508 void *head, void *tail, int cnt,
3509 unsigned long addr)
3510 {
3511 /*
3512 * With KASAN enabled slab_free_freelist_hook modifies the freelist
3513 * to remove objects, whose reuse must be delayed.
3514 */
3515 if (slab_free_freelist_hook(s, &head, &tail, &cnt))
3516 do_slab_free(s, page, head, tail, cnt, addr);
3517 }
3518
3519 #ifdef CONFIG_KASAN_GENERIC
3520 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
3521 {
3522 do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
3523 }
3524 #endif
3525
3526 void kmem_cache_free(struct kmem_cache *s, void *x)
3527 {
3528 s = cache_from_obj(s, x);
3529 if (!s)
3530 return;
3531 slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
3532 trace_kmem_cache_free(_RET_IP_, x, s->name);
3533 }
3534 EXPORT_SYMBOL(kmem_cache_free);
3535
3536 struct detached_freelist {
3537 struct page *page;
3538 void *tail;
3539 void *freelist;
3540 int cnt;
3541 struct kmem_cache *s;
3542 };
3543
3544 static inline void free_nonslab_page(struct page *page, void *object)
3545 {
3546 unsigned int order = compound_order(page);
3547
3548 VM_BUG_ON_PAGE(!PageCompound(page), page);
3549 kfree_hook(object);
3550 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
3551 __free_pages(page, order);
3552 }
3553
3554 /*
3555  * This function progressively scans the array with free objects (with
3556  * a limited look ahead) and extracts objects belonging to the same
3557  * page.  It builds a detached freelist directly within the given
3558  * page/objects.  This can happen without any need for
3559  * synchronization, because the objects are owned by the running process.
3560  * The freelist is built up as a single linked list in the objects.
3561  * The idea is that this detached freelist can then be bulk
3562  * transferred to the real freelist(s), but only requiring a single
3563  * synchronization primitive.  Look ahead in the array is limited for
3564  * performance reasons.
3565  */
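/*
 * Illustrative example (hypothetical objects): with p[] = {o0, o1, o2}
 * all belonging to the same page, the scan starts from the end of the
 * array, so o2 becomes df->tail, and o1 and o0 are then linked in front
 * of it inside the objects themselves. The result is df->freelist ==
 * o0 -> o1 -> o2, df->tail == o2 and df->cnt == 3, which a single
 * __slab_free() can then return in one go.
 */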
3566 static inline
3567 int build_detached_freelist(struct kmem_cache *s, size_t size,
3568 void **p, struct detached_freelist *df)
3569 {
3570 size_t first_skipped_index = 0;
3571 int lookahead = 3;
3572 void *object;
3573 struct page *page;
3574
3575 /* Always re-init detached_freelist */
3576 df->page = NULL;
3577
3578 do {
3579 object = p[--size];
3580 /* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
3581 } while (!object && size);
3582
3583 if (!object)
3584 return 0;
3585
3586 page = virt_to_head_page(object);
3587 if (!s) {
3588 		/* Handle kmalloc'ed objects */
3589 if (unlikely(!PageSlab(page))) {
3590 free_nonslab_page(page, object);
3591 p[size] = NULL; /* mark object processed */
3592 return size;
3593 }
3594 /* Derive kmem_cache from object */
3595 df->s = page->slab_cache;
3596 } else {
3597 df->s = cache_from_obj(s, object); /* Support for memcg */
3598 }
3599
3600 if (is_kfence_address(object)) {
3601 slab_free_hook(df->s, object, false);
3602 __kfence_free(object);
3603 p[size] = NULL; /* mark object processed */
3604 return size;
3605 }
3606
3607 /* Start new detached freelist */
3608 df->page = page;
3609 set_freepointer(df->s, object, NULL);
3610 df->tail = object;
3611 df->freelist = object;
3612 p[size] = NULL; /* mark object processed */
3613 df->cnt = 1;
3614
3615 while (size) {
3616 object = p[--size];
3617 if (!object)
3618 continue; /* Skip processed objects */
3619
3620 /* df->page is always set at this point */
3621 if (df->page == virt_to_head_page(object)) {
3622 			/* Opportunistically build up the freelist */
3623 set_freepointer(df->s, object, df->freelist);
3624 df->freelist = object;
3625 df->cnt++;
3626 p[size] = NULL; /* mark object processed */
3627
3628 continue;
3629 }
3630
3631 /* Limit look ahead search */
3632 if (!--lookahead)
3633 break;
3634
3635 if (!first_skipped_index)
3636 first_skipped_index = size + 1;
3637 }
3638
3639 return first_skipped_index;
3640 }
3641
3642 /* Note that interrupts must be enabled when calling this function. */
3643 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
3644 {
3645 if (WARN_ON(!size))
3646 return;
3647
3648 memcg_slab_free_hook(s, p, size);
3649 do {
3650 struct detached_freelist df;
3651
3652 size = build_detached_freelist(s, size, p, &df);
3653 if (!df.page)
3654 continue;
3655
3656 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
3657 } while (likely(size));
3658 }
3659 EXPORT_SYMBOL(kmem_cache_free_bulk);
3660
3661 /* Note that interrupts must be enabled when calling this function. */
3662 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3663 void **p)
3664 {
3665 struct kmem_cache_cpu *c;
3666 int i;
3667 struct obj_cgroup *objcg = NULL;
3668
3669 /* memcg and kmem_cache debug support */
3670 s = slab_pre_alloc_hook(s, &objcg, size, flags);
3671 if (unlikely(!s))
3672 return false;
3673 /*
3674 * Drain objects in the per cpu slab, while disabling local
3675 	 * IRQs, which protects against PREEMPT and interrupt
3676 	 * handlers invoking the normal fastpath.
3677 */
3678 c = slub_get_cpu_ptr(s->cpu_slab);
3679 local_lock_irq(&s->cpu_slab->lock);
3680
3681 for (i = 0; i < size; i++) {
3682 void *object = kfence_alloc(s, s->object_size, flags);
3683
3684 if (unlikely(object)) {
3685 p[i] = object;
3686 continue;
3687 }
3688
3689 object = c->freelist;
3690 if (unlikely(!object)) {
3691 /*
3692 * We may have removed an object from c->freelist using
3693 * the fastpath in the previous iteration; in that case,
3694 * c->tid has not been bumped yet.
3695 * Since ___slab_alloc() may reenable interrupts while
3696 * allocating memory, we should bump c->tid now.
3697 */
3698 c->tid = next_tid(c->tid);
3699
3700 local_unlock_irq(&s->cpu_slab->lock);
3701
3702 /*
3703 			 * Invoking the slow path likely has the side-effect
3704 			 * of re-populating the per CPU c->freelist.
3705 */
3706 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
3707 _RET_IP_, c);
3708 if (unlikely(!p[i]))
3709 goto error;
3710
3711 c = this_cpu_ptr(s->cpu_slab);
3712 maybe_wipe_obj_freeptr(s, p[i]);
3713
3714 local_lock_irq(&s->cpu_slab->lock);
3715
3716 continue; /* goto for-loop */
3717 }
3718 c->freelist = get_freepointer(s, object);
3719 p[i] = object;
3720 maybe_wipe_obj_freeptr(s, p[i]);
3721 }
3722 c->tid = next_tid(c->tid);
3723 local_unlock_irq(&s->cpu_slab->lock);
3724 slub_put_cpu_ptr(s->cpu_slab);
3725
3726 /*
3727 * memcg and kmem_cache debug support and memory initialization.
3728 * Done outside of the IRQ disabled fastpath loop.
3729 */
3730 slab_post_alloc_hook(s, objcg, flags, size, p,
3731 slab_want_init_on_alloc(flags, s));
3732 return i;
3733 error:
3734 slub_put_cpu_ptr(s->cpu_slab);
3735 slab_post_alloc_hook(s, objcg, flags, i, p, false);
3736 __kmem_cache_free_bulk(s, i, p);
3737 return 0;
3738 }
3739 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
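/*
 * Sketch of a typical caller pairing the two bulk interfaces above
 * (my_cache and the count are hypothetical):
 *
 *	void *objs[16];
 *	int n = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, 16, objs);
 *
 *	if (n)
 *		kmem_cache_free_bulk(my_cache, n, objs);
 *
 * The return value here is either 16 or 0: on failure the partially
 * allocated objects are freed again before returning.
 */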
3740
3741
3742 /*
3743 * Object placement in a slab is made very easy because we always start at
3744 * offset 0. If we tune the size of the object to the alignment then we can
3745 * get the required alignment by putting one properly sized object after
3746 * another.
3747 *
3748 * Notice that the allocation order determines the sizes of the per cpu
3749 * caches. Each processor has always one slab available for allocations.
3750 * Increasing the allocation order reduces the number of times that slabs
3751 * must be moved on and off the partial lists and is therefore a factor in
3752 * locking overhead.
3753 */
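/*
 * For example (illustrative numbers): 256 byte objects on a 4 KiB slab
 * sit at offsets 0, 256, 512, ..., giving 16 objects per slab with no
 * internal padding between them.
 */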
3754
3755 /*
3756 * Minimum / Maximum order of slab pages. This influences locking overhead
3757 * and slab fragmentation. A higher order reduces the number of partial slabs
3758 * and increases the number of allocations possible without having to
3759 * take the list_lock.
3760 */
3761 static unsigned int slub_min_order;
3762 static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
3763 static unsigned int slub_min_objects;
3764
3765 /*
3766  * Calculate the order of allocation given a slab object size.
3767 *
3768 * The order of allocation has significant impact on performance and other
3769 * system components. Generally order 0 allocations should be preferred since
3770 * order 0 does not cause fragmentation in the page allocator. Larger objects
3771  * can be problematic to put into order 0 slabs because there may be too much
3772 * unused space left. We go to a higher order if more than 1/16th of the slab
3773 * would be wasted.
3774 *
3775 * In order to reach satisfactory performance we must ensure that a minimum
3776 * number of objects is in one slab. Otherwise we may generate too much
3777 * activity on the partial lists which requires taking the list_lock. This is
3778 * less a concern for large slabs though which are rarely used.
3779 *
3780 * slub_max_order specifies the order where we begin to stop considering the
3781 * number of objects in a slab as critical. If we reach slub_max_order then
3782 * we try to keep the page order as low as possible. So we accept more waste
3783 * of space in favor of a small page order.
3784 *
3785 * Higher order allocations also allow the placement of more objects in a
3786 * slab and thereby reduce object handling overhead. If the user has
3787 * requested a higher minimum order then we start with that one instead of
3788 * the smallest order which will fit the object.
3789 */
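/*
 * Worked example (illustrative size): for a 720 byte object on 4 KiB
 * pages, order 0 wastes 4096 % 720 = 496 bytes, which is more than
 * 4096 / 16 = 256, so with fract_leftover == 16 the loop moves on to
 * order 1, where 8192 % 720 = 272 <= 8192 / 16 = 512 and the search stops.
 */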
3790 static inline unsigned int slab_order(unsigned int size,
3791 unsigned int min_objects, unsigned int max_order,
3792 unsigned int fract_leftover)
3793 {
3794 unsigned int min_order = slub_min_order;
3795 unsigned int order;
3796
3797 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
3798 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
3799
3800 for (order = max(min_order, (unsigned int)get_order(min_objects * size));
3801 order <= max_order; order++) {
3802
3803 unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
3804 unsigned int rem;
3805
3806 rem = slab_size % size;
3807
3808 if (rem <= slab_size / fract_leftover)
3809 break;
3810 }
3811
3812 return order;
3813 }
3814
3815 static inline int calculate_order(unsigned int size)
3816 {
3817 unsigned int order;
3818 unsigned int min_objects;
3819 unsigned int max_objects;
3820 unsigned int nr_cpus;
3821
3822 /*
3823 * Attempt to find best configuration for a slab. This
3824 * works by first attempting to generate a layout with
3825 * the best configuration and backing off gradually.
3826 *
3827 * First we increase the acceptable waste in a slab. Then
3828 * we reduce the minimum objects required in a slab.
3829 */
3830 min_objects = slub_min_objects;
3831 if (!min_objects) {
3832 /*
3833 * Some architectures will only update present cpus when
3834 * onlining them, so don't trust the number if it's just 1. But
3835 * we also don't want to use nr_cpu_ids always, as on some other
3836 * architectures, there can be many possible cpus, but never
3837 * onlined. Here we compromise between trying to avoid too high
3838 * order on systems that appear larger than they are, and too
3839 * low order on systems that appear smaller than they are.
3840 */
3841 nr_cpus = num_present_cpus();
3842 if (nr_cpus <= 1)
3843 nr_cpus = nr_cpu_ids;
3844 min_objects = 4 * (fls(nr_cpus) + 1);
3845 }
3846 max_objects = order_objects(slub_max_order, size);
3847 min_objects = min(min_objects, max_objects);
3848
3849 while (min_objects > 1) {
3850 unsigned int fraction;
3851
3852 fraction = 16;
3853 while (fraction >= 4) {
3854 order = slab_order(size, min_objects,
3855 slub_max_order, fraction);
3856 if (order <= slub_max_order)
3857 return order;
3858 fraction /= 2;
3859 }
3860 min_objects--;
3861 }
3862
3863 /*
3864 * We were unable to place multiple objects in a slab. Now
3865  * let's see if we can place a single object there.
3866 */
3867 order = slab_order(size, 1, slub_max_order, 1);
3868 if (order <= slub_max_order)
3869 return order;
3870
3871 /*
3872 * Doh this slab cannot be placed using slub_max_order.
3873 */
3874 order = slab_order(size, 1, MAX_ORDER, 1);
3875 if (order < MAX_ORDER)
3876 return order;
3877 return -ENOSYS;
3878 }
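/*
 * Illustrative default: with 16 present cpus, fls(16) == 5, so the
 * heuristic above starts from min_objects = 4 * (5 + 1) = 24 and only
 * backs off (first on acceptable waste, then on min_objects) if no
 * order <= slub_max_order can hold that many objects.
 */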
3879
3880 static void
3881 init_kmem_cache_node(struct kmem_cache_node *n)
3882 {
3883 n->nr_partial = 0;
3884 spin_lock_init(&n->list_lock);
3885 INIT_LIST_HEAD(&n->partial);
3886 #ifdef CONFIG_SLUB_DEBUG
3887 atomic_long_set(&n->nr_slabs, 0);
3888 atomic_long_set(&n->total_objects, 0);
3889 INIT_LIST_HEAD(&n->full);
3890 #endif
3891 }
3892
3893 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
3894 {
3895 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
3896 KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
3897
3898 /*
3899 * Must align to double word boundary for the double cmpxchg
3900 * instructions to work; see __pcpu_double_call_return_bool().
3901 */
3902 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
3903 2 * sizeof(void *));
3904
3905 if (!s->cpu_slab)
3906 return 0;
3907
3908 init_kmem_cache_cpus(s);
3909
3910 return 1;
3911 }
3912
3913 static struct kmem_cache *kmem_cache_node;
3914
3915 /*
3916 * No kmalloc_node yet so do it by hand. We know that this is the first
3917 * slab on the node for this slabcache. There are no concurrent accesses
3918 * possible.
3919 *
3920 * Note that this function only works on the kmem_cache_node
3921 * when allocating for the kmem_cache_node. This is used for bootstrapping
3922 * memory on a fresh node that has no slab structures yet.
3923 */
3924 static void early_kmem_cache_node_alloc(int node)
3925 {
3926 struct page *page;
3927 struct kmem_cache_node *n;
3928
3929 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
3930
3931 page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
3932
3933 BUG_ON(!page);
3934 if (page_to_nid(page) != node) {
3935 pr_err("SLUB: Unable to allocate memory from node %d\n", node);
3936 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
3937 }
3938
3939 n = page->freelist;
3940 BUG_ON(!n);
3941 #ifdef CONFIG_SLUB_DEBUG
3942 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
3943 init_tracking(kmem_cache_node, n);
3944 #endif
3945 n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false);
3946 page->freelist = get_freepointer(kmem_cache_node, n);
3947 page->inuse = 1;
3948 page->frozen = 0;
3949 kmem_cache_node->node[node] = n;
3950 init_kmem_cache_node(n);
3951 inc_slabs_node(kmem_cache_node, node, page->objects);
3952
3953 /*
3954 * No locks need to be taken here as it has just been
3955 * initialized and there is no concurrent access.
3956 */
3957 __add_partial(n, page, DEACTIVATE_TO_HEAD);
3958 }
3959
3960 static void free_kmem_cache_nodes(struct kmem_cache *s)
3961 {
3962 int node;
3963 struct kmem_cache_node *n;
3964
3965 for_each_kmem_cache_node(s, node, n) {
3966 s->node[node] = NULL;
3967 kmem_cache_free(kmem_cache_node, n);
3968 }
3969 }
3970
3971 void __kmem_cache_release(struct kmem_cache *s)
3972 {
3973 cache_random_seq_destroy(s);
3974 free_percpu(s->cpu_slab);
3975 free_kmem_cache_nodes(s);
3976 }
3977
3978 static int init_kmem_cache_nodes(struct kmem_cache *s)
3979 {
3980 int node;
3981
3982 for_each_node_mask(node, slab_nodes) {
3983 struct kmem_cache_node *n;
3984
3985 if (slab_state == DOWN) {
3986 early_kmem_cache_node_alloc(node);
3987 continue;
3988 }
3989 n = kmem_cache_alloc_node(kmem_cache_node,
3990 GFP_KERNEL, node);
3991
3992 if (!n) {
3993 free_kmem_cache_nodes(s);
3994 return 0;
3995 }
3996
3997 init_kmem_cache_node(n);
3998 s->node[node] = n;
3999 }
4000 return 1;
4001 }
4002
4003 static void set_min_partial(struct kmem_cache *s, unsigned long min)
4004 {
4005 if (min < MIN_PARTIAL)
4006 min = MIN_PARTIAL;
4007 else if (min > MAX_PARTIAL)
4008 min = MAX_PARTIAL;
4009 s->min_partial = min;
4010 }
4011
4012 static void set_cpu_partial(struct kmem_cache *s)
4013 {
4014 #ifdef CONFIG_SLUB_CPU_PARTIAL
4015 /*
4016 	 * cpu_partial determines the maximum number of objects kept in the
4017 * per cpu partial lists of a processor.
4018 *
4019 * Per cpu partial lists mainly contain slabs that just have one
4020 * object freed. If they are used for allocation then they can be
4021 * filled up again with minimal effort. The slab will never hit the
4022 * per node partial lists and therefore no locking will be required.
4023 *
4024 * This setting also determines
4025 *
4026 * A) The number of objects from per cpu partial slabs dumped to the
4027 * per node list when we reach the limit.
4028 * B) The number of objects in cpu partial slabs to extract from the
4029 * per node list when we run out of per cpu objects. We only fetch
4030 * 50% to keep some capacity around for frees.
4031 */
4032 if (!kmem_cache_has_cpu_partial(s))
4033 slub_set_cpu_partial(s, 0);
4034 else if (s->size >= PAGE_SIZE)
4035 slub_set_cpu_partial(s, 2);
4036 else if (s->size >= 1024)
4037 slub_set_cpu_partial(s, 6);
4038 else if (s->size >= 256)
4039 slub_set_cpu_partial(s, 13);
4040 else
4041 slub_set_cpu_partial(s, 30);
4042 #endif
4043 }
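/*
 * Rough effect of the thresholds above (illustrative object sizes,
 * assuming 4 KiB pages): a 64 byte cache keeps up to 30 objects in cpu
 * partial slabs, a 512 byte cache 13, a 2 KiB cache 6, and anything of
 * a page or more only 2.
 */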
4044
4045 /*
4046 * calculate_sizes() determines the order and the distribution of data within
4047 * a slab object.
4048 */
4049 static int calculate_sizes(struct kmem_cache *s, int forced_order)
4050 {
4051 slab_flags_t flags = s->flags;
4052 unsigned int size = s->object_size;
4053 unsigned int order;
4054
4055 /*
4056 * Round up object size to the next word boundary. We can only
4057 * place the free pointer at word boundaries and this determines
4058 * the possible location of the free pointer.
4059 */
4060 size = ALIGN(size, sizeof(void *));
4061
4062 #ifdef CONFIG_SLUB_DEBUG
4063 /*
4064 * Determine if we can poison the object itself. If the user of
4065 * the slab may touch the object after free or before allocation
4066 * then we should never poison the object itself.
4067 */
4068 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
4069 !s->ctor)
4070 s->flags |= __OBJECT_POISON;
4071 else
4072 s->flags &= ~__OBJECT_POISON;
4073
4074
4075 /*
4076 * If we are Redzoning then check if there is some space between the
4077 * end of the object and the free pointer. If not then add an
4078 * additional word to have some bytes to store Redzone information.
4079 */
4080 if ((flags & SLAB_RED_ZONE) && size == s->object_size)
4081 size += sizeof(void *);
4082 #endif
4083
4084 /*
4085 * With that we have determined the number of bytes in actual use
4086 * by the object and redzoning.
4087 */
4088 s->inuse = size;
4089
4090 if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
4091 ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
4092 s->ctor) {
4093 /*
4094 * Relocate free pointer after the object if it is not
4095 * permitted to overwrite the first word of the object on
4096 * kmem_cache_free.
4097 *
4098 * This is the case if we do RCU, have a constructor or
4099 * destructor, are poisoning the objects, or are
4100 * redzoning an object smaller than sizeof(void *).
4101 *
4102 * The assumption that s->offset >= s->inuse means free
4103 * pointer is outside of the object is used in the
4104 * freeptr_outside_object() function. If that is no
4105 * longer true, the function needs to be modified.
4106 */
4107 s->offset = size;
4108 size += sizeof(void *);
4109 } else {
4110 /*
4111 * Store freelist pointer near middle of object to keep
4112 * it away from the edges of the object to avoid small
4113 * sized over/underflows from neighboring allocations.
4114 */
4115 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
4116 }
4117
4118 #ifdef CONFIG_SLUB_DEBUG
4119 if (flags & SLAB_STORE_USER)
4120 /*
4121 * Need to store information about allocs and frees after
4122 * the object.
4123 */
4124 size += 2 * sizeof(struct track);
4125 #endif
4126
4127 kasan_cache_create(s, &size, &s->flags);
4128 #ifdef CONFIG_SLUB_DEBUG
4129 if (flags & SLAB_RED_ZONE) {
4130 /*
4131 * Add some empty padding so that we can catch
4132 * overwrites from earlier objects rather than let
4133 * tracking information or the free pointer be
4134 * corrupted if a user writes before the start
4135 * of the object.
4136 */
4137 size += sizeof(void *);
4138
4139 s->red_left_pad = sizeof(void *);
4140 s->red_left_pad = ALIGN(s->red_left_pad, s->align);
4141 size += s->red_left_pad;
4142 }
4143 #endif
4144
4145 /*
4146 * SLUB stores one object immediately after another beginning from
4147 * offset 0. In order to align the objects we have to simply size
4148 * each object to conform to the alignment.
4149 */
4150 size = ALIGN(size, s->align);
4151 s->size = size;
4152 s->reciprocal_size = reciprocal_value(size);
4153 if (forced_order >= 0)
4154 order = forced_order;
4155 else
4156 order = calculate_order(size);
4157
4158 if ((int)order < 0)
4159 return 0;
4160
4161 s->allocflags = 0;
4162 if (order)
4163 s->allocflags |= __GFP_COMP;
4164
4165 if (s->flags & SLAB_CACHE_DMA)
4166 s->allocflags |= GFP_DMA;
4167
4168 if (s->flags & SLAB_CACHE_DMA32)
4169 s->allocflags |= GFP_DMA32;
4170
4171 if (s->flags & SLAB_RECLAIM_ACCOUNT)
4172 s->allocflags |= __GFP_RECLAIMABLE;
4173
4174 /*
4175 * Determine the number of objects per slab
4176 */
4177 s->oo = oo_make(order, size);
4178 s->min = oo_make(get_order(size), size);
4179 if (oo_objects(s->oo) > oo_objects(s->max))
4180 s->max = s->oo;
4181
4182 return !!oo_objects(s->oo);
4183 }
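/*
 * Resulting object layout, roughly (fields only present when the
 * corresponding debug options are enabled):
 *
 *	[red_left_pad | object (possibly poisoned) | right red zone |
 *	 free pointer (if relocated outside the object) | 2 x struct track]
 *
 * The whole thing is then rounded up to s->align, and that rounded size
 * is what s->size and the reciprocal division value describe.
 */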
4184
4185 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
4186 {
4187 s->flags = kmem_cache_flags(s->size, flags, s->name);
4188 #ifdef CONFIG_SLAB_FREELIST_HARDENED
4189 s->random = get_random_long();
4190 #endif
4191
4192 if (!calculate_sizes(s, -1))
4193 goto error;
4194 if (disable_higher_order_debug) {
4195 /*
4196 * Disable debugging flags that store metadata if the min slab
4197 * order increased.
4198 */
4199 if (get_order(s->size) > get_order(s->object_size)) {
4200 s->flags &= ~DEBUG_METADATA_FLAGS;
4201 s->offset = 0;
4202 if (!calculate_sizes(s, -1))
4203 goto error;
4204 }
4205 }
4206
4207 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
4208 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
4209 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0)
4210 /* Enable fast mode */
4211 s->flags |= __CMPXCHG_DOUBLE;
4212 #endif
4213
4214 /*
4215 * The larger the object size is, the more pages we want on the partial
4216 * list to avoid pounding the page allocator excessively.
4217 */
4218 set_min_partial(s, ilog2(s->size) / 2);
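	/*
	 * e.g. (illustrative) a 4 KiB object size gives ilog2(4096) / 2 = 6
	 * partial slabs kept per node, clamped to [MIN_PARTIAL, MAX_PARTIAL].
	 */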
4219
4220 set_cpu_partial(s);
4221
4222 #ifdef CONFIG_NUMA
4223 s->remote_node_defrag_ratio = 1000;
4224 #endif
4225
4226 /* Initialize the pre-computed randomized freelist if slab is up */
4227 if (slab_state >= UP) {
4228 if (init_cache_random_seq(s))
4229 goto error;
4230 }
4231
4232 if (!init_kmem_cache_nodes(s))
4233 goto error;
4234
4235 if (alloc_kmem_cache_cpus(s))
4236 return 0;
4237
4238 error:
4239 __kmem_cache_release(s);
4240 return -EINVAL;
4241 }
4242
4243 static void list_slab_objects(struct kmem_cache *s, struct page *page,
4244 const char *text)
4245 {
4246 #ifdef CONFIG_SLUB_DEBUG
4247 void *addr = page_address(page);
4248 unsigned long flags;
4249 unsigned long *map;
4250 void *p;
4251
4252 slab_err(s, page, text, s->name);
4253 slab_lock(page, &flags);
4254
4255 map = get_map(s, page);
4256 for_each_object(p, s, addr, page->objects) {
4257
4258 if (!test_bit(__obj_to_index(s, addr, p), map)) {
4259 pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
4260 print_tracking(s, p);
4261 }
4262 }
4263 put_map(map);
4264 slab_unlock(page, &flags);
4265 #endif
4266 }
4267
4268 /*
4269 * Attempt to free all partial slabs on a node.
4270 * This is called from __kmem_cache_shutdown(). We must take list_lock
4271  * because a sysfs file might still access the partial list after shutdown has started.
4272 */
4273 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
4274 {
4275 LIST_HEAD(discard);
4276 struct page *page, *h;
4277
4278 BUG_ON(irqs_disabled());
4279 spin_lock_irq(&n->list_lock);
4280 list_for_each_entry_safe(page, h, &n->partial, slab_list) {
4281 if (!page->inuse) {
4282 remove_partial(n, page);
4283 list_add(&page->slab_list, &discard);
4284 } else {
4285 list_slab_objects(s, page,
4286 "Objects remaining in %s on __kmem_cache_shutdown()");
4287 }
4288 }
4289 spin_unlock_irq(&n->list_lock);
4290
4291 list_for_each_entry_safe(page, h, &discard, slab_list)
4292 discard_slab(s, page);
4293 }
4294
4295 bool __kmem_cache_empty(struct kmem_cache *s)
4296 {
4297 int node;
4298 struct kmem_cache_node *n;
4299
4300 for_each_kmem_cache_node(s, node, n)
4301 if (n->nr_partial || slabs_node(s, node))
4302 return false;
4303 return true;
4304 }
4305
4306 /*
4307 * Release all resources used by a slab cache.
4308 */
4309 int __kmem_cache_shutdown(struct kmem_cache *s)
4310 {
4311 int node;
4312 struct kmem_cache_node *n;
4313
4314 flush_all_cpus_locked(s);
4315 /* Attempt to free all objects */
4316 for_each_kmem_cache_node(s, node, n) {
4317 free_partial(s, n);
4318 if (n->nr_partial || slabs_node(s, node))
4319 return 1;
4320 }
4321 return 0;
4322 }
4323
4324 #ifdef CONFIG_PRINTK
4325 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
4326 {
4327 void *base;
4328 int __maybe_unused i;
4329 unsigned int objnr;
4330 void *objp;
4331 void *objp0;
4332 struct kmem_cache *s = page->slab_cache;
4333 struct track __maybe_unused *trackp;
4334
4335 kpp->kp_ptr = object;
4336 kpp->kp_page = page;
4337 kpp->kp_slab_cache = s;
4338 base = page_address(page);
4339 objp0 = kasan_reset_tag(object);
4340 #ifdef CONFIG_SLUB_DEBUG
4341 objp = restore_red_left(s, objp0);
4342 #else
4343 objp = objp0;
4344 #endif
4345 objnr = obj_to_index(s, page, objp);
4346 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp);
4347 objp = base + s->size * objnr;
4348 kpp->kp_objp = objp;
4349 if (WARN_ON_ONCE(objp < base || objp >= base + page->objects * s->size || (objp - base) % s->size) ||
4350 !(s->flags & SLAB_STORE_USER))
4351 return;
4352 #ifdef CONFIG_SLUB_DEBUG
4353 objp = fixup_red_left(s, objp);
4354 trackp = get_track(s, objp, TRACK_ALLOC);
4355 kpp->kp_ret = (void *)trackp->addr;
4356 #ifdef CONFIG_STACKTRACE
4357 for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
4358 kpp->kp_stack[i] = (void *)trackp->addrs[i];
4359 if (!kpp->kp_stack[i])
4360 break;
4361 }
4362
4363 trackp = get_track(s, objp, TRACK_FREE);
4364 for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
4365 kpp->kp_free_stack[i] = (void *)trackp->addrs[i];
4366 if (!kpp->kp_free_stack[i])
4367 break;
4368 }
4369 #endif
4370 #endif
4371 }
4372 #endif
4373
4374 /********************************************************************
4375 * Kmalloc subsystem
4376 *******************************************************************/
4377
4378 static int __init setup_slub_min_order(char *str)
4379 {
4380 get_option(&str, (int *)&slub_min_order);
4381
4382 return 1;
4383 }
4384
4385 __setup("slub_min_order=", setup_slub_min_order);
4386
4387 static int __init setup_slub_max_order(char *str)
4388 {
4389 get_option(&str, (int *)&slub_max_order);
4390 slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1);
4391
4392 return 1;
4393 }
4394
4395 __setup("slub_max_order=", setup_slub_max_order);
4396
4397 static int __init setup_slub_min_objects(char *str)
4398 {
4399 get_option(&str, (int *)&slub_min_objects);
4400
4401 return 1;
4402 }
4403
4404 __setup("slub_min_objects=", setup_slub_min_objects);
4405
4406 void *__kmalloc(size_t size, gfp_t flags)
4407 {
4408 struct kmem_cache *s;
4409 void *ret;
4410
4411 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
4412 return kmalloc_large(size, flags);
4413
4414 s = kmalloc_slab(size, flags);
4415
4416 if (unlikely(ZERO_OR_NULL_PTR(s)))
4417 return s;
4418
4419 ret = slab_alloc(s, flags, _RET_IP_, size);
4420
4421 trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
4422
4423 ret = kasan_kmalloc(s, ret, size, flags);
4424
4425 return ret;
4426 }
4427 EXPORT_SYMBOL(__kmalloc);
4428
4429 #ifdef CONFIG_NUMA
4430 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
4431 {
4432 struct page *page;
4433 void *ptr = NULL;
4434 unsigned int order = get_order(size);
4435
4436 flags |= __GFP_COMP;
4437 page = alloc_pages_node(node, flags, order);
4438 if (page) {
4439 ptr = page_address(page);
4440 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
4441 PAGE_SIZE << order);
4442 }
4443
4444 return kmalloc_large_node_hook(ptr, size, flags);
4445 }
4446
4447 void *__kmalloc_node(size_t size, gfp_t flags, int node)
4448 {
4449 struct kmem_cache *s;
4450 void *ret;
4451
4452 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4453 ret = kmalloc_large_node(size, flags, node);
4454
4455 trace_kmalloc_node(_RET_IP_, ret,
4456 size, PAGE_SIZE << get_order(size),
4457 flags, node);
4458
4459 return ret;
4460 }
4461
4462 s = kmalloc_slab(size, flags);
4463
4464 if (unlikely(ZERO_OR_NULL_PTR(s)))
4465 return s;
4466
4467 ret = slab_alloc_node(s, flags, node, _RET_IP_, size);
4468
4469 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
4470
4471 ret = kasan_kmalloc(s, ret, size, flags);
4472
4473 return ret;
4474 }
4475 EXPORT_SYMBOL(__kmalloc_node);
4476 #endif /* CONFIG_NUMA */
4477
4478 #ifdef CONFIG_HARDENED_USERCOPY
4479 /*
4480 * Rejects incorrectly sized objects and objects that are to be copied
4481 * to/from userspace but do not fall entirely within the containing slab
4482 * cache's usercopy region.
4483 *
4484 * Returns NULL if check passes, otherwise const char * to name of cache
4485 * to indicate an error.
4486 */
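/*
 * Worked example with made-up numbers: for a cache with useroffset == 32
 * and usersize == 64, a copy of n == 48 bytes at offset 40 into the object
 * passes the window check below (40 >= 32, 40 - 32 <= 64 and
 * 48 <= 32 - 40 + 64 == 56), while the same copy at offset 0 would not.
 */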
4487 void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
4488 bool to_user)
4489 {
4490 struct kmem_cache *s;
4491 unsigned int offset;
4492 size_t object_size;
4493 bool is_kfence = is_kfence_address(ptr);
4494
4495 ptr = kasan_reset_tag(ptr);
4496
4497 /* Find object and usable object size. */
4498 s = page->slab_cache;
4499
4500 /* Reject impossible pointers. */
4501 if (ptr < page_address(page))
4502 usercopy_abort("SLUB object not in SLUB page?!", NULL,
4503 to_user, 0, n);
4504
4505 /* Find offset within object. */
4506 if (is_kfence)
4507 offset = ptr - kfence_object_start(ptr);
4508 else
4509 offset = (ptr - page_address(page)) % s->size;
4510
4511 /* Adjust for redzone and reject if within the redzone. */
4512 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
4513 if (offset < s->red_left_pad)
4514 usercopy_abort("SLUB object in left red zone",
4515 s->name, to_user, offset, n);
4516 offset -= s->red_left_pad;
4517 }
4518
4519 /* Allow address range falling entirely within usercopy region. */
4520 if (offset >= s->useroffset &&
4521 offset - s->useroffset <= s->usersize &&
4522 n <= s->useroffset - offset + s->usersize)
4523 return;
4524
4525 /*
4526 * If the copy is still within the allocated object, produce
4527 * a warning instead of rejecting the copy. This is intended
4528 * to be a temporary method to find any missing usercopy
4529 * whitelists.
4530 */
4531 object_size = slab_ksize(s);
4532 if (usercopy_fallback &&
4533 offset <= object_size && n <= object_size - offset) {
4534 usercopy_warn("SLUB object", s->name, to_user, offset, n);
4535 return;
4536 }
4537
4538 usercopy_abort("SLUB object", s->name, to_user, offset, n);
4539 }
4540 #endif /* CONFIG_HARDENED_USERCOPY */
4541
4542 size_t __ksize(const void *object)
4543 {
4544 struct page *page;
4545
4546 if (unlikely(object == ZERO_SIZE_PTR))
4547 return 0;
4548
4549 page = virt_to_head_page(object);
4550
4551 if (unlikely(!PageSlab(page))) {
4552 WARN_ON(!PageCompound(page));
4553 return page_size(page);
4554 }
4555
4556 return slab_ksize(page->slab_cache);
4557 }
4558 EXPORT_SYMBOL(__ksize);
4559
4560 void kfree(const void *x)
4561 {
4562 struct page *page;
4563 void *object = (void *)x;
4564
4565 trace_kfree(_RET_IP_, x);
4566
4567 if (unlikely(ZERO_OR_NULL_PTR(x)))
4568 return;
4569
4570 page = virt_to_head_page(x);
4571 if (unlikely(!PageSlab(page))) {
4572 free_nonslab_page(page, object);
4573 return;
4574 }
4575 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
4576 }
4577 EXPORT_SYMBOL(kfree);
4578
4579 #define SHRINK_PROMOTE_MAX 32
4580
4581 /*
4582 * kmem_cache_shrink discards empty slabs and promotes the slabs filled
4583 * up most to the head of the partial lists. New allocations will then
4584 * fill those up and thus they can be removed from the partial lists.
4585 *
4586 * The slabs with the least items are placed last. This results in them
4587  * being allocated from last, increasing the chance that the last objects
4588 * are freed in them.
4589 */
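/*
 * Illustrative shrink pass: partial slabs with 1..SHRINK_PROMOTE_MAX free
 * objects are bucketed by their free count, completely free slabs are
 * collected for discarding, and the buckets are spliced back to the head
 * emptiest-first, leaving the fullest slabs at the front of the partial list.
 */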
4590 static int __kmem_cache_do_shrink(struct kmem_cache *s)
4591 {
4592 int node;
4593 int i;
4594 struct kmem_cache_node *n;
4595 struct page *page;
4596 struct page *t;
4597 struct list_head discard;
4598 struct list_head promote[SHRINK_PROMOTE_MAX];
4599 unsigned long flags;
4600 int ret = 0;
4601
4602 for_each_kmem_cache_node(s, node, n) {
4603 INIT_LIST_HEAD(&discard);
4604 for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
4605 INIT_LIST_HEAD(promote + i);
4606
4607 spin_lock_irqsave(&n->list_lock, flags);
4608
4609 /*
4610 * Build lists of slabs to discard or promote.
4611 *
4612 * Note that concurrent frees may occur while we hold the
4613 * list_lock. page->inuse here is the upper limit.
4614 */
4615 list_for_each_entry_safe(page, t, &n->partial, slab_list) {
4616 int free = page->objects - page->inuse;
4617
4618 /* Do not reread page->inuse */
4619 barrier();
4620
4621 /* We do not keep full slabs on the list */
4622 BUG_ON(free <= 0);
4623
4624 if (free == page->objects) {
4625 list_move(&page->slab_list, &discard);
4626 n->nr_partial--;
4627 } else if (free <= SHRINK_PROMOTE_MAX)
4628 list_move(&page->slab_list, promote + free - 1);
4629 }
4630
4631 /*
4632 * Promote the slabs filled up most to the head of the
4633 * partial list.
4634 */
4635 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
4636 list_splice(promote + i, &n->partial);
4637
4638 spin_unlock_irqrestore(&n->list_lock, flags);
4639
4640 /* Release empty slabs */
4641 list_for_each_entry_safe(page, t, &discard, slab_list)
4642 discard_slab(s, page);
4643
4644 if (slabs_node(s, node))
4645 ret = 1;
4646 }
4647
4648 return ret;
4649 }
4650
4651 int __kmem_cache_shrink(struct kmem_cache *s)
4652 {
4653 flush_all(s);
4654 return __kmem_cache_do_shrink(s);
4655 }
4656
4657 static int slab_mem_going_offline_callback(void *arg)
4658 {
4659 struct kmem_cache *s;
4660
4661 mutex_lock(&slab_mutex);
4662 list_for_each_entry(s, &slab_caches, list) {
4663 flush_all_cpus_locked(s);
4664 __kmem_cache_do_shrink(s);
4665 }
4666 mutex_unlock(&slab_mutex);
4667
4668 return 0;
4669 }
4670
4671 static void slab_mem_offline_callback(void *arg)
4672 {
4673 struct memory_notify *marg = arg;
4674 int offline_node;
4675
4676 offline_node = marg->status_change_nid_normal;
4677
4678 /*
4679 	 * If the node still has available memory, then its kmem_cache_node
4680 	 * is still needed, so there is nothing to do here.
4681 */
4682 if (offline_node < 0)
4683 return;
4684
4685 mutex_lock(&slab_mutex);
4686 node_clear(offline_node, slab_nodes);
4687 /*
4688 * We no longer free kmem_cache_node structures here, as it would be
4689 * racy with all get_node() users, and infeasible to protect them with
4690 * slab_mutex.
4691 */
4692 mutex_unlock(&slab_mutex);
4693 }
4694
4695 static int slab_mem_going_online_callback(void *arg)
4696 {
4697 struct kmem_cache_node *n;
4698 struct kmem_cache *s;
4699 struct memory_notify *marg = arg;
4700 int nid = marg->status_change_nid_normal;
4701 int ret = 0;
4702
4703 /*
4704 * If the node's memory is already available, then kmem_cache_node is
4705 * already created. Nothing to do.
4706 */
4707 if (nid < 0)
4708 return 0;
4709
4710 /*
4711 * We are bringing a node online. No memory is available yet. We must
4712 * allocate a kmem_cache_node structure in order to bring the node
4713 * online.
4714 */
4715 mutex_lock(&slab_mutex);
4716 list_for_each_entry(s, &slab_caches, list) {
4717 /*
4718 * The structure may already exist if the node was previously
4719 * onlined and offlined.
4720 */
4721 if (get_node(s, nid))
4722 continue;
4723 /*
4724 		 * XXX: kmem_cache_alloc_node will fall back to other nodes
4725 * since memory is not yet available from the node that
4726 * is brought up.
4727 */
4728 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
4729 if (!n) {
4730 ret = -ENOMEM;
4731 goto out;
4732 }
4733 init_kmem_cache_node(n);
4734 s->node[nid] = n;
4735 }
4736 /*
4737 * Any cache created after this point will also have kmem_cache_node
4738 * initialized for the new node.
4739 */
4740 node_set(nid, slab_nodes);
4741 out:
4742 mutex_unlock(&slab_mutex);
4743 return ret;
4744 }
4745
4746 static int slab_memory_callback(struct notifier_block *self,
4747 unsigned long action, void *arg)
4748 {
4749 int ret = 0;
4750
4751 switch (action) {
4752 case MEM_GOING_ONLINE:
4753 ret = slab_mem_going_online_callback(arg);
4754 break;
4755 case MEM_GOING_OFFLINE:
4756 ret = slab_mem_going_offline_callback(arg);
4757 break;
4758 case MEM_OFFLINE:
4759 case MEM_CANCEL_ONLINE:
4760 slab_mem_offline_callback(arg);
4761 break;
4762 case MEM_ONLINE:
4763 case MEM_CANCEL_OFFLINE:
4764 break;
4765 }
4766 if (ret)
4767 ret = notifier_from_errno(ret);
4768 else
4769 ret = NOTIFY_OK;
4770 return ret;
4771 }
4772
4773 static struct notifier_block slab_memory_callback_nb = {
4774 .notifier_call = slab_memory_callback,
4775 .priority = SLAB_CALLBACK_PRI,
4776 };
4777
4778 /********************************************************************
4779 * Basic setup of slabs
4780 *******************************************************************/
4781
4782 /*
4783 * Used for early kmem_cache structures that were allocated using
4784 * the page allocator. Allocate them properly then fix up the pointers
4785 * that may be pointing to the wrong kmem_cache structure.
4786 */
4787
4788 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
4789 {
4790 int node;
4791 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
4792 struct kmem_cache_node *n;
4793
4794 memcpy(s, static_cache, kmem_cache->object_size);
4795
4796 /*
4797 * This runs very early, and only the boot processor is supposed to be
4798 * up. Even if it weren't true, IRQs are not up so we couldn't fire
4799 * IPIs around.
4800 */
4801 __flush_cpu_slab(s, smp_processor_id());
4802 for_each_kmem_cache_node(s, node, n) {
4803 struct page *p;
4804
4805 list_for_each_entry(p, &n->partial, slab_list)
4806 p->slab_cache = s;
4807
4808 #ifdef CONFIG_SLUB_DEBUG
4809 list_for_each_entry(p, &n->full, slab_list)
4810 p->slab_cache = s;
4811 #endif
4812 }
4813 list_add(&s->list, &slab_caches);
4814 return s;
4815 }
4816
4817 void __init kmem_cache_init(void)
4818 {
4819 static __initdata struct kmem_cache boot_kmem_cache,
4820 boot_kmem_cache_node;
4821 int node;
4822
4823 if (debug_guardpage_minorder())
4824 slub_max_order = 0;
4825
4826 /* Print slub debugging pointers without hashing */
4827 if (__slub_debug_enabled())
4828 no_hash_pointers_enable(NULL);
4829
4830 kmem_cache_node = &boot_kmem_cache_node;
4831 kmem_cache = &boot_kmem_cache;
4832
4833 /*
4834 * Initialize the nodemask for which we will allocate per node
4835 * structures. Here we don't need taking slab_mutex yet.
4836 */
4837 for_each_node_state(node, N_NORMAL_MEMORY)
4838 node_set(node, slab_nodes);
4839
4840 create_boot_cache(kmem_cache_node, "kmem_cache_node",
4841 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
4842
4843 register_hotmemory_notifier(&slab_memory_callback_nb);
4844
4845 /* Able to allocate the per node structures */
4846 slab_state = PARTIAL;
4847
4848 create_boot_cache(kmem_cache, "kmem_cache",
4849 offsetof(struct kmem_cache, node) +
4850 nr_node_ids * sizeof(struct kmem_cache_node *),
4851 SLAB_HWCACHE_ALIGN, 0, 0);
4852
4853 kmem_cache = bootstrap(&boot_kmem_cache);
4854 kmem_cache_node = bootstrap(&boot_kmem_cache_node);
4855
4856 /* Now we can use the kmem_cache to allocate kmalloc slabs */
4857 setup_kmalloc_cache_index_table();
4858 create_kmalloc_caches(0);
4859
4860 /* Setup random freelists for each cache */
4861 init_freelist_randomization();
4862
4863 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
4864 slub_cpu_dead);
4865
4866 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
4867 cache_line_size(),
4868 slub_min_order, slub_max_order, slub_min_objects,
4869 nr_cpu_ids, nr_node_ids);
4870 }
4871
4872 void __init kmem_cache_init_late(void)
4873 {
4874 flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
4875 WARN_ON(!flushwq);
4876 }
4877
4878 struct kmem_cache *
4879 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
4880 slab_flags_t flags, void (*ctor)(void *))
4881 {
4882 struct kmem_cache *s;
4883
4884 s = find_mergeable(size, align, flags, name, ctor);
4885 if (s) {
4886 s->refcount++;
4887
4888 /*
4889 * Adjust the object sizes so that we clear
4890 * the complete object on kzalloc.
4891 */
4892 s->object_size = max(s->object_size, size);
4893 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
4894
4895 if (sysfs_slab_alias(s, name)) {
4896 s->refcount--;
4897 s = NULL;
4898 }
4899 }
4900
4901 return s;
4902 }
4903
4904 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
4905 {
4906 int err;
4907
4908 err = kmem_cache_open(s, flags);
4909 if (err)
4910 return err;
4911
4912 /* Mutex is not taken during early boot */
4913 if (slab_state <= UP)
4914 return 0;
4915
4916 err = sysfs_slab_add(s);
4917 if (err) {
4918 __kmem_cache_release(s);
4919 return err;
4920 }
4921
4922 if (s->flags & SLAB_STORE_USER)
4923 debugfs_slab_add(s);
4924
4925 return 0;
4926 }
4927
4928 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
4929 {
4930 struct kmem_cache *s;
4931 void *ret;
4932
4933 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
4934 return kmalloc_large(size, gfpflags);
4935
4936 s = kmalloc_slab(size, gfpflags);
4937
4938 if (unlikely(ZERO_OR_NULL_PTR(s)))
4939 return s;
4940
4941 ret = slab_alloc(s, gfpflags, caller, size);
4942
4943 /* Honor the call site pointer we received. */
4944 trace_kmalloc(caller, ret, size, s->size, gfpflags);
4945
4946 ret = kasan_kmalloc(s, ret, size, gfpflags);
4947
4948 return ret;
4949 }
4950 EXPORT_SYMBOL(__kmalloc_track_caller);
4951
4952 #ifdef CONFIG_NUMA
4953 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
4954 int node, unsigned long caller)
4955 {
4956 struct kmem_cache *s;
4957 void *ret;
4958
4959 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4960 ret = kmalloc_large_node(size, gfpflags, node);
4961
4962 trace_kmalloc_node(caller, ret,
4963 size, PAGE_SIZE << get_order(size),
4964 gfpflags, node);
4965
4966 return ret;
4967 }
4968
4969 s = kmalloc_slab(size, gfpflags);
4970
4971 if (unlikely(ZERO_OR_NULL_PTR(s)))
4972 return s;
4973
4974 ret = slab_alloc_node(s, gfpflags, node, caller, size);
4975
4976 /* Honor the call site pointer we received. */
4977 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
4978
4979 ret = kasan_kmalloc(s, ret, size, gfpflags);
4980
4981 return ret;
4982 }
4983 EXPORT_SYMBOL(__kmalloc_node_track_caller);
4984 #endif
4985
4986 #ifdef CONFIG_SYSFS
4987 static int count_inuse(struct page *page)
4988 {
4989 return page->inuse;
4990 }
4991
4992 static int count_total(struct page *page)
4993 {
4994 return page->objects;
4995 }
4996 #endif
4997
4998 #ifdef CONFIG_SLUB_DEBUG
4999 static void validate_slab(struct kmem_cache *s, struct page *page,
5000 unsigned long *obj_map)
5001 {
5002 void *p;
5003 void *addr = page_address(page);
5004 unsigned long flags;
5005
5006 slab_lock(page, &flags);
5007
5008 if (!check_slab(s, page) || !on_freelist(s, page, NULL))
5009 goto unlock;
5010
5011 /* Now we know that a valid freelist exists */
5012 __fill_map(obj_map, s, page);
5013 for_each_object(p, s, addr, page->objects) {
5014 u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ?
5015 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
5016
5017 if (!check_object(s, page, p, val))
5018 break;
5019 }
5020 unlock:
5021 slab_unlock(page, &flags);
5022 }
5023
5024 static int validate_slab_node(struct kmem_cache *s,
5025 struct kmem_cache_node *n, unsigned long *obj_map)
5026 {
5027 unsigned long count = 0;
5028 struct page *page;
5029 unsigned long flags;
5030
5031 spin_lock_irqsave(&n->list_lock, flags);
5032
5033 list_for_each_entry(page, &n->partial, slab_list) {
5034 validate_slab(s, page, obj_map);
5035 count++;
5036 }
5037 if (count != n->nr_partial) {
5038 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
5039 s->name, count, n->nr_partial);
5040 slab_add_kunit_errors();
5041 }
5042
5043 if (!(s->flags & SLAB_STORE_USER))
5044 goto out;
5045
5046 list_for_each_entry(page, &n->full, slab_list) {
5047 validate_slab(s, page, obj_map);
5048 count++;
5049 }
5050 if (count != atomic_long_read(&n->nr_slabs)) {
5051 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
5052 s->name, count, atomic_long_read(&n->nr_slabs));
5053 slab_add_kunit_errors();
5054 }
5055
5056 out:
5057 spin_unlock_irqrestore(&n->list_lock, flags);
5058 return count;
5059 }
5060
5061 long validate_slab_cache(struct kmem_cache *s)
5062 {
5063 int node;
5064 unsigned long count = 0;
5065 struct kmem_cache_node *n;
5066 unsigned long *obj_map;
5067
5068 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
5069 if (!obj_map)
5070 return -ENOMEM;
5071
5072 flush_all(s);
5073 for_each_kmem_cache_node(s, node, n)
5074 count += validate_slab_node(s, n, obj_map);
5075
5076 bitmap_free(obj_map);
5077
5078 return count;
5079 }
5080 EXPORT_SYMBOL(validate_slab_cache);
5081
5082 #ifdef CONFIG_DEBUG_FS
5083 /*
5084 * Generate lists of code addresses where slabcache objects are allocated
5085 * and freed.
5086 */
5087
5088 struct location {
5089 unsigned long count;
5090 unsigned long addr;
5091 long long sum_time;
5092 long min_time;
5093 long max_time;
5094 long min_pid;
5095 long max_pid;
5096 DECLARE_BITMAP(cpus, NR_CPUS);
5097 nodemask_t nodes;
5098 };
5099
5100 struct loc_track {
5101 unsigned long max;
5102 unsigned long count;
5103 struct location *loc;
5104 loff_t idx;
5105 };
5106
5107 static struct dentry *slab_debugfs_root;
5108
5109 static void free_loc_track(struct loc_track *t)
5110 {
5111 if (t->max)
5112 free_pages((unsigned long)t->loc,
5113 get_order(sizeof(struct location) * t->max));
5114 }
5115
5116 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
5117 {
5118 struct location *l;
5119 int order;
5120
5121 order = get_order(sizeof(struct location) * max);
5122
5123 l = (void *)__get_free_pages(flags, order);
5124 if (!l)
5125 return 0;
5126
5127 if (t->count) {
5128 memcpy(l, t->loc, sizeof(struct location) * t->count);
5129 free_loc_track(t);
5130 }
5131 t->max = max;
5132 t->loc = l;
5133 return 1;
5134 }
5135
5136 static int add_location(struct loc_track *t, struct kmem_cache *s,
5137 const struct track *track)
5138 {
5139 long start, end, pos;
5140 struct location *l;
5141 unsigned long caddr;
5142 unsigned long age = jiffies - track->when;
5143
5144 start = -1;
5145 end = t->count;
5146
5147 for ( ; ; ) {
5148 pos = start + (end - start + 1) / 2;
5149
5150 /*
5151 * There is nothing at "end". If we end up there
5152 		 * we need to add something before end.
5153 */
5154 if (pos == end)
5155 break;
5156
5157 caddr = t->loc[pos].addr;
5158 if (track->addr == caddr) {
5159
5160 l = &t->loc[pos];
5161 l->count++;
5162 if (track->when) {
5163 l->sum_time += age;
5164 if (age < l->min_time)
5165 l->min_time = age;
5166 if (age > l->max_time)
5167 l->max_time = age;
5168
5169 if (track->pid < l->min_pid)
5170 l->min_pid = track->pid;
5171 if (track->pid > l->max_pid)
5172 l->max_pid = track->pid;
5173
5174 cpumask_set_cpu(track->cpu,
5175 to_cpumask(l->cpus));
5176 }
5177 node_set(page_to_nid(virt_to_page(track)), l->nodes);
5178 return 1;
5179 }
5180
5181 if (track->addr < caddr)
5182 end = pos;
5183 else
5184 start = pos;
5185 }
5186
5187 /*
5188 * Not found. Insert new tracking element.
5189 */
5190 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
5191 return 0;
5192
5193 l = t->loc + pos;
5194 if (pos < t->count)
5195 memmove(l + 1, l,
5196 (t->count - pos) * sizeof(struct location));
5197 t->count++;
5198 l->count = 1;
5199 l->addr = track->addr;
5200 l->sum_time = age;
5201 l->min_time = age;
5202 l->max_time = age;
5203 l->min_pid = track->pid;
5204 l->max_pid = track->pid;
5205 cpumask_clear(to_cpumask(l->cpus));
5206 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
5207 nodes_clear(l->nodes);
5208 node_set(page_to_nid(virt_to_page(track)), l->nodes);
5209 return 1;
5210 }
5211
5212 static void process_slab(struct loc_track *t, struct kmem_cache *s,
5213 struct page *page, enum track_item alloc,
5214 unsigned long *obj_map)
5215 {
5216 void *addr = page_address(page);
5217 void *p;
5218
5219 __fill_map(obj_map, s, page);
5220
5221 for_each_object(p, s, addr, page->objects)
5222 if (!test_bit(__obj_to_index(s, addr, p), obj_map))
5223 add_location(t, s, get_track(s, p, alloc));
5224 }
5225 #endif /* CONFIG_DEBUG_FS */
5226 #endif /* CONFIG_SLUB_DEBUG */
5227
5228 #ifdef CONFIG_SYSFS
5229 enum slab_stat_type {
5230 SL_ALL, /* All slabs */
5231 SL_PARTIAL, /* Only partially allocated slabs */
5232 SL_CPU, /* Only slabs used for cpu caches */
5233 SL_OBJECTS, /* Determine allocated objects not slabs */
5234 SL_TOTAL /* Determine object capacity not slabs */
5235 };
5236
5237 #define SO_ALL (1 << SL_ALL)
5238 #define SO_PARTIAL (1 << SL_PARTIAL)
5239 #define SO_CPU (1 << SL_CPU)
5240 #define SO_OBJECTS (1 << SL_OBJECTS)
5241 #define SO_TOTAL (1 << SL_TOTAL)
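/*
 * These flags are combined by the sysfs handlers further down, e.g.
 * SO_ALL|SO_OBJECTS counts allocated objects across all slabs while
 * SO_PARTIAL|SO_OBJECTS only counts objects sitting in partial slabs.
 */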
5242
5243 static ssize_t show_slab_objects(struct kmem_cache *s,
5244 char *buf, unsigned long flags)
5245 {
5246 unsigned long total = 0;
5247 int node;
5248 int x;
5249 unsigned long *nodes;
5250 int len = 0;
5251
5252 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
5253 if (!nodes)
5254 return -ENOMEM;
5255
5256 if (flags & SO_CPU) {
5257 int cpu;
5258
5259 for_each_possible_cpu(cpu) {
5260 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
5261 cpu);
5262 int node;
5263 struct page *page;
5264
5265 page = READ_ONCE(c->page);
5266 if (!page)
5267 continue;
5268
5269 node = page_to_nid(page);
5270 if (flags & SO_TOTAL)
5271 x = page->objects;
5272 else if (flags & SO_OBJECTS)
5273 x = page->inuse;
5274 else
5275 x = 1;
5276
5277 total += x;
5278 nodes[node] += x;
5279
5280 page = slub_percpu_partial_read_once(c);
5281 if (page) {
5282 node = page_to_nid(page);
5283 if (flags & SO_TOTAL)
5284 WARN_ON_ONCE(1);
5285 else if (flags & SO_OBJECTS)
5286 WARN_ON_ONCE(1);
5287 else
5288 x = page->pages;
5289 total += x;
5290 nodes[node] += x;
5291 }
5292 }
5293 }
5294
5295 /*
5296 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
5297 * already held which will conflict with an existing lock order:
5298 *
5299 * mem_hotplug_lock->slab_mutex->kernfs_mutex
5300 *
5301 * We don't really need mem_hotplug_lock (to hold off
5302 * slab_mem_going_offline_callback) here because slab's memory hot
5303 * unplug code doesn't destroy the kmem_cache->node[] data.
5304 */
5305
5306 #ifdef CONFIG_SLUB_DEBUG
5307 if (flags & SO_ALL) {
5308 struct kmem_cache_node *n;
5309
5310 for_each_kmem_cache_node(s, node, n) {
5311
5312 if (flags & SO_TOTAL)
5313 x = atomic_long_read(&n->total_objects);
5314 else if (flags & SO_OBJECTS)
5315 x = atomic_long_read(&n->total_objects) -
5316 count_partial(n, count_free);
5317 else
5318 x = atomic_long_read(&n->nr_slabs);
5319 total += x;
5320 nodes[node] += x;
5321 }
5322
5323 } else
5324 #endif
5325 if (flags & SO_PARTIAL) {
5326 struct kmem_cache_node *n;
5327
5328 for_each_kmem_cache_node(s, node, n) {
5329 if (flags & SO_TOTAL)
5330 x = count_partial(n, count_total);
5331 else if (flags & SO_OBJECTS)
5332 x = count_partial(n, count_inuse);
5333 else
5334 x = n->nr_partial;
5335 total += x;
5336 nodes[node] += x;
5337 }
5338 }
5339
5340 len += sysfs_emit_at(buf, len, "%lu", total);
5341 #ifdef CONFIG_NUMA
5342 for (node = 0; node < nr_node_ids; node++) {
5343 if (nodes[node])
5344 len += sysfs_emit_at(buf, len, " N%d=%lu",
5345 node, nodes[node]);
5346 }
5347 #endif
5348 len += sysfs_emit_at(buf, len, "\n");
5349 kfree(nodes);
5350
5351 return len;
5352 }
5353
5354 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
5355 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
5356
5357 struct slab_attribute {
5358 struct attribute attr;
5359 ssize_t (*show)(struct kmem_cache *s, char *buf);
5360 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
5361 };
5362
5363 #define SLAB_ATTR_RO(_name) \
5364 static struct slab_attribute _name##_attr = \
5365 __ATTR(_name, 0400, _name##_show, NULL)
5366
5367 #define SLAB_ATTR(_name) \
5368 static struct slab_attribute _name##_attr = \
5369 __ATTR(_name, 0600, _name##_show, _name##_store)
5370
5371 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
5372 {
5373 return sysfs_emit(buf, "%u\n", s->size);
5374 }
5375 SLAB_ATTR_RO(slab_size);
5376
5377 static ssize_t align_show(struct kmem_cache *s, char *buf)
5378 {
5379 return sysfs_emit(buf, "%u\n", s->align);
5380 }
5381 SLAB_ATTR_RO(align);
5382
5383 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
5384 {
5385 return sysfs_emit(buf, "%u\n", s->object_size);
5386 }
5387 SLAB_ATTR_RO(object_size);
5388
5389 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
5390 {
5391 return sysfs_emit(buf, "%u\n", oo_objects(s->oo));
5392 }
5393 SLAB_ATTR_RO(objs_per_slab);
5394
5395 static ssize_t order_show(struct kmem_cache *s, char *buf)
5396 {
5397 return sysfs_emit(buf, "%u\n", oo_order(s->oo));
5398 }
5399 SLAB_ATTR_RO(order);
5400
5401 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
5402 {
5403 return sysfs_emit(buf, "%lu\n", s->min_partial);
5404 }
5405
5406 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
5407 size_t length)
5408 {
5409 unsigned long min;
5410 int err;
5411
5412 err = kstrtoul(buf, 10, &min);
5413 if (err)
5414 return err;
5415
5416 set_min_partial(s, min);
5417 return length;
5418 }
5419 SLAB_ATTR(min_partial);
5420
5421 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
5422 {
5423 return sysfs_emit(buf, "%u\n", slub_cpu_partial(s));
5424 }
5425
5426 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
5427 size_t length)
5428 {
5429 unsigned int objects;
5430 int err;
5431
5432 err = kstrtouint(buf, 10, &objects);
5433 if (err)
5434 return err;
5435 if (objects && !kmem_cache_has_cpu_partial(s))
5436 return -EINVAL;
5437
5438 slub_set_cpu_partial(s, objects);
5439 flush_all(s);
5440 return length;
5441 }
5442 SLAB_ATTR(cpu_partial);
5443
5444 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
5445 {
5446 if (!s->ctor)
5447 return 0;
5448 return sysfs_emit(buf, "%pS\n", s->ctor);
5449 }
5450 SLAB_ATTR_RO(ctor);
5451
5452 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
5453 {
5454 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
5455 }
5456 SLAB_ATTR_RO(aliases);
5457
5458 static ssize_t partial_show(struct kmem_cache *s, char *buf)
5459 {
5460 return show_slab_objects(s, buf, SO_PARTIAL);
5461 }
5462 SLAB_ATTR_RO(partial);
5463
5464 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
5465 {
5466 return show_slab_objects(s, buf, SO_CPU);
5467 }
5468 SLAB_ATTR_RO(cpu_slabs);
5469
5470 static ssize_t objects_show(struct kmem_cache *s, char *buf)
5471 {
5472 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
5473 }
5474 SLAB_ATTR_RO(objects);
5475
5476 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
5477 {
5478 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
5479 }
5480 SLAB_ATTR_RO(objects_partial);
5481
5482 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
5483 {
5484 int objects = 0;
5485 int pages = 0;
5486 int cpu;
5487 int len = 0;
5488
5489 for_each_online_cpu(cpu) {
5490 struct page *page;
5491
5492 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5493
5494 if (page) {
5495 pages += page->pages;
5496 objects += page->pobjects;
5497 }
5498 }
5499
5500 len += sysfs_emit_at(buf, len, "%d(%d)", objects, pages);
5501
5502 #ifdef CONFIG_SMP
5503 for_each_online_cpu(cpu) {
5504 struct page *page;
5505
5506 page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
5507 if (page)
5508 len += sysfs_emit_at(buf, len, " C%d=%d(%d)",
5509 cpu, page->pobjects, page->pages);
5510 }
5511 #endif
5512 len += sysfs_emit_at(buf, len, "\n");
5513
5514 return len;
5515 }
5516 SLAB_ATTR_RO(slabs_cpu_partial);
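/*
 * Output format of slabs_cpu_partial (the numbers are illustrative):
 *
 *   185(5) C0=92(3) C1=93(2)
 *
 * i.e. total objects(pages) held on per-cpu partial lists, followed by
 * a per-cpu breakdown on SMP builds.
 */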
5517
5518 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
5519 {
5520 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
5521 }
5522 SLAB_ATTR_RO(reclaim_account);
5523
5524 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
5525 {
5526 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
5527 }
5528 SLAB_ATTR_RO(hwcache_align);
5529
5530 #ifdef CONFIG_ZONE_DMA
5531 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
5532 {
5533 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
5534 }
5535 SLAB_ATTR_RO(cache_dma);
5536 #endif
5537
5538 static ssize_t usersize_show(struct kmem_cache *s, char *buf)
5539 {
5540 return sysfs_emit(buf, "%u\n", s->usersize);
5541 }
5542 SLAB_ATTR_RO(usersize);
5543
5544 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
5545 {
5546 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
5547 }
5548 SLAB_ATTR_RO(destroy_by_rcu);
5549
5550 #ifdef CONFIG_SLUB_DEBUG
5551 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
5552 {
5553 return show_slab_objects(s, buf, SO_ALL);
5554 }
5555 SLAB_ATTR_RO(slabs);
5556
5557 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
5558 {
5559 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
5560 }
5561 SLAB_ATTR_RO(total_objects);
5562
5563 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
5564 {
5565 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
5566 }
5567 SLAB_ATTR_RO(sanity_checks);
5568
5569 static ssize_t trace_show(struct kmem_cache *s, char *buf)
5570 {
5571 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE));
5572 }
5573 SLAB_ATTR_RO(trace);
5574
5575 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
5576 {
5577 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
5578 }
5579
5580 SLAB_ATTR_RO(red_zone);
5581
5582 static ssize_t poison_show(struct kmem_cache *s, char *buf)
5583 {
5584 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON));
5585 }
5586
5587 SLAB_ATTR_RO(poison);
5588
5589 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
5590 {
5591 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
5592 }
5593
5594 SLAB_ATTR_RO(store_user);
5595
5596 static ssize_t validate_show(struct kmem_cache *s, char *buf)
5597 {
5598 return 0;
5599 }
5600
5601 static ssize_t validate_store(struct kmem_cache *s,
5602 const char *buf, size_t length)
5603 {
5604 int ret = -EINVAL;
5605
5606 if (buf[0] == '1') {
5607 ret = validate_slab_cache(s);
5608 if (ret >= 0)
5609 ret = length;
5610 }
5611 return ret;
5612 }
5613 SLAB_ATTR(validate);
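/*
 * Writing '1' to /sys/kernel/slab/<cache>/validate triggers
 * validate_slab_cache(), a consistency check over every slab of the
 * cache; any other input is rejected with -EINVAL.
 */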
5614
5615 #endif /* CONFIG_SLUB_DEBUG */
5616
5617 #ifdef CONFIG_FAILSLAB
5618 static ssize_t failslab_show(struct kmem_cache *s, char *buf)
5619 {
5620 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
5621 }
5622 SLAB_ATTR_RO(failslab);
5623 #endif
5624
5625 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
5626 {
5627 return 0;
5628 }
5629
5630 static ssize_t shrink_store(struct kmem_cache *s,
5631 const char *buf, size_t length)
5632 {
5633 if (buf[0] == '1')
5634 kmem_cache_shrink(s);
5635 else
5636 return -EINVAL;
5637 return length;
5638 }
5639 SLAB_ATTR(shrink);
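/*
 * Writing '1' to /sys/kernel/slab/<cache>/shrink calls
 * kmem_cache_shrink(), which releases empty slabs back to the page
 * allocator; any other input returns -EINVAL.
 */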
5640
5641 #ifdef CONFIG_NUMA
5642 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
5643 {
5644 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10);
5645 }
5646
5647 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
5648 const char *buf, size_t length)
5649 {
5650 unsigned int ratio;
5651 int err;
5652
5653 err = kstrtouint(buf, 10, &ratio);
5654 if (err)
5655 return err;
5656 if (ratio > 100)
5657 return -ERANGE;
5658
5659 s->remote_node_defrag_ratio = ratio * 10;
5660
5661 return length;
5662 }
5663 SLAB_ATTR(remote_node_defrag_ratio);
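/*
 * The ratio is accepted as a percentage (0-100) and stored scaled by
 * ten; remote_node_defrag_ratio_show() divides by ten again so the
 * sysfs file always reads back in percent.
 */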
5664 #endif
5665
5666 #ifdef CONFIG_SLUB_STATS
5667 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
5668 {
5669 unsigned long sum = 0;
5670 int cpu;
5671 int len = 0;
5672 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
5673
5674 if (!data)
5675 return -ENOMEM;
5676
5677 for_each_online_cpu(cpu) {
5678 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
5679
5680 data[cpu] = x;
5681 sum += x;
5682 }
5683
5684 len += sysfs_emit_at(buf, len, "%lu", sum);
5685
5686 #ifdef CONFIG_SMP
5687 for_each_online_cpu(cpu) {
5688 if (data[cpu])
5689 len += sysfs_emit_at(buf, len, " C%d=%u",
5690 cpu, data[cpu]);
5691 }
5692 #endif
5693 kfree(data);
5694 len += sysfs_emit_at(buf, len, "\n");
5695
5696 return len;
5697 }
5698
5699 static void clear_stat(struct kmem_cache *s, enum stat_item si)
5700 {
5701 int cpu;
5702
5703 for_each_online_cpu(cpu)
5704 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
5705 }
5706
5707 #define STAT_ATTR(si, text) \
5708 static ssize_t text##_show(struct kmem_cache *s, char *buf) \
5709 { \
5710 return show_stat(s, buf, si); \
5711 } \
5712 static ssize_t text##_store(struct kmem_cache *s, \
5713 const char *buf, size_t length) \
5714 { \
5715 if (buf[0] != '0') \
5716 return -EINVAL; \
5717 clear_stat(s, si); \
5718 return length; \
5719 } \
5720 SLAB_ATTR(text); \
5721
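/*
 * For example, STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath) expands to an
 * alloc_fastpath_show() that sums the per-cpu ALLOC_FASTPATH counters
 * via show_stat(), an alloc_fastpath_store() that clears them when '0'
 * is written, and a read-write "alloc_fastpath" sysfs attribute.
 */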
5722 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
5723 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
5724 STAT_ATTR(FREE_FASTPATH, free_fastpath);
5725 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
5726 STAT_ATTR(FREE_FROZEN, free_frozen);
5727 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
5728 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
5729 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
5730 STAT_ATTR(ALLOC_SLAB, alloc_slab);
5731 STAT_ATTR(ALLOC_REFILL, alloc_refill);
5732 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
5733 STAT_ATTR(FREE_SLAB, free_slab);
5734 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
5735 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
5736 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
5737 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
5738 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
5739 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
5740 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
5741 STAT_ATTR(ORDER_FALLBACK, order_fallback);
5742 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
5743 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
5744 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
5745 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
5746 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
5747 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
5748 #endif /* CONFIG_SLUB_STATS */
5749
5750 static struct attribute *slab_attrs[] = {
5751 &slab_size_attr.attr,
5752 &object_size_attr.attr,
5753 &objs_per_slab_attr.attr,
5754 &order_attr.attr,
5755 &min_partial_attr.attr,
5756 &cpu_partial_attr.attr,
5757 &objects_attr.attr,
5758 &objects_partial_attr.attr,
5759 &partial_attr.attr,
5760 &cpu_slabs_attr.attr,
5761 &ctor_attr.attr,
5762 &aliases_attr.attr,
5763 &align_attr.attr,
5764 &hwcache_align_attr.attr,
5765 &reclaim_account_attr.attr,
5766 &destroy_by_rcu_attr.attr,
5767 &shrink_attr.attr,
5768 &slabs_cpu_partial_attr.attr,
5769 #ifdef CONFIG_SLUB_DEBUG
5770 &total_objects_attr.attr,
5771 &slabs_attr.attr,
5772 &sanity_checks_attr.attr,
5773 &trace_attr.attr,
5774 &red_zone_attr.attr,
5775 &poison_attr.attr,
5776 &store_user_attr.attr,
5777 &validate_attr.attr,
5778 #endif
5779 #ifdef CONFIG_ZONE_DMA
5780 &cache_dma_attr.attr,
5781 #endif
5782 #ifdef CONFIG_NUMA
5783 &remote_node_defrag_ratio_attr.attr,
5784 #endif
5785 #ifdef CONFIG_SLUB_STATS
5786 &alloc_fastpath_attr.attr,
5787 &alloc_slowpath_attr.attr,
5788 &free_fastpath_attr.attr,
5789 &free_slowpath_attr.attr,
5790 &free_frozen_attr.attr,
5791 &free_add_partial_attr.attr,
5792 &free_remove_partial_attr.attr,
5793 &alloc_from_partial_attr.attr,
5794 &alloc_slab_attr.attr,
5795 &alloc_refill_attr.attr,
5796 &alloc_node_mismatch_attr.attr,
5797 &free_slab_attr.attr,
5798 &cpuslab_flush_attr.attr,
5799 &deactivate_full_attr.attr,
5800 &deactivate_empty_attr.attr,
5801 &deactivate_to_head_attr.attr,
5802 &deactivate_to_tail_attr.attr,
5803 &deactivate_remote_frees_attr.attr,
5804 &deactivate_bypass_attr.attr,
5805 &order_fallback_attr.attr,
5806 &cmpxchg_double_fail_attr.attr,
5807 &cmpxchg_double_cpu_fail_attr.attr,
5808 &cpu_partial_alloc_attr.attr,
5809 &cpu_partial_free_attr.attr,
5810 &cpu_partial_node_attr.attr,
5811 &cpu_partial_drain_attr.attr,
5812 #endif
5813 #ifdef CONFIG_FAILSLAB
5814 &failslab_attr.attr,
5815 #endif
5816 &usersize_attr.attr,
5817
5818 NULL
5819 };
5820
5821 static const struct attribute_group slab_attr_group = {
5822 .attrs = slab_attrs,
5823 };
5824
5825 static ssize_t slab_attr_show(struct kobject *kobj,
5826 struct attribute *attr,
5827 char *buf)
5828 {
5829 struct slab_attribute *attribute;
5830 struct kmem_cache *s;
5831 int err;
5832
5833 attribute = to_slab_attr(attr);
5834 s = to_slab(kobj);
5835
5836 if (!attribute->show)
5837 return -EIO;
5838
5839 err = attribute->show(s, buf);
5840
5841 return err;
5842 }
5843
5844 static ssize_t slab_attr_store(struct kobject *kobj,
5845 struct attribute *attr,
5846 const char *buf, size_t len)
5847 {
5848 struct slab_attribute *attribute;
5849 struct kmem_cache *s;
5850 int err;
5851
5852 attribute = to_slab_attr(attr);
5853 s = to_slab(kobj);
5854
5855 if (!attribute->store)
5856 return -EIO;
5857
5858 err = attribute->store(s, buf, len);
5859 return err;
5860 }
5861
5862 static void kmem_cache_release(struct kobject *k)
5863 {
5864 slab_kmem_cache_release(to_slab(k));
5865 }
5866
5867 static const struct sysfs_ops slab_sysfs_ops = {
5868 .show = slab_attr_show,
5869 .store = slab_attr_store,
5870 };
5871
5872 static struct kobj_type slab_ktype = {
5873 .sysfs_ops = &slab_sysfs_ops,
5874 .release = kmem_cache_release,
5875 };
5876
5877 static struct kset *slab_kset;
5878
5879 static inline struct kset *cache_kset(struct kmem_cache *s)
5880 {
5881 return slab_kset;
5882 }
5883
5884 #define ID_STR_LENGTH 64
5885
5886 /* Create a unique string id for a slab cache:
5887 *
5888 * Format :[flags-]size
5889 */
5890 static char *create_unique_id(struct kmem_cache *s)
5891 {
5892 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5893 char *p = name;
5894
5895 if (!name)
5896 return ERR_PTR(-ENOMEM);
5897
5898 *p++ = ':';
5899 /*
5900 * First flags affecting slabcache operations. We will only
5901 * get here for aliasable slabs so we do not need to support
5902 * too many flags. The flags here must cover all flags that
5903 * are matched during merging to guarantee that the id is
5904 * unique.
5905 */
5906 if (s->flags & SLAB_CACHE_DMA)
5907 *p++ = 'd';
5908 if (s->flags & SLAB_CACHE_DMA32)
5909 *p++ = 'D';
5910 if (s->flags & SLAB_RECLAIM_ACCOUNT)
5911 *p++ = 'a';
5912 if (s->flags & SLAB_CONSISTENCY_CHECKS)
5913 *p++ = 'F';
5914 if (s->flags & SLAB_ACCOUNT)
5915 *p++ = 'A';
5916 if (p != name + 1)
5917 *p++ = '-';
5918 p += sprintf(p, "%07u", s->size);
5919
5920 BUG_ON(p > name + ID_STR_LENGTH - 1);
5921 return name;
5922 }
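/*
 * Example ids produced above (sizes are illustrative): a cache with
 * SLAB_ACCOUNT and object size 192 becomes ":A-0000192"; a cache with
 * none of the matched flags and size 64 becomes ":0000064".
 */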
5923
5924 static int sysfs_slab_add(struct kmem_cache *s)
5925 {
5926 int err;
5927 const char *name;
5928 struct kset *kset = cache_kset(s);
5929 int unmergeable = slab_unmergeable(s);
5930
5931 if (!kset) {
5932 kobject_init(&s->kobj, &slab_ktype);
5933 return 0;
5934 }
5935
5936 if (!unmergeable && disable_higher_order_debug &&
5937 (slub_debug & DEBUG_METADATA_FLAGS))
5938 unmergeable = 1;
5939
5940 if (unmergeable) {
5941 /*
5942 * Slabcache can never be merged so we can use the name proper.
5943 * This is typically the case for debug situations. In that
5944 * case we can catch duplicate names easily.
5945 */
5946 sysfs_remove_link(&slab_kset->kobj, s->name);
5947 name = s->name;
5948 } else {
5949 /*
5950 * Create a unique name for the slab as a target
5951 * for the symlinks.
5952 */
5953 name = create_unique_id(s);
5954 if (IS_ERR(name))
5955 return PTR_ERR(name);
5956 }
5957
5958 s->kobj.kset = kset;
5959 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
5960 if (err)
5961 goto out;
5962
5963 err = sysfs_create_group(&s->kobj, &slab_attr_group);
5964 if (err)
5965 goto out_del_kobj;
5966
5967 if (!unmergeable) {
5968 /* Setup first alias */
5969 sysfs_slab_alias(s, s->name);
5970 }
5971 out:
5972 if (!unmergeable)
5973 kfree(name);
5974 return err;
5975 out_del_kobj:
5976 kobject_del(&s->kobj);
5977 goto out;
5978 }
5979
5980 void sysfs_slab_unlink(struct kmem_cache *s)
5981 {
5982 if (slab_state >= FULL)
5983 kobject_del(&s->kobj);
5984 }
5985
5986 void sysfs_slab_release(struct kmem_cache *s)
5987 {
5988 if (slab_state >= FULL)
5989 kobject_put(&s->kobj);
5990 }
5991
5992 /*
5993 * Need to buffer aliases during bootup until sysfs becomes
5994 * available lest we lose that information.
5995 */
5996 struct saved_alias {
5997 struct kmem_cache *s;
5998 const char *name;
5999 struct saved_alias *next;
6000 };
6001
6002 static struct saved_alias *alias_list;
6003
6004 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
6005 {
6006 struct saved_alias *al;
6007
6008 if (slab_state == FULL) {
6009 /*
6010 * If we have a leftover link then remove it.
6011 */
6012 sysfs_remove_link(&slab_kset->kobj, name);
6013 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
6014 }
6015
6016 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
6017 if (!al)
6018 return -ENOMEM;
6019
6020 al->s = s;
6021 al->name = name;
6022 al->next = alias_list;
6023 alias_list = al;
6024 return 0;
6025 }
6026
6027 static int __init slab_sysfs_init(void)
6028 {
6029 struct kmem_cache *s;
6030 int err;
6031
6032 mutex_lock(&slab_mutex);
6033
6034 slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
6035 if (!slab_kset) {
6036 mutex_unlock(&slab_mutex);
6037 pr_err("Cannot register slab subsystem.\n");
6038 return -ENOSYS;
6039 }
6040
6041 slab_state = FULL;
6042
6043 list_for_each_entry(s, &slab_caches, list) {
6044 err = sysfs_slab_add(s);
6045 if (err)
6046 pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
6047 s->name);
6048 }
6049
6050 while (alias_list) {
6051 struct saved_alias *al = alias_list;
6052
6053 alias_list = alias_list->next;
6054 err = sysfs_slab_alias(al->s, al->name);
6055 if (err)
6056 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
6057 al->name);
6058 kfree(al);
6059 }
6060
6061 mutex_unlock(&slab_mutex);
6062 return 0;
6063 }
6064
6065 __initcall(slab_sysfs_init);
6066 #endif /* CONFIG_SYSFS */
6067
6068 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
6069 static int slab_debugfs_show(struct seq_file *seq, void *v)
6070 {
6071 struct loc_track *t = seq->private;
6072 struct location *l;
6073 unsigned long idx;
6074
6075 idx = (unsigned long) t->idx;
6076 if (idx < t->count) {
6077 l = &t->loc[idx];
6078
6079 seq_printf(seq, "%7ld ", l->count);
6080
6081 if (l->addr)
6082 seq_printf(seq, "%pS", (void *)l->addr);
6083 else
6084 seq_puts(seq, "<not-available>");
6085
6086 if (l->sum_time != l->min_time) {
6087 seq_printf(seq, " age=%ld/%llu/%ld",
6088 l->min_time, div_u64(l->sum_time, l->count),
6089 l->max_time);
6090 } else
6091 seq_printf(seq, " age=%ld", l->min_time);
6092
6093 if (l->min_pid != l->max_pid)
6094 seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
6095 else
6096 seq_printf(seq, " pid=%ld",
6097 l->min_pid);
6098
6099 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
6100 seq_printf(seq, " cpus=%*pbl",
6101 cpumask_pr_args(to_cpumask(l->cpus)));
6102
6103 if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
6104 seq_printf(seq, " nodes=%*pbl",
6105 nodemask_pr_args(&l->nodes));
6106
6107 seq_puts(seq, "\n");
6108 }
6109
6110 if (!idx && !t->count)
6111 seq_puts(seq, "No data\n");
6112
6113 return 0;
6114 }
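/*
 * A typical alloc_traces/free_traces line emitted above looks like
 * (symbol and values are illustrative):
 *
 *     128 kmem_cache_alloc+0x9c/0x1c0 age=34/1200/5600 pid=1-215 cpus=0-3 nodes=0
 */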
6115
6116 static void slab_debugfs_stop(struct seq_file *seq, void *v)
6117 {
6118 }
6119
6120 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
6121 {
6122 struct loc_track *t = seq->private;
6123
6124 t->idx = ++(*ppos);
6125 if (*ppos <= t->count)
6126 return ppos;
6127
6128 return NULL;
6129 }
6130
6131 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
6132 {
6133 struct loc_track *t = seq->private;
6134
6135 t->idx = *ppos;
6136 return ppos;
6137 }
6138
6139 static const struct seq_operations slab_debugfs_sops = {
6140 .start = slab_debugfs_start,
6141 .next = slab_debugfs_next,
6142 .stop = slab_debugfs_stop,
6143 .show = slab_debugfs_show,
6144 };
6145
6146 static int slab_debug_trace_open(struct inode *inode, struct file *filep)
6147 {
6148
6149 struct kmem_cache_node *n;
6150 enum track_item alloc;
6151 int node;
6152 struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
6153 sizeof(struct loc_track));
6154 struct kmem_cache *s = file_inode(filep)->i_private;
6155 unsigned long *obj_map;
6156
6157 if (!t)
6158 return -ENOMEM;
6159
6160 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
6161 if (!obj_map) {
6162 seq_release_private(inode, filep);
6163 return -ENOMEM;
6164 }
6165
6166 if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
6167 alloc = TRACK_ALLOC;
6168 else
6169 alloc = TRACK_FREE;
6170
6171 if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
6172 bitmap_free(obj_map);
6173 seq_release_private(inode, filep);
6174 return -ENOMEM;
6175 }
6176
6177 for_each_kmem_cache_node(s, node, n) {
6178 unsigned long flags;
6179 struct page *page;
6180
6181 if (!atomic_long_read(&n->nr_slabs))
6182 continue;
6183
6184 spin_lock_irqsave(&n->list_lock, flags);
6185 list_for_each_entry(page, &n->partial, slab_list)
6186 process_slab(t, s, page, alloc, obj_map);
6187 list_for_each_entry(page, &n->full, slab_list)
6188 process_slab(t, s, page, alloc, obj_map);
6189 spin_unlock_irqrestore(&n->list_lock, flags);
6190 }
6191
6192 bitmap_free(obj_map);
6193 return 0;
6194 }
6195
6196 static int slab_debug_trace_release(struct inode *inode, struct file *file)
6197 {
6198 struct seq_file *seq = file->private_data;
6199 struct loc_track *t = seq->private;
6200
6201 free_loc_track(t);
6202 return seq_release_private(inode, file);
6203 }
6204
6205 static const struct file_operations slab_debugfs_fops = {
6206 .open = slab_debug_trace_open,
6207 .read = seq_read,
6208 .llseek = seq_lseek,
6209 .release = slab_debug_trace_release,
6210 };
6211
6212 static void debugfs_slab_add(struct kmem_cache *s)
6213 {
6214 struct dentry *slab_cache_dir;
6215
6216 if (unlikely(!slab_debugfs_root))
6217 return;
6218
6219 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);
6220
6221 debugfs_create_file("alloc_traces", 0400,
6222 slab_cache_dir, s, &slab_debugfs_fops);
6223
6224 debugfs_create_file("free_traces", 0400,
6225 slab_cache_dir, s, &slab_debugfs_fops);
6226 }
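/*
 * With debugfs mounted at /sys/kernel/debug, the files created above
 * appear as /sys/kernel/debug/slab/<cache>/alloc_traces and
 * .../free_traces, and only for caches using SLAB_STORE_USER
 * (see slab_debugfs_init() below).
 */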
6227
6228 void debugfs_slab_release(struct kmem_cache *s)
6229 {
6230 debugfs_remove_recursive(debugfs_lookup(s->name, slab_debugfs_root));
6231 }
6232
6233 static int __init slab_debugfs_init(void)
6234 {
6235 struct kmem_cache *s;
6236
6237 slab_debugfs_root = debugfs_create_dir("slab", NULL);
6238
6239 list_for_each_entry(s, &slab_caches, list)
6240 if (s->flags & SLAB_STORE_USER)
6241 debugfs_slab_add(s);
6242
6243 return 0;
6244
6245 }
6246 __initcall(slab_debugfs_init);
6247 #endif
6248 /*
6249 * The /proc/slabinfo ABI
6250 */
6251 #ifdef CONFIG_SLUB_DEBUG
6252 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
6253 {
6254 unsigned long nr_slabs = 0;
6255 unsigned long nr_objs = 0;
6256 unsigned long nr_free = 0;
6257 int node;
6258 struct kmem_cache_node *n;
6259
6260 for_each_kmem_cache_node(s, node, n) {
6261 nr_slabs += node_nr_slabs(n);
6262 nr_objs += node_nr_objs(n);
6263 nr_free += count_partial(n, count_free);
6264 }
6265
6266 sinfo->active_objs = nr_objs - nr_free;
6267 sinfo->num_objs = nr_objs;
6268 sinfo->active_slabs = nr_slabs;
6269 sinfo->num_slabs = nr_slabs;
6270 sinfo->objects_per_slab = oo_objects(s->oo);
6271 sinfo->cache_order = oo_order(s->oo);
6272 }
6273 EXPORT_SYMBOL_NS_GPL(get_slabinfo, MINIDUMP);
6274
6275 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
6276 {
6277 }
6278
6279 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
6280 size_t count, loff_t *ppos)
6281 {
6282 return -EIO;
6283 }
6284 #endif /* CONFIG_SLUB_DEBUG */
6285